xdp-tools-1.5.4/mkarchive.sh:

#!/bin/bash
# Build a release tarball of xdp-tools, bundling the libbpf submodule.
# Usage: ./mkarchive.sh [version]  (version defaults to the one in version.mk)

WORKDIR=$(dirname "${BASH_SOURCE[0]}")
VERSION="${1:-$(make -f $WORKDIR/version.mk)}"
OUTFILE="$WORKDIR/xdp-tools-$VERSION.tar.gz"
PREFIX=xdp-tools-$VERSION
TMPDIR=$(mktemp -d)

set -o errexit
set -o nounset

# Clean up the temporary directory on exit, preserving the exit status
trap 'status=$?; rm -rf $TMPDIR; exit $status' EXIT HUP INT QUIT TERM

[ -d .git ] || exit 1

# Refuse to run if there are staged or modified files in the git index
if git status -s | grep -Eq '^ ?[AM]'; then
    echo "Please commit changes first" >&2
    exit 1
fi

# Export the main tree and the libbpf submodule as separate archives, then
# unpack both under a common prefix and repack them into a single tarball
git archive -o "$TMPDIR/xdp-tools.tar.gz" --prefix "${PREFIX}/" HEAD
( cd lib/libbpf && git archive -o "$TMPDIR/libbpf.tar.gz" --prefix "${PREFIX}/lib/libbpf/" HEAD)

tar -C "$TMPDIR" -xzf "$TMPDIR/xdp-tools.tar.gz"
tar -C "$TMPDIR" -xzf "$TMPDIR/libbpf.tar.gz"
tar -C "$TMPDIR" -czf "$OUTFILE" "$PREFIX"

echo "Created $OUTFILE"

xdp-tools-1.5.4/README.org:

* xdp-tools - Library and utilities for use with XDP

This repository contains the =libxdp= library for working with the eXpress
Data Path facility of the Linux kernel, and a collection of utilities and
example code that uses the library.

The repository contains the following:

- [[lib/libxdp/][lib/libxdp/]] - the =libxdp= library itself; can be built standalone using =make libxdp=
- [[xdp-bench/][xdp-bench/]] - an XDP benchmarking tool
- [[xdp-dump/][xdp-dump/]] - a tcpdump-like tool for capturing packets at the XDP layer
- [[xdp-filter/][xdp-filter/]] - a simple packet filtering utility powered by XDP
- [[xdp-forward/][xdp-forward/]] - an XDP forwarding plane
- [[xdp-loader/][xdp-loader/]] - a command-line utility for loading XDP programs using =libxdp= (see the usage sketch below)
- [[xdp-monitor/][xdp-monitor/]] - a simple XDP tracepoint monitoring tool
- [[xdp-trafficgen/][xdp-trafficgen/]] - an XDP-based packet generator
- [[headers/xdp/][headers/xdp/]] - reusable eBPF code snippets for XDP (installed in /usr/include/xdp by =make install=)
- [[lib/util/][lib/util/]] - common code shared between the different utilities
- [[packaging/][packaging/]] - files used for distro packaging
- lib/libbpf/ - a git submodule with [[https://github.com/libbpf/libbpf][libbpf]], used if the system version is not recent enough

To compile, first run =./configure=, then simply type =make=. Make sure you
either have a sufficiently recent libbpf installed on your system, or that
you have pulled down the libbpf git submodule (=git submodule init && git
submodule update=).

For a general introduction to XDP, please see the [[https://github.com/xdp-project/xdp-tutorial][XDP tutorial]], and for more
BPF and XDP examples, see the [[https://github.com/xdp-project/bpf-examples][bpf-examples repository]].
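As a concrete example, a full build from a fresh checkout might look like the
following (a sketch of the compile steps above; the submodule step is only
needed when the system libbpf is too old):

#+begin_src sh
git clone https://github.com/xdp-project/xdp-tools
cd xdp-tools
git submodule init && git submodule update  # bundled libbpf, if needed
./configure          # detect dependencies and write the build configuration
make                 # build libxdp and all the utilities
sudo make install    # optional: install binaries, libraries and headers
#+end_src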
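Similarly, a minimal session with =xdp-loader= (referenced in the list above)
might look like this; the interface name =eth0= and the object file
=my_prog.o= are placeholders:

#+begin_src sh
# Load an XDP program from a BPF object file onto an interface
xdp-loader load eth0 my_prog.o

# Show which XDP programs are attached where
xdp-loader status

# Detach all XDP programs from the interface again
xdp-loader unload eth0 --all
#+end_src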
xdp-tools-1.5.4/.github/scripts/.config:

#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 5.9.0-rc1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 8.2.1 20180801 (Red Hat 8.2.1-2)"
CONFIG_CC_IS_GCC=y
CONFIG_GCC_VERSION=80201
CONFIG_LD_VERSION=230000000
CONFIG_CLANG_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
CONFIG_CC_HAS_ASM_GOTO=y
CONFIG_IRQ_WORK=y
CONFIG_BUILDTIME_TABLE_SORT=y
CONFIG_THREAD_INFO_IN_TASK=y

#
# General setup
#
CONFIG_INIT_ENV_ARG_LIMIT=32
# CONFIG_COMPILE_TEST is not set
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_BUILD_SALT=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_XZ=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_HAVE_KERNEL_LZ4=y
CONFIG_HAVE_KERNEL_ZSTD=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
# CONFIG_KERNEL_ZSTD is not set
CONFIG_DEFAULT_INIT=""
CONFIG_DEFAULT_HOSTNAME="(none)"
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_WATCH_QUEUE is not set
CONFIG_CROSS_MEMORY_ATTACH=y
# CONFIG_USELIB is not set
CONFIG_AUDIT=y
CONFIG_HAVE_ARCH_AUDITSYSCALL=y
CONFIG_AUDITSYSCALL=y

#
# IRQ subsystem
#
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_GENERIC_IRQ_MIGRATION=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_IRQ_DOMAIN=y
CONFIG_IRQ_DOMAIN_HIERARCHY=y
CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
CONFIG_IRQ_FORCED_THREADING=y
CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_IRQ_DEBUGFS is not set
# end of IRQ subsystem

CONFIG_CLOCKSOURCE_WATCHDOG=y
CONFIG_ARCH_CLOCKSOURCE_INIT=y
CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y

#
# Timers subsystem
#
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ_COMMON=y
# CONFIG_HZ_PERIODIC is not set
CONFIG_NO_HZ_IDLE=y
# CONFIG_NO_HZ_FULL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
# end of Timers subsystem

# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_PREEMPT_COUNT=y
CONFIG_PREEMPTION=y

#
# CPU/Task time and stats accounting
#
CONFIG_TICK_CPU_ACCOUNTING=y
# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
# CONFIG_IRQ_TIME_ACCOUNTING is not set
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
# CONFIG_PSI is not set
# end of CPU/Task time and stats accounting

# CONFIG_CPU_ISOLATION is not set

#
# RCU Subsystem
#
CONFIG_TREE_RCU=y
CONFIG_PREEMPT_RCU=y
# CONFIG_RCU_EXPERT is not set
CONFIG_SRCU=y
CONFIG_TREE_SRCU=y
CONFIG_TASKS_RCU_GENERIC=y
CONFIG_TASKS_RCU=y
CONFIG_TASKS_RUDE_RCU=y
CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_NEED_SEGCBLIST=y
# end of RCU Subsystem

CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_IKHEADERS is not set
CONFIG_LOG_BUF_SHIFT=21
CONFIG_LOG_CPU_MAX_BUF_SHIFT=0
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y

#
# Scheduler features
#
# CONFIG_UCLAMP_TASK is not set
# end of Scheduler features

CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_HAS_INT128=y
CONFIG_ARCH_SUPPORTS_INT128=y
CONFIG_NUMA_BALANCING=y
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
# CONFIG_RT_GROUP_SCHED is not set
# CONFIG_CGROUP_PIDS is not set
# CONFIG_CGROUP_RDMA is not set
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_PROC_PID_CPUSET=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_SOCK_CGROUP_DATA=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_TIME_NS=y
CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
# CONFIG_CHECKPOINT_RESTORE is not set
# CONFIG_SCHED_AUTOGROUP is not set
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
CONFIG_RD_XZ=y
CONFIG_RD_LZO=y
CONFIG_RD_LZ4=y
CONFIG_RD_ZSTD=y
CONFIG_BOOT_CONFIG=y
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_SYSCTL_EXCEPTION_TRACE=y
CONFIG_HAVE_PCSPKR_PLATFORM=y
CONFIG_BPF=y
CONFIG_EXPERT=y
CONFIG_MULTIUSER=y
CONFIG_SGETMASK_SYSCALL=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_FHANDLE=y
CONFIG_POSIX_TIMERS=y
CONFIG_PRINTK=y
CONFIG_PRINTK_NMI=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_FUTEX_PI=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_IO_URING=y
CONFIG_ADVISE_SYSCALLS=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
CONFIG_KALLSYMS_BASE_RELATIVE=y
CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_JIT_DEFAULT_ON=y
CONFIG_USERMODE_DRIVER=y
CONFIG_BPF_PRELOAD=y
CONFIG_BPF_PRELOAD_UMD=y
# CONFIG_USERFAULTFD is not set
CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
CONFIG_RSEQ=y
# CONFIG_DEBUG_RSEQ is not set
# CONFIG_EMBEDDED is not set
CONFIG_HAVE_PERF_EVENTS=y
# CONFIG_PC104 is not set

#
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
# end of Kernel Performance Events And Counters

CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_SLAB_MERGE_DEFAULT=y
# CONFIG_SLAB_FREELIST_RANDOM is not set
# CONFIG_SLAB_FREELIST_HARDENED is not set
# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
CONFIG_SLUB_CPU_PARTIAL=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
# end of General setup

CONFIG_64BIT=y
CONFIG_X86_64=y
CONFIG_X86=y
CONFIG_INSTRUCTION_DECODER=y
CONFIG_OUTPUT_FORMAT="elf64-x86-64"
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_MMU=y
CONFIG_ARCH_MMAP_RND_BITS_MIN=28
CONFIG_ARCH_MMAP_RND_BITS_MAX=32
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_ARCH_HAS_CPU_RELAX=y
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_ARCH_HAS_FILTER_PGPROT=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
CONFIG_ZONE_DMA32=y
CONFIG_AUDIT_ARCH=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_X86_64_SMP=y
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_FIX_EARLYCON_MEM=y
CONFIG_PGTABLE_LEVELS=4
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y

#
# Processor type and features
#
CONFIG_ZONE_DMA=y
CONFIG_SMP=y
CONFIG_X86_FEATURE_NAMES=y
CONFIG_X86_MPPARSE=y
# CONFIG_GOLDFISH is not set
# CONFIG_RETPOLINE is not set
# CONFIG_X86_CPU_RESCTRL is not set
CONFIG_X86_EXTENDED_PLATFORM=y
# CONFIG_X86_VSMP is not set
# CONFIG_X86_GOLDFISH is not set
# CONFIG_X86_INTEL_LPSS is not set
# CONFIG_X86_AMD_PLATFORM_DEVICE is not set
# CONFIG_IOSF_MBI is not set
CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
CONFIG_SCHED_OMIT_FRAME_POINTER=y
# CONFIG_HYPERVISOR_GUEST is not set
# CONFIG_MK8 is not set
# CONFIG_MPSC is not set
CONFIG_MCORE2=y
# CONFIG_MATOM is not set
# CONFIG_GENERIC_CPU is not set
CONFIG_X86_INTERNODE_CACHE_SHIFT=6
CONFIG_X86_L1_CACHE_SHIFT=6
CONFIG_X86_INTEL_USERCOPY=y
CONFIG_X86_USE_PPRO_CHECKSUM=y
CONFIG_X86_P6_NOP=y
CONFIG_X86_TSC=y
CONFIG_X86_CMPXCHG64=y
CONFIG_X86_CMOV=y
CONFIG_X86_MINIMUM_CPU_FAMILY=64
CONFIG_X86_DEBUGCTLMSR=y
CONFIG_IA32_FEAT_CTL=y
CONFIG_X86_VMX_FEATURE_NAMES=y
# CONFIG_PROCESSOR_SELECT is not set
CONFIG_CPU_SUP_INTEL=y
CONFIG_CPU_SUP_AMD=y
CONFIG_CPU_SUP_HYGON=y
CONFIG_CPU_SUP_CENTAUR=y
CONFIG_CPU_SUP_ZHAOXIN=y
CONFIG_HPET_TIMER=y
CONFIG_DMI=y
CONFIG_GART_IOMMU=y
# CONFIG_MAXSMP is not set
CONFIG_NR_CPUS_RANGE_BEGIN=2
CONFIG_NR_CPUS_RANGE_END=512
CONFIG_NR_CPUS_DEFAULT=64
CONFIG_NR_CPUS=128
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_MC_PRIO=y
CONFIG_X86_LOCAL_APIC=y
CONFIG_X86_IO_APIC=y
# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
CONFIG_X86_MCE=y
# CONFIG_X86_MCELOG_LEGACY is not set
CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
CONFIG_X86_MCE_THRESHOLD=y
# CONFIG_X86_MCE_INJECT is not set
CONFIG_X86_THERMAL_VECTOR=y

#
# Performance monitoring
#
CONFIG_PERF_EVENTS_INTEL_UNCORE=y
# CONFIG_PERF_EVENTS_INTEL_RAPL is not set
# CONFIG_PERF_EVENTS_INTEL_CSTATE is not set
# CONFIG_PERF_EVENTS_AMD_POWER is not set
# end of Performance monitoring

# CONFIG_X86_16BIT is not set
CONFIG_X86_VSYSCALL_EMULATION=y
CONFIG_X86_IOPL_IOPERM=y
# CONFIG_I8K is not set
# CONFIG_MICROCODE is not set
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
# CONFIG_X86_5LEVEL is not set
CONFIG_X86_DIRECT_GBPAGES=y
# CONFIG_X86_CPA_STATISTICS is not set
# CONFIG_AMD_MEM_ENCRYPT is not set
CONFIG_NUMA=y
CONFIG_AMD_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
# CONFIG_NUMA_EMU is not set
CONFIG_NODES_SHIFT=6
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_PROC_KCORE_TEXT=y
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
# CONFIG_X86_PMEM_LEGACY is not set
# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
CONFIG_X86_RESERVE_LOW=64
CONFIG_MTRR=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_X86_PAT=y
CONFIG_ARCH_USES_PG_UNCACHED=y
CONFIG_ARCH_RANDOM=y
CONFIG_X86_SMAP=y
CONFIG_X86_UMIP=y
# CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not set
CONFIG_X86_INTEL_TSX_MODE_OFF=y
# CONFIG_X86_INTEL_TSX_MODE_ON is not set
# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set
CONFIG_EFI=y
CONFIG_EFI_STUB=y
# CONFIG_EFI_MIXED is not set
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
CONFIG_HZ_1000=y
CONFIG_HZ=1000
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_KEXEC_FILE is not set
# CONFIG_CRASH_DUMP is not set
CONFIG_PHYSICAL_START=0x1000000
CONFIG_RELOCATABLE=y
# CONFIG_RANDOMIZE_BASE is not set
CONFIG_PHYSICAL_ALIGN=0x1000000
CONFIG_HOTPLUG_CPU=y
# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
# CONFIG_LEGACY_VSYSCALL_XONLY is not set
CONFIG_LEGACY_VSYSCALL_NONE=y
# CONFIG_CMDLINE_BOOL is not set
CONFIG_MODIFY_LDT_SYSCALL=y
CONFIG_HAVE_LIVEPATCH=y
# CONFIG_LIVEPATCH is not set
# end of Processor type and features

CONFIG_ARCH_HAS_ADD_PAGES=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_USE_PERCPU_NUMA_NODE_ID=y
CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
CONFIG_ARCH_ENABLE_THP_MIGRATION=y

#
# Power management and ACPI options
#
# CONFIG_SUSPEND is not set
# CONFIG_HIBERNATION is not set
# CONFIG_PM is not set
# CONFIG_ENERGY_MODEL is not set
CONFIG_ARCH_SUPPORTS_ACPI=y
CONFIG_ACPI=y
CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
# CONFIG_ACPI_DEBUGGER is not set
# CONFIG_ACPI_SPCR_TABLE is not set
CONFIG_ACPI_LPIT=y
# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set
# CONFIG_ACPI_EC_DEBUGFS is not set
# CONFIG_ACPI_AC is not set
# CONFIG_ACPI_BATTERY is not set
# CONFIG_ACPI_BUTTON is not set
# CONFIG_ACPI_TINY_POWER_BUTTON is not set
# CONFIG_ACPI_VIDEO is not set
# CONFIG_ACPI_FAN is not set
# CONFIG_ACPI_DOCK is not set
CONFIG_ACPI_CPU_FREQ_PSS=y
CONFIG_ACPI_PROCESSOR_CSTATE=y
CONFIG_ACPI_PROCESSOR_IDLE=y
CONFIG_ACPI_CPPC_LIB=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
# CONFIG_ACPI_THERMAL is not set
CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
# CONFIG_ACPI_TABLE_UPGRADE is not set
# CONFIG_ACPI_DEBUG is not set
# CONFIG_ACPI_PCI_SLOT is not set
CONFIG_ACPI_CONTAINER=y
CONFIG_ACPI_HOTPLUG_IOAPIC=y
# CONFIG_ACPI_SBS is not set
# CONFIG_ACPI_HED is not set
# CONFIG_ACPI_CUSTOM_METHOD is not set
# CONFIG_ACPI_BGRT is not set
# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
# CONFIG_ACPI_NFIT is not set
CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_HMAT is not set
CONFIG_HAVE_ACPI_APEI=y
CONFIG_HAVE_ACPI_APEI_NMI=y
# CONFIG_ACPI_APEI is not set
# CONFIG_DPTF_POWER is not set
# CONFIG_PMIC_OPREGION is not set
# CONFIG_ACPI_CONFIGFS is not set
# CONFIG_X86_PM_TIMER is not set
# CONFIG_SFI is not set

#
# CPU Frequency scaling
#
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_ATTR_SET=y
CONFIG_CPU_FREQ_GOV_COMMON=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y

#
# CPU frequency scaling drivers
#
CONFIG_X86_INTEL_PSTATE=y
# CONFIG_X86_PCC_CPUFREQ is not set
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_X86_ACPI_CPUFREQ_CPB=y
CONFIG_X86_POWERNOW_K8=y
# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
# CONFIG_X86_P4_CLOCKMOD is not set

#
# shared options
#
# end of CPU Frequency scaling

#
# CPU Idle
#
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
# CONFIG_CPU_IDLE_GOV_TEO is not set
# end of CPU Idle

# CONFIG_INTEL_IDLE is not set
# end of Power management and ACPI options

#
# Bus options (PCI etc.)
#
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
CONFIG_MMCONF_FAM10H=y
# CONFIG_PCI_CNB20LE_QUIRK is not set
# CONFIG_ISA_BUS is not set
CONFIG_ISA_DMA_API=y
CONFIG_AMD_NB=y
# CONFIG_X86_SYSFB is not set
# end of Bus options (PCI etc.)

#
# Binary Emulations
#
# CONFIG_IA32_EMULATION is not set
# CONFIG_X86_X32 is not set
# end of Binary Emulations

#
# Firmware Drivers
#
# CONFIG_EDD is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_DMIID=y
# CONFIG_DMI_SYSFS is not set
CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
# CONFIG_FW_CFG_SYSFS is not set
# CONFIG_GOOGLE_FIRMWARE is not set

#
# EFI (Extensible Firmware Interface) Support
#
# CONFIG_EFI_VARS is not set
CONFIG_EFI_ESRT=y
CONFIG_EFI_RUNTIME_MAP=y
# CONFIG_EFI_FAKE_MEMMAP is not set
CONFIG_EFI_RUNTIME_WRAPPERS=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
# CONFIG_EFI_CAPSULE_LOADER is not set
# CONFIG_EFI_TEST is not set
# CONFIG_APPLE_PROPERTIES is not set
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_EFI_RCI2_TABLE is not set
# CONFIG_EFI_DISABLE_PCI_DMA is not set
# end of EFI (Extensible Firmware Interface) Support

CONFIG_EFI_EARLYCON=y

#
# Tegra firmware driver
#
# end of Tegra firmware driver
# end of Firmware Drivers

CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
# CONFIG_KVM is not set
CONFIG_KVM_WERROR=y
CONFIG_AS_AVX512=y
CONFIG_AS_SHA1_NI=y
CONFIG_AS_SHA256_NI=y

#
# General architecture-dependent options
#
CONFIG_CRASH_CORE=y
CONFIG_KEXEC_CORE=y
CONFIG_HOTPLUG_SMT=y
CONFIG_GENERIC_ENTRY=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_OPROFILE_NMI_TIMER=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_STATIC_KEYS_SELFTEST is not set
CONFIG_OPTPROBES=y
CONFIG_KPROBES_ON_FTRACE=y
CONFIG_UPROBES=y
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_ARCH_USE_BUILTIN_BSWAP=y
CONFIG_KRETPROBES=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_OPTPROBES=y
CONFIG_HAVE_KPROBES_ON_FTRACE=y
CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
CONFIG_HAVE_NMI=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
CONFIG_ARCH_HAS_SET_MEMORY=y
CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
CONFIG_HAVE_ASM_MODVERSIONS=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_RSEQ=y
CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
CONFIG_HAVE_HW_BREAKPOINT=y
CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
CONFIG_HAVE_USER_RETURN_NOTIFIER=y
CONFIG_HAVE_PERF_EVENTS_NMI=y
CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
CONFIG_HAVE_PERF_REGS=y
CONFIG_HAVE_PERF_USER_STACK_DUMP=y
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
CONFIG_HAVE_CMPXCHG_LOCAL=y
CONFIG_HAVE_CMPXCHG_DOUBLE=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
CONFIG_SECCOMP_FILTER=y
CONFIG_HAVE_ARCH_STACKLEAK=y
CONFIG_HAVE_STACKPROTECTOR=y
# CONFIG_STACKPROTECTOR is not set
CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
CONFIG_HAVE_CONTEXT_TRACKING=y
CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
CONFIG_HAVE_MOVE_PMD=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
CONFIG_HAVE_ARCH_HUGE_VMAP=y
CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
CONFIG_HAVE_ARCH_SOFT_DIRTY=y
CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
CONFIG_MODULES_USE_ELF_RELA=y
CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
CONFIG_HAVE_EXIT_THREAD=y
CONFIG_ARCH_MMAP_RND_BITS=28
CONFIG_HAVE_STACK_VALIDATION=y
CONFIG_HAVE_RELIABLE_STACKTRACE=y
CONFIG_COMPAT_32BIT_TIME=y
CONFIG_HAVE_ARCH_VMAP_STACK=y
CONFIG_VMAP_STACK=y
CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
CONFIG_STRICT_MODULE_RWX=y
CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
CONFIG_ARCH_USE_MEMREMAP_PROT=y
# CONFIG_LOCK_EVENT_COUNTS is not set
CONFIG_ARCH_HAS_MEM_ENCRYPT=y

#
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
# end of GCOV-based kernel profiling

CONFIG_HAVE_GCC_PLUGINS=y
# end of General architecture-dependent options

CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y
CONFIG_ASM_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_MODULE_SIG is not set
# CONFIG_MODULE_COMPRESS is not set
# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_TRIM_UNUSED_KSYMS is not set
CONFIG_MODULES_TREE_LOOKUP=y
CONFIG_BLOCK=y
CONFIG_BLK_SCSI_REQUEST=y
CONFIG_BLK_CGROUP_RWSTAT=y
CONFIG_BLK_DEV_BSG=y
CONFIG_BLK_DEV_BSGLIB=y
# CONFIG_BLK_DEV_INTEGRITY is not set
# CONFIG_BLK_DEV_ZONED is not set
CONFIG_BLK_DEV_THROTTLING=y
# CONFIG_BLK_DEV_THROTTLING_LOW is not set
# CONFIG_BLK_CMDLINE_PARSER is not set
# CONFIG_BLK_WBT is not set
CONFIG_BLK_CGROUP_IOLATENCY=y
# CONFIG_BLK_CGROUP_IOCOST is not set
CONFIG_BLK_DEBUG_FS=y
# CONFIG_BLK_SED_OPAL is not set
# CONFIG_BLK_INLINE_ENCRYPTION is not set

#
# Partition Types
#
CONFIG_PARTITION_ADVANCED=y
# CONFIG_ACORN_PARTITION is not set
# CONFIG_AIX_PARTITION is not set
CONFIG_OSF_PARTITION=y
CONFIG_AMIGA_PARTITION=y
# CONFIG_ATARI_PARTITION is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_LDM_PARTITION is not set
CONFIG_SGI_PARTITION=y
# CONFIG_ULTRIX_PARTITION is not set
CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
# CONFIG_CMDLINE_PARTITION is not set
# end of Partition Types

CONFIG_BLK_MQ_PCI=y
CONFIG_BLK_MQ_VIRTIO=y

#
# IO Schedulers
#
CONFIG_MQ_IOSCHED_DEADLINE=y
CONFIG_MQ_IOSCHED_KYBER=y
# CONFIG_IOSCHED_BFQ is not set
# end of IO Schedulers

CONFIG_ASN1=y
CONFIG_UNINLINE_SPIN_UNLOCK=y
CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
CONFIG_MUTEX_SPIN_ON_OWNER=y
CONFIG_RWSEM_SPIN_ON_OWNER=y
CONFIG_LOCK_SPIN_ON_OWNER=y
CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
CONFIG_QUEUED_SPINLOCKS=y
CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
CONFIG_QUEUED_RWLOCKS=y
CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
CONFIG_FREEZER=y

#
# Executable file formats
#
CONFIG_BINFMT_ELF=y
CONFIG_ELFCORE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_SCRIPT=y
CONFIG_BINFMT_MISC=y
CONFIG_COREDUMP=y
# end of Executable file formats

#
# Memory Management options
#
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_SPARSEMEM_VMEMMAP=y
CONFIG_HAVE_FAST_GUP=y
CONFIG_MEMORY_ISOLATION=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MEMORY_BALLOON=y
CONFIG_BALLOON_COMPACTION=y
CONFIG_COMPACTION=y
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
CONFIG_MEMORY_FAILURE=y
CONFIG_HWPOISON_INJECT=y
CONFIG_TRANSPARENT_HUGEPAGE=y
# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_ARCH_WANTS_THP_SWAP=y
CONFIG_THP_SWAP=y
# CONFIG_CLEANCACHE is not set
# CONFIG_FRONTSWAP is not set
CONFIG_CMA=y
# CONFIG_CMA_DEBUG is not set
# CONFIG_CMA_DEBUGFS is not set
CONFIG_CMA_AREAS=7
# CONFIG_ZPOOL is not set
# CONFIG_ZBUD is not set
# CONFIG_ZSMALLOC is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
# CONFIG_IDLE_PAGE_TRACKING is not set
CONFIG_ARCH_HAS_PTE_DEVMAP=y
# CONFIG_PERCPU_STATS is not set
# CONFIG_GUP_BENCHMARK is not set
# CONFIG_READ_ONLY_THP_FOR_FS is not set
CONFIG_ARCH_HAS_PTE_SPECIAL=y
# end of Memory Management options

CONFIG_NET=y
CONFIG_NET_INGRESS=y
CONFIG_NET_EGRESS=y
CONFIG_SKB_EXTENSIONS=y

#
# Networking options
#
CONFIG_PACKET=y
# CONFIG_PACKET_DIAG is not set
CONFIG_UNIX=y
CONFIG_UNIX_SCM=y
# CONFIG_UNIX_DIAG is not set
CONFIG_TLS=y
# CONFIG_TLS_DEVICE is not set
# CONFIG_TLS_TOE is not set
CONFIG_XFRM=y
CONFIG_XFRM_ALGO=y
CONFIG_XFRM_USER=y
# CONFIG_XFRM_INTERFACE is not set
CONFIG_XFRM_SUB_POLICY=y
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_IP_FIB_TRIE_STATS is not set
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
# CONFIG_IP_PNP is not set
CONFIG_NET_IPIP=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IP_TUNNEL=y
CONFIG_NET_IPGRE=y
CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE_COMMON=y
CONFIG_IP_MROUTE=y
# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
# CONFIG_NET_IPVTI is not set
CONFIG_NET_UDP_TUNNEL=y
# CONFIG_NET_FOU is not set
# CONFIG_NET_FOU_IP_TUNNELS is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
CONFIG_INET_TUNNEL=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_INET_UDP_DIAG is not set
# CONFIG_INET_RAW_DIAG is not set
# CONFIG_INET_DIAG_DESTROY is not set
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=m
CONFIG_TCP_CONG_CUBIC=y
CONFIG_TCP_CONG_WESTWOOD=m
CONFIG_TCP_CONG_HTCP=m
# CONFIG_TCP_CONG_HSTCP is not set
# CONFIG_TCP_CONG_HYBLA is not set
# CONFIG_TCP_CONG_VEGAS is not set
# CONFIG_TCP_CONG_NV is not set
# CONFIG_TCP_CONG_SCALABLE is not set
# CONFIG_TCP_CONG_LP is not set
# CONFIG_TCP_CONG_VENO is not set
# CONFIG_TCP_CONG_YEAH is not set
# CONFIG_TCP_CONG_ILLINOIS is not set
# CONFIG_TCP_CONG_DCTCP is not set
# CONFIG_TCP_CONG_CDG is not set
# CONFIG_TCP_CONG_BBR is not set
# CONFIG_DEFAULT_CUBIC is not set
CONFIG_DEFAULT_RENO=y
CONFIG_DEFAULT_TCP_CONG="reno"
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
# CONFIG_IPV6_OPTIMISTIC_DAD is not set
# CONFIG_INET6_AH is not set
# CONFIG_INET6_ESP is not set
# CONFIG_INET6_IPCOMP is not set
CONFIG_IPV6_MIP6=y
# CONFIG_IPV6_ILA is not set
CONFIG_INET6_TUNNEL=y
# CONFIG_IPV6_VTI is not set
CONFIG_IPV6_SIT=y
# CONFIG_IPV6_SIT_6RD is not set
CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=y
CONFIG_IPV6_GRE=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
# CONFIG_IPV6_MROUTE is not set
CONFIG_IPV6_SEG6_LWTUNNEL=y
# CONFIG_IPV6_SEG6_HMAC is not set
CONFIG_IPV6_SEG6_BPF=y
# CONFIG_IPV6_RPL_LWTUNNEL is not set
CONFIG_NETLABEL=y
# CONFIG_MPTCP is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NET_PTP_CLASSIFY=y
# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y

#
# Core Netfilter Configuration
#
CONFIG_NETFILTER_INGRESS=y
CONFIG_NETFILTER_NETLINK=y
# CONFIG_NETFILTER_NETLINK_ACCT is not set
CONFIG_NETFILTER_NETLINK_QUEUE=y
CONFIG_NETFILTER_NETLINK_LOG=y
# CONFIG_NETFILTER_NETLINK_OSF is not set
# CONFIG_NF_CONNTRACK is not set
# CONFIG_NF_LOG_NETDEV is not set
# CONFIG_NF_TABLES is not set
CONFIG_NETFILTER_XTABLES=y

#
# Xtables combined modules
#
# CONFIG_NETFILTER_XT_MARK is not set

#
# Xtables targets
#
# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
# CONFIG_NETFILTER_XT_TARGET_LOG is not set
# CONFIG_NETFILTER_XT_TARGET_MARK is not set
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
# CONFIG_NETFILTER_XT_TARGET_TEE is not set
# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set

#
# Xtables matches
#
# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
CONFIG_NETFILTER_XT_MATCH_BPF=y
# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
# CONFIG_NETFILTER_XT_MATCH_CPU is not set
# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
# CONFIG_NETFILTER_XT_MATCH_ECN is not set
# CONFIG_NETFILTER_XT_MATCH_ESP is not set
# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
# CONFIG_NETFILTER_XT_MATCH_HL is not set
# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
# CONFIG_NETFILTER_XT_MATCH_MAC is not set
# CONFIG_NETFILTER_XT_MATCH_MARK is not set
# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
# CONFIG_NETFILTER_XT_MATCH_OSF is not set
# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
# CONFIG_NETFILTER_XT_MATCH_REALM is not set
# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
# CONFIG_NETFILTER_XT_MATCH_STRING is not set
# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
# CONFIG_NETFILTER_XT_MATCH_TIME is not set
# CONFIG_NETFILTER_XT_MATCH_U32 is not set
# end of Core Netfilter Configuration

# CONFIG_IP_SET is not set
# CONFIG_IP_VS is not set

#
# IP: Netfilter Configuration
#
# CONFIG_NF_SOCKET_IPV4 is not set
# CONFIG_NF_TPROXY_IPV4 is not set
# CONFIG_NF_DUP_IPV4 is not set
# CONFIG_NF_LOG_ARP is not set
# CONFIG_NF_LOG_IPV4 is not set
# CONFIG_NF_REJECT_IPV4 is not set
CONFIG_IP_NF_IPTABLES=y
# CONFIG_IP_NF_MATCH_AH is not set
# CONFIG_IP_NF_MATCH_ECN is not set
# CONFIG_IP_NF_MATCH_TTL is not set
# CONFIG_IP_NF_FILTER is not set
# CONFIG_IP_NF_MANGLE is not set
# CONFIG_IP_NF_RAW is not set
# CONFIG_IP_NF_SECURITY is not set
# CONFIG_IP_NF_ARPTABLES is not set
# end of IP: Netfilter Configuration

#
# IPv6: Netfilter Configuration
#
# CONFIG_NF_SOCKET_IPV6 is not set
# CONFIG_NF_TPROXY_IPV6 is not set
# CONFIG_NF_DUP_IPV6 is not set
# CONFIG_NF_REJECT_IPV6 is not set
# CONFIG_NF_LOG_IPV6 is not set
CONFIG_IP6_NF_IPTABLES=y
# CONFIG_IP6_NF_MATCH_AH is not set
# CONFIG_IP6_NF_MATCH_EUI64 is not set
# CONFIG_IP6_NF_MATCH_FRAG is not set
# CONFIG_IP6_NF_MATCH_OPTS is not set
# CONFIG_IP6_NF_MATCH_HL is not set
# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
# CONFIG_IP6_NF_MATCH_MH is not set
# CONFIG_IP6_NF_MATCH_RT is not set
# CONFIG_IP6_NF_MATCH_SRH is not set
# CONFIG_IP6_NF_FILTER is not set
# CONFIG_IP6_NF_MANGLE is not set
# CONFIG_IP6_NF_RAW is not set
# CONFIG_IP6_NF_SECURITY is not set
# end of IPv6: Netfilter Configuration

CONFIG_BPFILTER=y
CONFIG_BPFILTER_UMH=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_L2TP is not set
# CONFIG_BRIDGE is not set
CONFIG_HAVE_NET_DSA=y
# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_VLAN_8021Q_MVRP is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_PHONET is not set
# CONFIG_6LOWPAN is not set
# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y

#
# Queueing/Scheduling
#
# CONFIG_NET_SCH_CBQ is not set
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
# CONFIG_NET_SCH_PRIO is not set
# CONFIG_NET_SCH_MULTIQ is not set
# CONFIG_NET_SCH_RED is not set
# CONFIG_NET_SCH_SFB is not set
# CONFIG_NET_SCH_SFQ is not set
# CONFIG_NET_SCH_TEQL is not set
# CONFIG_NET_SCH_TBF is not set
# CONFIG_NET_SCH_CBS is not set
# CONFIG_NET_SCH_ETF is not set
# CONFIG_NET_SCH_TAPRIO is not set
# CONFIG_NET_SCH_GRED is not set
# CONFIG_NET_SCH_DSMARK is not set
# CONFIG_NET_SCH_NETEM is not set
# CONFIG_NET_SCH_DRR is not set
# CONFIG_NET_SCH_MQPRIO is not set
# CONFIG_NET_SCH_SKBPRIO is not set
# CONFIG_NET_SCH_CHOKE is not set
# CONFIG_NET_SCH_QFQ is not set
# CONFIG_NET_SCH_CODEL is not set
CONFIG_NET_SCH_FQ_CODEL=y
# CONFIG_NET_SCH_CAKE is not set
# CONFIG_NET_SCH_FQ is not set
# CONFIG_NET_SCH_HHF is not set
# CONFIG_NET_SCH_PIE is not set
CONFIG_NET_SCH_INGRESS=y
# CONFIG_NET_SCH_PLUG is not set
# CONFIG_NET_SCH_ETS is not set
CONFIG_NET_SCH_DEFAULT=y
CONFIG_DEFAULT_FQ_CODEL=y
# CONFIG_DEFAULT_PFIFO_FAST is not set
CONFIG_DEFAULT_NET_SCH="fq_codel"

#
# Classification
#
CONFIG_NET_CLS=y
# CONFIG_NET_CLS_BASIC is not set
# CONFIG_NET_CLS_TCINDEX is not set
# CONFIG_NET_CLS_ROUTE4 is not set
# CONFIG_NET_CLS_FW is not set
# CONFIG_NET_CLS_U32 is not set
# CONFIG_NET_CLS_RSVP is not set
# CONFIG_NET_CLS_RSVP6 is not set
# CONFIG_NET_CLS_FLOW is not set
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=y
# CONFIG_NET_CLS_FLOWER is not set
# CONFIG_NET_CLS_MATCHALL is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
# CONFIG_NET_EMATCH_CMP is not set
# CONFIG_NET_EMATCH_NBYTE is not set
# CONFIG_NET_EMATCH_U32 is not set
# CONFIG_NET_EMATCH_META is not set
# CONFIG_NET_EMATCH_TEXT is not set
# CONFIG_NET_EMATCH_IPT is not set
CONFIG_NET_CLS_ACT=y
# CONFIG_NET_ACT_POLICE is not set
# CONFIG_NET_ACT_GACT is not set
# CONFIG_NET_ACT_MIRRED is not set
# CONFIG_NET_ACT_SAMPLE is not set
# CONFIG_NET_ACT_IPT is not set
# CONFIG_NET_ACT_NAT is not set
# CONFIG_NET_ACT_PEDIT is not set
# CONFIG_NET_ACT_SIMP is not set
# CONFIG_NET_ACT_SKBEDIT is not set
# CONFIG_NET_ACT_CSUM is not set
# CONFIG_NET_ACT_MPLS is not set
# CONFIG_NET_ACT_VLAN is not set
CONFIG_NET_ACT_BPF=y
# CONFIG_NET_ACT_SKBMOD is not set
# CONFIG_NET_ACT_IFE is not set
# CONFIG_NET_ACT_TUNNEL_KEY is not set
# CONFIG_NET_ACT_GATE is not set
CONFIG_NET_TC_SKB_EXT=y
CONFIG_NET_SCH_FIFO=y
CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
# CONFIG_BATMAN_ADV is not set
# CONFIG_OPENVSWITCH is not set
# CONFIG_VSOCKETS is not set
# CONFIG_NETLINK_DIAG is not set
CONFIG_MPLS=y
# CONFIG_NET_MPLS_GSO is not set
# CONFIG_MPLS_ROUTING is not set
# CONFIG_NET_NSH is not set
# CONFIG_HSR is not set
# CONFIG_NET_SWITCHDEV is not set
# CONFIG_NET_L3_MASTER_DEV is not set
# CONFIG_QRTR is not set
# CONFIG_NET_NCSI is not set
CONFIG_RPS=y
CONFIG_RFS_ACCEL=y
CONFIG_XPS=y
# CONFIG_CGROUP_NET_PRIO is not set
CONFIG_CGROUP_NET_CLASSID=y
CONFIG_NET_RX_BUSY_POLL=y
CONFIG_BQL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_NET_FLOW_LIMIT=y

#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_DROP_MONITOR is not set
# end of Network testing
# end of Networking options

# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_AF_KCM is not set
CONFIG_STREAM_PARSER=y
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set

#
# CFG80211 needs to be enabled for MAC80211
#
CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
# CONFIG_NET_9P_DEBUG is not set
# CONFIG_CAIF is not set
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
# CONFIG_PSAMPLE is not set
# CONFIG_NET_IFE is not set
CONFIG_LWTUNNEL=y
CONFIG_LWTUNNEL_BPF=y
CONFIG_DST_CACHE=y
CONFIG_GRO_CELLS=y
CONFIG_NET_SOCK_MSG=y
CONFIG_NET_DEVLINK=y
CONFIG_FAILOVER=y
CONFIG_ETHTOOL_NETLINK=y
CONFIG_HAVE_EBPF_JIT=y

#
# Device Drivers
#
CONFIG_HAVE_EISA=y
# CONFIG_EISA is not set
CONFIG_HAVE_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEAER is not set
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEFAULT=y
# CONFIG_PCIEASPM_POWERSAVE is not set
# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
# CONFIG_PCIEASPM_PERFORMANCE is not set
# CONFIG_PCIE_PTM is not set
# CONFIG_PCIE_BW is not set
CONFIG_PCI_MSI=y
CONFIG_PCI_MSI_IRQ_DOMAIN=y
CONFIG_PCI_QUIRKS=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_PF_STUB is not set
CONFIG_PCI_ATS=y
CONFIG_PCI_LOCKLESS_CONFIG=y
CONFIG_PCI_IOV=y
# CONFIG_PCI_PRI is not set
# CONFIG_PCI_PASID is not set
CONFIG_PCI_LABEL=y
# CONFIG_HOTPLUG_PCI is not set

#
# PCI controller drivers
#
# CONFIG_VMD is not set

#
# DesignWare PCI Core Support
#
# CONFIG_PCIE_DW_PLAT_HOST is not set
# CONFIG_PCI_MESON is not set
# end of DesignWare PCI Core Support

#
# Mobiveil PCIe Core Support
#
# end of Mobiveil PCIe Core Support

#
# Cadence PCIe controllers support
#
# end of Cadence PCIe controllers support
# end of PCI controller drivers

#
# PCI Endpoint
#
# CONFIG_PCI_ENDPOINT is not set
# end of PCI Endpoint

#
# PCI switch controller drivers
#
# CONFIG_PCI_SW_SWITCHTEC is not set
# end of PCI switch controller drivers

# CONFIG_PCCARD is not set
# CONFIG_RAPIDIO is not set

#
# Generic Driver Options
#
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set

#
# Firmware loader
#
CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_PAGED_BUF=y
CONFIG_EXTRA_FIRMWARE=""
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
# CONFIG_FW_LOADER_COMPRESS is not set
# end of Firmware loader

CONFIG_ALLOW_DEV_COREDUMP=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
CONFIG_GENERIC_CPU_AUTOPROBE=y
CONFIG_GENERIC_CPU_VULNERABILITIES=y
CONFIG_DMA_SHARED_BUFFER=y
# CONFIG_DMA_FENCE_TRACE is not set
# end of Generic Driver Options

#
# Bus devices
#
# CONFIG_MHI_BUS is not set
# end of Bus devices

# CONFIG_CONNECTOR is not set
# CONFIG_GNSS is not set
# CONFIG_MTD is not set
# CONFIG_OF is not set
CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
# CONFIG_PARPORT is not set
CONFIG_PNP=y
# CONFIG_PNP_DEBUG_MESSAGES is not set

#
# Protocols
#
CONFIG_PNPACPI=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_NULL_BLK is not set
# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_DRBD is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SKD is not set
# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
CONFIG_VIRTIO_BLK=y
# CONFIG_BLK_DEV_RBD is not set
# CONFIG_BLK_DEV_RSXX is not set

#
# NVME Support
#
# CONFIG_BLK_DEV_NVME is not set
# CONFIG_NVME_FC is not set
# end of NVME Support

#
# Misc devices
#
# CONFIG_DUMMY_IRQ is not set
# CONFIG_IBM_ASM is not set
# CONFIG_PHANTOM is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_SRAM is not set
# CONFIG_PCI_ENDPOINT_TEST is not set
# CONFIG_XILINX_SDFEC is not set
# CONFIG_PVPANIC is not set
# CONFIG_C2PORT is not set

#
# EEPROM support
#
# CONFIG_EEPROM_93CX6 is not set
# end of EEPROM support

# CONFIG_CB710_CORE is not set

#
# Texas Instruments shared transport line discipline
#
# end of Texas Instruments shared transport line discipline

#
# Altera FPGA firmware download module (requires I2C)
#
# CONFIG_INTEL_MEI is not set
# CONFIG_INTEL_MEI_ME is not set
# CONFIG_INTEL_MEI_TXE is not set
# CONFIG_VMWARE_VMCI is not set

#
# Intel MIC & related support
#
# CONFIG_INTEL_MIC_BUS is not set
# CONFIG_SCIF_BUS is not set
# CONFIG_VOP_BUS is not set
# end of Intel MIC & related support

# CONFIG_GENWQE is not set
# CONFIG_ECHO is not set
# CONFIG_MISC_ALCOR_PCI is not set
# CONFIG_MISC_RTSX_PCI is not set
# CONFIG_HABANA_AI is not set
# end of Misc devices

CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set

#
# SCSI device support
#
CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# end of SCSI device support

# CONFIG_ATA is not set
# CONFIG_MD is not set
# CONFIG_TARGET_CORE is not set
# CONFIG_FUSION is not set

#
# IEEE 1394 (FireWire) support
#
# CONFIG_FIREWIRE is not set
# CONFIG_FIREWIRE_NOSY is not set
# end of IEEE 1394 (FireWire) support

# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
CONFIG_NET_CORE=y
# CONFIG_BONDING is not set
# CONFIG_DUMMY is not set
# CONFIG_WIREGUARD is not set
# CONFIG_EQUALIZER is not set
# CONFIG_IFB is not set
# CONFIG_NET_TEAM is not set
# CONFIG_MACVLAN is not set
# CONFIG_IPVLAN is not set
CONFIG_VXLAN=y
# CONFIG_GENEVE is not set
# CONFIG_BAREUDP is not set
# CONFIG_GTP is not set
# CONFIG_MACSEC is not set
# CONFIG_NETCONSOLE is not set
CONFIG_TUN=y
# CONFIG_TUN_VNET_CROSS_LE is not set
CONFIG_VETH=y
CONFIG_VIRTIO_NET=y
# CONFIG_NLMON is not set
# CONFIG_ARCNET is not set

#
# Distributed Switch Architecture drivers
#
# end of Distributed Switch Architecture drivers

# CONFIG_ETHERNET is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_NET_SB1000 is not set
# CONFIG_MDIO_DEVICE is not set
# CONFIG_PHYLIB is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set

#
# Host-side USB support is needed for USB Network Adapter support
#
# CONFIG_WLAN is not set

#
# Enable WiMAX (Networking options) to see the WiMAX drivers
#
# CONFIG_WAN is not set
# CONFIG_VMXNET3 is not set
# CONFIG_FUJITSU_ES is not set
CONFIG_NETDEVSIM=y
CONFIG_NET_FAILOVER=y
# CONFIG_ISDN is not set
# CONFIG_NVM is not set

#
# Input device support
#
CONFIG_INPUT=y
CONFIG_INPUT_FF_MEMLESS=y
# CONFIG_INPUT_POLLDEV is not set
# CONFIG_INPUT_SPARSEKMAP is not set
# CONFIG_INPUT_MATRIXKMAP is not set

#
# Userland interfaces
#
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set

#
# Input Device Drivers
#
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
# CONFIG_RMI4_CORE is not set

#
# Hardware I/O ports
#
CONFIG_SERIO=y
CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
# CONFIG_SERIO_ALTERA_PS2 is not set
# CONFIG_SERIO_PS2MULT is not set
# CONFIG_SERIO_ARC_PS2 is not set
# CONFIG_USERIO is not set
# CONFIG_GAMEPORT is not set
# end of Hardware I/O ports
# end of Input device support

#
# Character devices
#
CONFIG_TTY=y
CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_LDISC_AUTOLOAD=y

#
# Serial drivers
#
CONFIG_SERIAL_EARLYCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
CONFIG_SERIAL_8250_PNP=y
# CONFIG_SERIAL_8250_16550A_VARIANTS is not set
# CONFIG_SERIAL_8250_FINTEK is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_PCI=y
CONFIG_SERIAL_8250_EXAR=y
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
# CONFIG_SERIAL_8250_DW is not set
# CONFIG_SERIAL_8250_RT288X is not set
# CONFIG_SERIAL_8250_LPSS is not set
# CONFIG_SERIAL_8250_MID is not set

#
# Non-8250 serial port support
#
# CONFIG_SERIAL_UARTLITE is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_LANTIQ is not set
# CONFIG_SERIAL_SCCNXP is not set
# CONFIG_SERIAL_ALTERA_JTAGUART is not set
# CONFIG_SERIAL_ALTERA_UART is not set
# CONFIG_SERIAL_ARC is not set
# CONFIG_SERIAL_RP2 is not set
# CONFIG_SERIAL_FSL_LPUART is not set
# CONFIG_SERIAL_FSL_LINFLEXUART is not set
# CONFIG_SERIAL_SPRD is not set
# end of Serial drivers

CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_ROCKETPORT is not set
# CONFIG_CYCLADES is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_SYNCLINK_GT is not set
# CONFIG_ISI is not set
# CONFIG_N_HDLC is not set
# CONFIG_N_GSM is not set
# CONFIG_NOZOMI is not set
# CONFIG_NULL_TTY is not set
# CONFIG_TRACE_SINK is not set
CONFIG_HVC_DRIVER=y
# CONFIG_SERIAL_DEV_BUS is not set
# CONFIG_TTY_PRINTK is not set
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_APPLICOM is not set
# CONFIG_MWAVE is not set
CONFIG_DEVMEM=y
CONFIG_DEVKMEM=y
# CONFIG_NVRAM is not set
# CONFIG_RAW_DRIVER is not set
CONFIG_DEVPORT=y
CONFIG_HPET=y
# CONFIG_HPET_MMAP is not set
# CONFIG_HANGCHECK_TIMER is not set
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS_CORE=y
CONFIG_TCG_TIS=y
# CONFIG_TCG_NSC is not set
# CONFIG_TCG_ATMEL is not set
# CONFIG_TCG_INFINEON is not set
CONFIG_TCG_CRB=y
# CONFIG_TCG_VTPM_PROXY is not set
# CONFIG_TELCLOCK is not set
# CONFIG_XILLYBUS is not set
# end of Character devices

# CONFIG_RANDOM_TRUST_CPU is not set
# CONFIG_RANDOM_TRUST_BOOTLOADER is not set

#
# I2C support
#
# CONFIG_I2C is not set
# end of I2C support

# CONFIG_I3C is not set
# CONFIG_SPI is not set
# CONFIG_SPMI is not set
# CONFIG_HSI is not set
CONFIG_PPS=y
# CONFIG_PPS_DEBUG is not set

#
# PPS clients support
#
# CONFIG_PPS_CLIENT_KTIMER is not set
# CONFIG_PPS_CLIENT_LDISC is not set
# CONFIG_PPS_CLIENT_GPIO is not set

#
# PPS generators support
#

#
# PTP clock support
#
CONFIG_PTP_1588_CLOCK=y

#
# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
#
# end of PTP clock support

# CONFIG_PINCTRL is not set
# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
# CONFIG_POWER_AVS is not set
# CONFIG_POWER_RESET is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
# CONFIG_PDA_POWER is not set
# CONFIG_TEST_POWER is not set
# CONFIG_BATTERY_DS2780 is not set
# CONFIG_BATTERY_DS2781 is not set
# CONFIG_BATTERY_BQ27XXX is not set
# CONFIG_CHARGER_MAX8903 is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
# CONFIG_THERMAL_NETLINK is not set
# CONFIG_THERMAL_STATISTICS is not set
CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
CONFIG_THERMAL_GOV_STEP_WISE=y
# CONFIG_THERMAL_GOV_BANG_BANG is not set
CONFIG_THERMAL_GOV_USER_SPACE=y
# CONFIG_THERMAL_EMULATION is not set

#
# Intel thermal drivers
#
CONFIG_INTEL_POWERCLAMP=y
CONFIG_X86_PKG_TEMP_THERMAL=m
# CONFIG_INTEL_SOC_DTS_THERMAL is not set

#
# ACPI INT340X thermal drivers
#
# CONFIG_INT340X_THERMAL is not set
# end of ACPI INT340X thermal drivers

# CONFIG_INTEL_PCH_THERMAL is not set
# end of Intel thermal drivers

# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
CONFIG_BCMA_POSSIBLE=y
# CONFIG_BCMA is not set

#
# Multifunction device drivers
#
# CONFIG_MFD_MADERA is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
# CONFIG_LPC_ICH is not set
# CONFIG_LPC_SCH is not set
# CONFIG_MFD_INTEL_LPSS_ACPI is not set
# CONFIG_MFD_INTEL_LPSS_PCI is not set
# CONFIG_MFD_JANZ_CMODIO is not set
# CONFIG_MFD_KEMPLD is not set
# CONFIG_MFD_MT6397 is not set
# CONFIG_MFD_RDC321X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_ABX500_CORE is not set
# CONFIG_MFD_SYSCON is not set
# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_TQMX86 is not set
# CONFIG_MFD_VX855 is not set
# end of Multifunction device drivers

# CONFIG_REGULATOR is not set
# CONFIG_RC_CORE is not set
# CONFIG_MEDIA_CEC_SUPPORT is not set
# CONFIG_MEDIA_SUPPORT is not set

#
# Graphics support
#
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
CONFIG_AGP_SIS=y
CONFIG_AGP_VIA=y
CONFIG_INTEL_GTT=y
CONFIG_VGA_ARB=y
CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_VGA_SWITCHEROO is not set
# CONFIG_DRM is not set

#
# ARM devices
#
# end of ARM devices

#
# Frame buffer Devices
#
CONFIG_FB_CMDLINE=y
CONFIG_FB_NOTIFY=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
CONFIG_FB_BOOT_VESA_SUPPORT=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_FOREIGN_ENDIAN is not set
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y

#
# Frame buffer hardware drivers
#
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_ARC is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
CONFIG_FB_VESA=y
# CONFIG_FB_EFI is not set
# CONFIG_FB_N411 is not set
# CONFIG_FB_HGA is not set
# CONFIG_FB_OPENCORES is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
# CONFIG_FB_I740 is not set
# CONFIG_FB_LE80578 is not set
# CONFIG_FB_INTEL is not set
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON is not set
# CONFIG_FB_ATY128 is not set
# CONFIG_FB_ATY is not set
# CONFIG_FB_S3 is not set
# CONFIG_FB_SAVAGE is not set
# CONFIG_FB_SIS is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_VT8623 is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_ARK is not set
# CONFIG_FB_PM3 is not set
# CONFIG_FB_CARMINE is not set
# CONFIG_FB_IBM_GXT4500 is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_SIMPLE is not set
# CONFIG_FB_SM712 is not set
# end of Frame buffer Devices

#
# Backlight & LCD device support
#
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_APPLE is not set
# CONFIG_BACKLIGHT_QCOM_WLED is not set
# CONFIG_BACKLIGHT_SAHARA is not set
# end of Backlight & LCD device support

#
# Console display driver support
#
CONFIG_VGA_CONSOLE=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_DUMMY_CONSOLE_COLUMNS=80
CONFIG_DUMMY_CONSOLE_ROWS=25
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
# end of Console display driver support

CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_LOGO_LINUX_CLUT224=y
# end of Graphics support

# CONFIG_SOUND is not set

#
# HID support
#
CONFIG_HID=y
# CONFIG_HID_BATTERY_STRENGTH is not set
# CONFIG_HIDRAW is not set
# CONFIG_UHID is not set
CONFIG_HID_GENERIC=y

#
# Special HID drivers
#
CONFIG_HID_A4TECH=y
# CONFIG_HID_ACRUX is not set
CONFIG_HID_APPLE=y
# CONFIG_HID_AUREAL is not set
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
# CONFIG_HID_COUGAR is not set
# CONFIG_HID_MACALLY is not set
# CONFIG_HID_CMEDIA is not set
CONFIG_HID_CYPRESS=y
CONFIG_HID_DRAGONRISE=y
# CONFIG_DRAGONRISE_FF is not set
# CONFIG_HID_EMS_FF is not set
# CONFIG_HID_ELECOM is not set
CONFIG_HID_EZKEY=y
# CONFIG_HID_GEMBIRD is not set
# CONFIG_HID_GFRM is not set
# CONFIG_HID_GLORIOUS is not set
# CONFIG_HID_KEYTOUCH is not set
CONFIG_HID_KYE=y
# CONFIG_HID_WALTOP is not set
# CONFIG_HID_VIEWSONIC is not set
CONFIG_HID_GYRATION=y
# CONFIG_HID_ICADE is not set
# CONFIG_HID_ITE is not set
# CONFIG_HID_JABRA is not set
CONFIG_HID_TWINHAN=y
CONFIG_HID_KENSINGTON=y
# CONFIG_HID_LCPOWER is not set
# CONFIG_HID_LENOVO is not set
# CONFIG_HID_MAGICMOUSE is not set
# CONFIG_HID_MALTRON is not set
# CONFIG_HID_MAYFLASH is not set
# CONFIG_HID_REDRAGON is not set
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
# CONFIG_HID_MULTITOUCH is not set
# CONFIG_HID_NTI is not set
# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
# CONFIG_HID_PICOLCD is not set
# CONFIG_HID_PLANTRONICS is not set
# CONFIG_HID_PRIMAX is not set
# CONFIG_HID_SAITEK is not set
CONFIG_HID_SAMSUNG=y
# CONFIG_HID_SPEEDLINK is not set
# CONFIG_HID_STEAM is not set
# CONFIG_HID_STEELSERIES is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_RMI is not set
CONFIG_HID_GREENASIA=y
# CONFIG_GREENASIA_FF is not set
CONFIG_HID_SMARTJOYPLUS=y
# CONFIG_SMARTJOYPLUS_FF is not set
# CONFIG_HID_TIVO is not set
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_THRUSTMASTER_FF=y
# CONFIG_HID_UDRAW_PS3 is not set
# CONFIG_HID_XINMO is not set
CONFIG_HID_ZEROPLUS=y
CONFIG_ZEROPLUS_FF=y
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
# end of Special HID drivers

#
# Intel ISH HID support
#
# CONFIG_INTEL_ISH_HID is not set
# end of Intel ISH HID support
# end of HID support

CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_EDAC_ATOMIC_SCRUB=y
CONFIG_EDAC_SUPPORT=y
# CONFIG_EDAC is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_MC146818_LIB=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set

#
# DMABUF options
#
CONFIG_SYNC_FILE=y
# CONFIG_SW_SYNC is not set
# CONFIG_UDMABUF is not set
# CONFIG_DMABUF_MOVE_NOTIFY is not set
# CONFIG_DMABUF_SELFTESTS is not set
# CONFIG_DMABUF_HEAPS is not set
# end of DMABUF options

# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
CONFIG_VIRT_DRIVERS=y
# CONFIG_VBOXGUEST is not set
CONFIG_VIRTIO=y
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
CONFIG_VIRTIO_BALLOON=y
# CONFIG_VIRTIO_INPUT is not set
# CONFIG_VIRTIO_MMIO is not set
# CONFIG_VDPA is not set
CONFIG_VHOST_MENU=y
# CONFIG_VHOST_NET is not set
# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set

#
# Microsoft Hyper-V guest support
#
# end of Microsoft Hyper-V guest support

# CONFIG_GREYBUS is not set
# CONFIG_STAGING is not set
# CONFIG_X86_PLATFORM_DEVICES is not set
CONFIG_PMC_ATOM=y
# CONFIG_MFD_CROS_EC is not set
# CONFIG_CHROME_PLATFORMS is not set
# CONFIG_MELLANOX_PLATFORM is not set
CONFIG_HAVE_CLK=y
CONFIG_CLKDEV_LOOKUP=y
CONFIG_HAVE_CLK_PREPARE=y
CONFIG_COMMON_CLK=y
# CONFIG_HWSPINLOCK is not set

#
# Clock Source drivers
#
CONFIG_CLKEVT_I8253=y
CONFIG_I8253_LOCK=y
CONFIG_CLKBLD_I8253=y
# end of Clock Source drivers

CONFIG_MAILBOX=y
CONFIG_PCC=y
# CONFIG_ALTERA_MBOX is not set
# CONFIG_IOMMU_SUPPORT is not set

#
# Remoteproc drivers
#
# CONFIG_REMOTEPROC is not set
# end of Remoteproc drivers

#
# Rpmsg drivers
#
# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
# CONFIG_RPMSG_VIRTIO is not set
# end of Rpmsg drivers

# CONFIG_SOUNDWIRE is not set

#
# SOC (System On Chip) specific Drivers
#

#
# Amlogic SoC drivers
#
# end of Amlogic SoC drivers

#
# Aspeed SoC drivers
#
# end of Aspeed SoC drivers

#
# Broadcom SoC drivers
#
# end of Broadcom SoC drivers

#
# NXP/Freescale QorIQ SoC drivers
#
# end of NXP/Freescale QorIQ SoC drivers

#
# i.MX SoC drivers
#
# end of i.MX SoC drivers

#
# Qualcomm SoC drivers
#
# end of Qualcomm SoC drivers

# CONFIG_SOC_TI is not set

#
# Xilinx SoC drivers
#
# CONFIG_XILINX_VCU is not set
# end of Xilinx SoC drivers
# end of SOC (System On Chip) specific Drivers

# CONFIG_PM_DEVFREQ is not set
# CONFIG_EXTCON is not set
# CONFIG_MEMORY is not set
# CONFIG_IIO is not set
# CONFIG_NTB is not set
# CONFIG_VME_BUS is not set
# CONFIG_PWM is not set

#
# IRQ chip support
#
# end of IRQ chip support

# CONFIG_IPACK_BUS is not set
# CONFIG_RESET_CONTROLLER is not set

#
# PHY Subsystem
#
CONFIG_GENERIC_PHY=y
# CONFIG_BCM_KONA_USB2_PHY is not set
# CONFIG_PHY_PXA_28NM_HSIC is not set
# CONFIG_PHY_PXA_28NM_USB2 is not set
# CONFIG_PHY_INTEL_EMMC is not set
# end of PHY Subsystem

# CONFIG_POWERCAP is not set
# CONFIG_MCB is not set

#
# Performance monitor support
#
# end of Performance monitor support

CONFIG_RAS=y
# CONFIG_RAS_CEC is not set
# CONFIG_USB4 is not set

#
# Android
#
# CONFIG_ANDROID is not set
# end of Android

# CONFIG_LIBNVDIMM is not set
# CONFIG_DAX is not set
CONFIG_NVMEM=y
# CONFIG_NVMEM_SYSFS is not set

#
# HW tracing support
#
# CONFIG_STM is not set
# CONFIG_INTEL_TH is not set
# end of HW tracing support

# CONFIG_FPGA is not set
# CONFIG_TEE is not set
# CONFIG_UNISYS_VISORBUS is not set
# CONFIG_SIOX is not set
# CONFIG_SLIMBUS is not set
# CONFIG_INTERCONNECT is not set
# CONFIG_COUNTER is not set
# end of Device Drivers

#
# File systems
#
CONFIG_DCACHE_WORD_ACCESS=y
CONFIG_VALIDATE_FS_PARSER=y
CONFIG_FS_IOMAP=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_USE_FOR_EXT2=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_NILFS2_FS is not set
# CONFIG_F2FS_FS is not set
# CONFIG_FS_DAX is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_EXPORTFS=y
# CONFIG_EXPORTFS_BLOCK_OPS is not set
CONFIG_FILE_LOCKING=y
CONFIG_MANDATORY_FILE_LOCKING=y
# CONFIG_FS_ENCRYPTION is not set
# CONFIG_FS_VERITY is not set
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_FANOTIFY is not set
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_FUSE_FS is not set
# CONFIG_OVERLAY_FS is not set

#
# Caches
#
# CONFIG_FSCACHE is not set
# end of Caches

#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
# CONFIG_UDF_FS is not set
# end of CD-ROM/DVD Filesystems

#
# DOS/FAT/EXFAT/NT Filesystems
#
# CONFIG_MSDOS_FS is not set
# CONFIG_VFAT_FS is not set
# CONFIG_EXFAT_FS is not set
# CONFIG_NTFS_FS is not set
# end of DOS/FAT/EXFAT/NT Filesystems

#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
# CONFIG_PROC_CHILDREN is not set
CONFIG_PROC_PID_ARCH_STATUS=y
CONFIG_KERNFS=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_XATTR=y
# CONFIG_TMPFS_INODE64 is not set
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_MEMFD_CREATE=y
CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
# CONFIG_CONFIGFS_FS is not set
# CONFIG_EFIVAR_FS is not set
# end of Pseudo filesystems

# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
CONFIG_9P_FS=y
CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
# CONFIG_NLS_CODEPAGE_860 is not set
# CONFIG_NLS_CODEPAGE_861 is not set
# CONFIG_NLS_CODEPAGE_862 is not set
# CONFIG_NLS_CODEPAGE_863 is not set
# CONFIG_NLS_CODEPAGE_864 is not set
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
CONFIG_NLS_ASCII=y
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_MAC_ROMAN is not set
# CONFIG_NLS_MAC_CELTIC is not set
# CONFIG_NLS_MAC_CENTEURO is not set
# CONFIG_NLS_MAC_CROATIAN is not set
# CONFIG_NLS_MAC_CYRILLIC is not set
# CONFIG_NLS_MAC_GAELIC is not set
# CONFIG_NLS_MAC_GREEK is not set
# CONFIG_NLS_MAC_ICELAND is not set
# CONFIG_NLS_MAC_INUIT is not set
# CONFIG_NLS_MAC_ROMANIAN is not set
# CONFIG_NLS_MAC_TURKISH is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_UNICODE is not set
CONFIG_IO_WQ=y
# end of File systems

#
# Security options
#
CONFIG_KEYS=y
# CONFIG_KEYS_REQUEST_CACHE is not set
# CONFIG_PERSISTENT_KEYRINGS is not set
# CONFIG_TRUSTED_KEYS is not set
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
CONFIG_PAGE_TABLE_ISOLATION=y
# CONFIG_SECURITY_NETWORK_XFRM is not set
# CONFIG_SECURITY_PATH is not set
CONFIG_LSM_MMAP_MIN_ADDR=65536
CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
# CONFIG_HARDENED_USERCOPY is not set
# CONFIG_FORTIFY_SOURCE is not set
# CONFIG_STATIC_USERMODEHELPER is not set
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set
# CONFIG_SECURITY_SELINUX_DISABLE is not set
CONFIG_SECURITY_SELINUX_DEVELOP=y
CONFIG_SECURITY_SELINUX_AVC_STATS=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0
CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
# CONFIG_SECURITY_SMACK is not set
# CONFIG_SECURITY_TOMOYO is not set
# CONFIG_SECURITY_APPARMOR is not set
# CONFIG_SECURITY_LOADPIN is not set
# CONFIG_SECURITY_YAMA is not set
# CONFIG_SECURITY_SAFESETID is not set
# CONFIG_SECURITY_LOCKDOWN_LSM is not set
CONFIG_INTEGRITY=y
# CONFIG_INTEGRITY_SIGNATURE is not set
CONFIG_INTEGRITY_AUDIT=y
CONFIG_IMA=y
CONFIG_IMA_MEASURE_PCR_IDX=10
CONFIG_IMA_LSM_RULES=y
# CONFIG_IMA_TEMPLATE is not set
CONFIG_IMA_NG_TEMPLATE=y
# CONFIG_IMA_SIG_TEMPLATE is not set
CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
CONFIG_IMA_DEFAULT_HASH_SHA1=y
# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
CONFIG_IMA_DEFAULT_HASH="sha1"
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_READ_POLICY=y
# CONFIG_IMA_APPRAISE is not set
CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
# CONFIG_EVM is not set
# CONFIG_DEFAULT_SECURITY_SELINUX is not set
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_LSM="selinux,bpf,integrity"

#
# Kernel hardening options
#

#
# Memory initialization
#
CONFIG_INIT_STACK_NONE=y
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is
not set # CONFIG_INIT_ON_FREE_DEFAULT_ON is not set # end of Memory initialization # end of Kernel hardening options # end of Security options CONFIG_CRYPTO=y # # Crypto core or helper # CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_SKCIPHER=y CONFIG_CRYPTO_SKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG=y CONFIG_CRYPTO_RNG2=y CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_AKCIPHER2=y CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y CONFIG_CRYPTO_ACOMP2=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_USER is not set CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y CONFIG_CRYPTO_GF128MUL=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_NULL2=y # CONFIG_CRYPTO_PCRYPT is not set # CONFIG_CRYPTO_CRYPTD is not set # CONFIG_CRYPTO_AUTHENC is not set # CONFIG_CRYPTO_TEST is not set CONFIG_CRYPTO_ENGINE=m # # Public-key cryptography # CONFIG_CRYPTO_RSA=y # CONFIG_CRYPTO_DH is not set # CONFIG_CRYPTO_ECDH is not set # CONFIG_CRYPTO_ECRDSA is not set # CONFIG_CRYPTO_CURVE25519 is not set # CONFIG_CRYPTO_CURVE25519_X86 is not set # # Authenticated Encryption with Associated Data # # CONFIG_CRYPTO_CCM is not set CONFIG_CRYPTO_GCM=y # CONFIG_CRYPTO_CHACHA20POLY1305 is not set # CONFIG_CRYPTO_AEGIS128 is not set # CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set CONFIG_CRYPTO_SEQIV=y # CONFIG_CRYPTO_ECHAINIV is not set # # Block modes # # CONFIG_CRYPTO_CBC is not set # CONFIG_CRYPTO_CFB is not set CONFIG_CRYPTO_CTR=y # CONFIG_CRYPTO_CTS is not set # CONFIG_CRYPTO_ECB is not set # CONFIG_CRYPTO_LRW is not set # CONFIG_CRYPTO_OFB is not set # CONFIG_CRYPTO_PCBC is not set # CONFIG_CRYPTO_XTS is not set # CONFIG_CRYPTO_KEYWRAP is not set # CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set # CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set # CONFIG_CRYPTO_ADIANTUM is not set # CONFIG_CRYPTO_ESSIV is not set # # Hash modes # # CONFIG_CRYPTO_CMAC is not set CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_XCBC is not set # CONFIG_CRYPTO_VMAC is not set # # Digest # CONFIG_CRYPTO_CRC32C=y # CONFIG_CRYPTO_CRC32C_INTEL is not set # CONFIG_CRYPTO_CRC32 is not set # CONFIG_CRYPTO_CRC32_PCLMUL is not set CONFIG_CRYPTO_XXHASH=y CONFIG_CRYPTO_BLAKE2B=y # CONFIG_CRYPTO_BLAKE2S is not set # CONFIG_CRYPTO_BLAKE2S_X86 is not set CONFIG_CRYPTO_CRCT10DIF=y # CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set CONFIG_CRYPTO_GHASH=y # CONFIG_CRYPTO_POLY1305 is not set # CONFIG_CRYPTO_POLY1305_X86_64 is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_MICHAEL_MIC is not set # CONFIG_CRYPTO_RMD128 is not set # CONFIG_CRYPTO_RMD160 is not set # CONFIG_CRYPTO_RMD256 is not set # CONFIG_CRYPTO_RMD320 is not set CONFIG_CRYPTO_SHA1=y # CONFIG_CRYPTO_SHA1_SSSE3 is not set # CONFIG_CRYPTO_SHA256_SSSE3 is not set # CONFIG_CRYPTO_SHA512_SSSE3 is not set CONFIG_CRYPTO_SHA256=y # CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_SHA3 is not set # CONFIG_CRYPTO_SM3 is not set # CONFIG_CRYPTO_STREEBOG is not set # CONFIG_CRYPTO_TGR192 is not set # CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set # # Ciphers # CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_AES_TI is not set # CONFIG_CRYPTO_AES_NI_INTEL is not set # CONFIG_CRYPTO_ANUBIS is not set # CONFIG_CRYPTO_ARC4 is not set # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_BLOWFISH_X86_64 is not set # CONFIG_CRYPTO_CAMELLIA is not set # CONFIG_CRYPTO_CAMELLIA_X86_64 is not set # CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set # CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set # CONFIG_CRYPTO_CAST5 is not set 
# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set # CONFIG_CRYPTO_CAST6 is not set # CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set # CONFIG_CRYPTO_DES is not set # CONFIG_CRYPTO_DES3_EDE_X86_64 is not set # CONFIG_CRYPTO_FCRYPT is not set # CONFIG_CRYPTO_KHAZAD is not set # CONFIG_CRYPTO_SALSA20 is not set # CONFIG_CRYPTO_CHACHA20 is not set # CONFIG_CRYPTO_CHACHA20_X86_64 is not set # CONFIG_CRYPTO_SEED is not set # CONFIG_CRYPTO_SERPENT is not set # CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set # CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set # CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set # CONFIG_CRYPTO_SM4 is not set # CONFIG_CRYPTO_TEA is not set # CONFIG_CRYPTO_TWOFISH is not set # CONFIG_CRYPTO_TWOFISH_X86_64 is not set # CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set # CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set # # Compression # # CONFIG_CRYPTO_DEFLATE is not set # CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set # CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set # # Random Number Generation # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DRBG_MENU=y CONFIG_CRYPTO_DRBG_HMAC=y # CONFIG_CRYPTO_DRBG_HASH is not set # CONFIG_CRYPTO_DRBG_CTR is not set CONFIG_CRYPTO_DRBG=y CONFIG_CRYPTO_JITTERENTROPY=y CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y # CONFIG_CRYPTO_USER_API_SKCIPHER is not set # CONFIG_CRYPTO_USER_API_RNG is not set # CONFIG_CRYPTO_USER_API_AEAD is not set CONFIG_CRYPTO_HASH_INFO=y # # Crypto library routines # CONFIG_CRYPTO_LIB_AES=y # CONFIG_CRYPTO_LIB_BLAKE2S is not set # CONFIG_CRYPTO_LIB_CHACHA is not set # CONFIG_CRYPTO_LIB_CURVE25519 is not set CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 # CONFIG_CRYPTO_LIB_POLY1305 is not set # CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set CONFIG_CRYPTO_LIB_SHA256=y CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_PADLOCK is not set # CONFIG_CRYPTO_DEV_CCP is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set # CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set CONFIG_CRYPTO_DEV_VIRTIO=m # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y CONFIG_X509_CERTIFICATE_PARSER=y # CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set CONFIG_PKCS7_MESSAGE_PARSER=y # # Certificates for signature checking # CONFIG_SYSTEM_TRUSTED_KEYRING=y CONFIG_SYSTEM_TRUSTED_KEYS="" # CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set # CONFIG_SECONDARY_TRUSTED_KEYRING is not set # CONFIG_SYSTEM_BLACKLIST_KEYRING is not set # end of Certificates for signature checking CONFIG_BINARY_PRINTF=y # # Library routines # # CONFIG_PACKING is not set CONFIG_BITREVERSE=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y CONFIG_GENERIC_NET_UTILS=y CONFIG_GENERIC_FIND_FIRST_BIT=y # CONFIG_CORDIC is not set # CONFIG_PRIME_NUMBERS is not set CONFIG_RATIONAL=y CONFIG_GENERIC_PCI_IOMAP=y CONFIG_GENERIC_IOMAP=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y CONFIG_ARCH_HAS_FAST_MULTIPLIER=y CONFIG_ARCH_USE_SYM_ANNOTATIONS=y CONFIG_CRC_CCITT=y CONFIG_CRC16=y CONFIG_CRC_T10DIF=y # CONFIG_CRC_ITU_T is not set CONFIG_CRC32=y # CONFIG_CRC32_SELFTEST is not set CONFIG_CRC32_SLICEBY8=y # CONFIG_CRC32_SLICEBY4 is not set # CONFIG_CRC32_SARWATE is not set # CONFIG_CRC32_BIT is not set # CONFIG_CRC64 is not set # CONFIG_CRC4 is not set # CONFIG_CRC7 is not set 
CONFIG_LIBCRC32C=y # CONFIG_CRC8 is not set CONFIG_XXHASH=y # CONFIG_RANDOM32_SELFTEST is not set CONFIG_ZLIB_INFLATE=y CONFIG_LZO_DECOMPRESS=y CONFIG_LZ4_DECOMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_XZ_DEC=y CONFIG_XZ_DEC_X86=y # CONFIG_XZ_DEC_POWERPC is not set # CONFIG_XZ_DEC_IA64 is not set # CONFIG_XZ_DEC_ARM is not set # CONFIG_XZ_DEC_ARMTHUMB is not set # CONFIG_XZ_DEC_SPARC is not set CONFIG_XZ_DEC_BCJ=y # CONFIG_XZ_DEC_TEST is not set CONFIG_DECOMPRESS_GZIP=y CONFIG_DECOMPRESS_BZIP2=y CONFIG_DECOMPRESS_LZMA=y CONFIG_DECOMPRESS_XZ=y CONFIG_DECOMPRESS_LZO=y CONFIG_DECOMPRESS_LZ4=y CONFIG_DECOMPRESS_ZSTD=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_XARRAY_MULTI=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y CONFIG_DMA_OPS=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y CONFIG_SWIOTLB=y CONFIG_DMA_CMA=y # # Default contiguous memory area size: # CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set # CONFIG_CMA_SIZE_SEL_MIN is not set # CONFIG_CMA_SIZE_SEL_MAX is not set CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_API_DEBUG is not set CONFIG_SGL_ALLOC=y CONFIG_IOMMU_HELPER=y CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y # CONFIG_GLOB_SELFTEST is not set CONFIG_NLATTR=y CONFIG_CLZ_TAB=y CONFIG_IRQ_POLL=y CONFIG_MPILIB=y CONFIG_OID_REGISTRY=y CONFIG_UCS2_STRING=y CONFIG_HAVE_GENERIC_VDSO=y CONFIG_GENERIC_GETTIMEOFDAY=y CONFIG_GENERIC_VDSO_TIME_NS=y CONFIG_FONT_SUPPORT=y CONFIG_FONTS=y # CONFIG_FONT_8x8 is not set CONFIG_FONT_8x16=y # CONFIG_FONT_6x11 is not set # CONFIG_FONT_7x14 is not set # CONFIG_FONT_PEARL_8x8 is not set # CONFIG_FONT_ACORN_8x8 is not set CONFIG_FONT_MINI_4x6=y # CONFIG_FONT_6x10 is not set # CONFIG_FONT_10x18 is not set # CONFIG_FONT_SUN8x16 is not set # CONFIG_FONT_SUN12x22 is not set # CONFIG_FONT_TER16x32 is not set CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y CONFIG_ARCH_HAS_UACCESS_MCSAFE=y CONFIG_ARCH_STACKWALK=y CONFIG_SBITMAP=y # CONFIG_STRING_SELFTEST is not set # end of Library routines # # Kernel hacking # # # printk and dmesg options # CONFIG_PRINTK_TIME=y # CONFIG_PRINTK_CALLER is not set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 CONFIG_CONSOLE_LOGLEVEL_QUIET=4 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_DYNAMIC_DEBUG_CORE is not set CONFIG_SYMBOLIC_ERRNAME=y CONFIG_DEBUG_BUGVERBOSE=y # end of printk and dmesg options # # Compile-time checks and compiler options # CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_INFO_REDUCED is not set # CONFIG_DEBUG_INFO_COMPRESSED is not set # CONFIG_DEBUG_INFO_SPLIT is not set # CONFIG_DEBUG_INFO_DWARF4 is not set CONFIG_DEBUG_INFO_BTF=y # CONFIG_GDB_SCRIPTS is not set CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 # CONFIG_STRIP_ASM_SYMS is not set # CONFIG_READABLE_ASM is not set # CONFIG_HEADERS_INSTALL is not set # CONFIG_DEBUG_SECTION_MISMATCH is not set CONFIG_SECTION_MISMATCH_WARN_ONLY=y # CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set CONFIG_STACK_VALIDATION=y # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # end of Compile-time checks and compiler options # # Generic Kernel Debugging Instruments # CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 CONFIG_MAGIC_SYSRQ_SERIAL=y CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" CONFIG_DEBUG_FS=y CONFIG_DEBUG_FS_ALLOW_ALL=y # CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set # CONFIG_DEBUG_FS_ALLOW_NONE is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y 
# CONFIG_UBSAN is not set # end of Generic Kernel Debugging Instruments CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MISC=y # # Memory Debugging # # CONFIG_PAGE_EXTENSION is not set # CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_PAGE_OWNER is not set # CONFIG_PAGE_POISONING is not set # CONFIG_DEBUG_PAGE_REF is not set # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_ARCH_HAS_DEBUG_WX=y # CONFIG_DEBUG_WX is not set CONFIG_GENERIC_PTDUMP=y # CONFIG_PTDUMP_DEBUGFS is not set # CONFIG_DEBUG_OBJECTS is not set # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set CONFIG_HAVE_DEBUG_KMEMLEAK=y # CONFIG_DEBUG_KMEMLEAK is not set # CONFIG_DEBUG_STACK_USAGE is not set CONFIG_SCHED_STACK_END_CHECK=y CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_VM_PGTABLE is not set CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y # CONFIG_DEBUG_VIRTUAL is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_PER_CPU_MAPS is not set CONFIG_HAVE_ARCH_KASAN=y CONFIG_HAVE_ARCH_KASAN_VMALLOC=y CONFIG_CC_HAS_KASAN_GENERIC=y # end of Memory Debugging # CONFIG_DEBUG_SHIRQ is not set # # Debug Oops, Lockups and Hangs # CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS_VALUE=1 CONFIG_PANIC_TIMEOUT=0 CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 CONFIG_HARDLOCKUP_DETECTOR_PERF=y CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 CONFIG_DETECT_HUNG_TASK=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 # CONFIG_WQ_WATCHDOG is not set # CONFIG_TEST_LOCKUP is not set # end of Debug Oops, Lockups and Hangs # # Scheduler Debugging # CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set CONFIG_DEBUG_PREEMPT=y # # Lock Debugging (spinlocks, mutexes, etc...) # CONFIG_LOCK_DEBUGGING_SUPPORT=y CONFIG_PROVE_LOCKING=y # CONFIG_PROVE_RAW_LOCK_NESTING is not set # CONFIG_LOCK_STAT is not set CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y CONFIG_DEBUG_RWSEMS=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_LOCKDEP=y # CONFIG_DEBUG_LOCKDEP is not set CONFIG_DEBUG_ATOMIC_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_LOCK_TORTURE_TEST is not set # CONFIG_WW_MUTEX_SELFTEST is not set # end of Lock Debugging (spinlocks, mutexes, etc...) 
CONFIG_TRACE_IRQFLAGS=y CONFIG_TRACE_IRQFLAGS_NMI=y CONFIG_STACKTRACE=y # CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set # CONFIG_DEBUG_KOBJECT is not set # # Debug kernel data structures # # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_PLIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set # CONFIG_BUG_ON_DATA_CORRUPTION is not set # end of Debug kernel data structures CONFIG_DEBUG_CREDENTIALS=y # # RCU Debugging # CONFIG_PROVE_RCU=y # CONFIG_RCU_PERF_TEST is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set # CONFIG_RCU_EQS_DEBUG is not set # end of RCU Debugging # CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set # CONFIG_LATENCYTOP is not set CONFIG_USER_STACKTRACE_SUPPORT=y CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_SYSCALL_TRACEPOINTS=y CONFIG_HAVE_FENTRY=y CONFIG_HAVE_C_RECORDMCOUNT=y CONFIG_TRACE_CLOCK=y CONFIG_RING_BUFFER=y CONFIG_EVENT_TRACING=y CONFIG_CONTEXT_SWITCH_TRACER=y CONFIG_PREEMPTIRQ_TRACEPOINTS=y CONFIG_TRACING=y CONFIG_GENERIC_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y CONFIG_BOOTTIME_TRACING=y CONFIG_FUNCTION_TRACER=y CONFIG_FUNCTION_GRAPH_TRACER=y CONFIG_DYNAMIC_FTRACE=y CONFIG_DYNAMIC_FTRACE_WITH_REGS=y CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y # CONFIG_FUNCTION_PROFILER is not set # CONFIG_STACK_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set # CONFIG_PREEMPT_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_HWLAT_TRACER is not set # CONFIG_MMIOTRACE is not set CONFIG_FTRACE_SYSCALLS=y # CONFIG_TRACER_SNAPSHOT is not set CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set # CONFIG_PROFILE_ALL_BRANCHES is not set CONFIG_BLK_DEV_IO_TRACE=y CONFIG_KPROBE_EVENTS=y # CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set CONFIG_UPROBE_EVENTS=y CONFIG_BPF_EVENTS=y CONFIG_DYNAMIC_EVENTS=y CONFIG_PROBE_EVENTS=y CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_FTRACE_MCOUNT_RECORD=y # CONFIG_SYNTH_EVENTS is not set # CONFIG_HIST_TRIGGERS is not set # CONFIG_TRACE_EVENT_INJECT is not set # CONFIG_TRACEPOINT_BENCHMARK is not set # CONFIG_RING_BUFFER_BENCHMARK is not set # CONFIG_TRACE_EVAL_MAP_FILE is not set # CONFIG_FTRACE_STARTUP_TEST is not set # CONFIG_RING_BUFFER_STARTUP_TEST is not set # CONFIG_PREEMPTIRQ_DELAY_TEST is not set # CONFIG_KPROBE_EVENT_GEN_TEST is not set # CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KCSAN=y CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y # CONFIG_STRICT_DEVMEM is not set # # x86 Debugging # CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y CONFIG_X86_VERBOSE_BOOTUP=y CONFIG_EARLY_PRINTK=y # CONFIG_EARLY_PRINTK_DBGP is not set # CONFIG_EARLY_PRINTK_USB_XDBC is not set # CONFIG_EFI_PGT_DUMP is not set # CONFIG_DEBUG_TLBFLUSH is not set # CONFIG_IOMMU_DEBUG is not set CONFIG_HAVE_MMIOTRACE_SUPPORT=y # CONFIG_X86_DECODER_SELFTEST is not set CONFIG_IO_DELAY_0X80=y # CONFIG_IO_DELAY_0XED is not set # CONFIG_IO_DELAY_UDELAY is not set # CONFIG_IO_DELAY_NONE is not set # CONFIG_DEBUG_BOOT_PARAMS is not set # CONFIG_CPA_DEBUG is not set # CONFIG_DEBUG_ENTRY is not set # CONFIG_DEBUG_NMI_SELFTEST is not set CONFIG_X86_DEBUG_FPU=y # CONFIG_PUNIT_ATOM_DEBUG is not set 
CONFIG_UNWINDER_ORC=y # CONFIG_UNWINDER_FRAME_POINTER is not set # CONFIG_UNWINDER_GUESS is not set # end of x86 Debugging # # Kernel Testing and Coverage # # CONFIG_KUNIT is not set # CONFIG_NOTIFIER_ERROR_INJECTION is not set CONFIG_FUNCTION_ERROR_INJECTION=y CONFIG_FAULT_INJECTION=y # CONFIG_FAILSLAB is not set # CONFIG_FAIL_PAGE_ALLOC is not set # CONFIG_FAIL_MAKE_REQUEST is not set # CONFIG_FAIL_IO_TIMEOUT is not set # CONFIG_FAIL_FUTEX is not set CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAIL_FUNCTION=y CONFIG_ARCH_HAS_KCOV=y CONFIG_CC_HAS_SANCOV_TRACE_PC=y # CONFIG_KCOV is not set # CONFIG_RUNTIME_TESTING_MENU is not set # CONFIG_MEMTEST is not set # end of Kernel Testing and Coverage # end of Kernel hacking xdp-tools-1.5.4/.github/scripts/run_tests.sh0000755000175100001660000000015415003640462020431 0ustar runnerdocker#!/bin/bash export $(cat ENVVARS | xargs -d '\n') make test V=1 >> TEST_OUTPUT 2>&1 echo $? > TEST_RESULT xdp-tools-1.5.4/.github/scripts/prepare_test_kernel.sh0000755000175100001660000000157415003640462022447 0ustar runnerdocker#!/bin/bash set -e IFS=- read KERNEL_UPSTREAM_VERSION KERNEL_PATCH_VERSION <<< $KERNEL_VERSION KERNEL_VERSION_COMPLETE="$KERNEL_VERSION".x86_64 PACKAGES_URL=https://kojipkgs.fedoraproject.org/packages/kernel/ PACKAGES_URL+="$KERNEL_UPSTREAM_VERSION"/"$KERNEL_PATCH_VERSION"/x86_64 for package in core modules modules-core modules-extra devel; do # modules-core package only exists for newer kernel versions, so continue if # download fails wget -nv "$PACKAGES_URL"/kernel-"$package"-"$KERNEL_VERSION_COMPLETE".rpm || continue rpm2cpio kernel-"$package"-"$KERNEL_VERSION_COMPLETE".rpm | cpio -di done find lib -name "*.xz" -exec xz -d {} \; mv lib/modules/"$KERNEL_VERSION_COMPLETE" kernel mkdir -p kernel/arch/x86/boot cp kernel/vmlinuz kernel/arch/x86/boot/bzImage cp kernel/config kernel/.config rsync -a usr/src/kernels/"$KERNEL_VERSION_COMPLETE"/ kernel/ find kernel xdp-tools-1.5.4/.github/scripts/prepare_test_tools.sh0000755000175100001660000000037515003640462022325 0ustar runnerdocker#!/bin/bash set -e echo ::group::Install xdp-test-harness sudo python3 -m pip install xdp_test_harness echo ::endgroup:: echo ::group::Install virtme git clone https://github.com/amluto/virtme sudo python3 -m pip install ./virtme echo ::endgroup:: xdp-tools-1.5.4/.github/scripts/run_tests_in_vm.sh0000755000175100001660000000057215003640462021625 0ustar runnerdocker#!/bin/bash ENVVARS="KERNEL_VERSION DID_UNSHARE CLANG" touch ENVVARS for v in $ENVVARS; do val=$(eval echo '$'$v) echo "$v=$val" >> ENVVARS done touch TEST_OUTPUT tail -f TEST_OUTPUT & sudo virtme-run --kdir kernel --script-exec .github/scripts/run_tests.sh --pwd --rw --mods=auto --qemu-opts -cpu qemu64 -machine accel=tcg -m 2G kill %1 exit "$(cat TEST_RESULT)" xdp-tools-1.5.4/.github/workflows/0000755000175100001660000000000015003640462016412 5ustar runnerdockerxdp-tools-1.5.4/.github/workflows/covscan.yml0000644000175100001660000000412715003640462020575 0ustar runnerdockername: coverity-scan on: schedule: - cron: '0 18 * * 0' # Sundays at 18:00 UTC push: branches: [ "coverity_scan" ] jobs: latest: runs-on: ubuntu-22.04 env: LLVM_VERSION: 19 CLANG: clang-19 steps: - name: Check out repository code uses: actions/checkout@v4 with: submodules: recursive - name: Prepare packages run: | sudo apt-get update sudo apt-get install zstd binutils-dev elfutils libpcap-dev libelf-dev gcc-multilib pkg-config wireshark tshark bpfcc-tools python3 python3-pip python3-setuptools qemu-kvm rpm2cpio libdw-dev libdwarf-dev - name: 
Prepare Clang run: | wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-$LLVM_VERSION main" | sudo tee -a /etc/apt/sources.list sudo apt-get -qq update sudo apt-get -qq -y install clang-$LLVM_VERSION lld-$LLVM_VERSION llvm-$LLVM_VERSION - name: Download Coverity Build Tool run: | wget -q https://scan.coverity.com/download/cxx/linux64 --post-data "token=$TOKEN&project=xdp-project%2Fxdp-tools" -O cov-analysis-linux64.tar.gz mkdir cov-analysis-linux64 tar xzf cov-analysis-linux64.tar.gz --strip 1 -C cov-analysis-linux64 env: TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }} - name: Configure run: ./configure - name: Build with cov-build run: | export PATH=`pwd`/cov-analysis-linux64/bin:$PATH cov-build --dir cov-int make - name: Submit the result to Coverity Scan run: | tar czvf xdp-tools.tgz cov-int curl \ --form project=xdp-project/xdp-tools \ --form token=$TOKEN \ --form email=toke@redhat.com \ --form file=@xdp-tools.tgz \ --form version=trunk \ --form description="xdp-tools" \ https://scan.coverity.com/builds?project=xdp-project%2Fxdp-tools env: TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }} xdp-tools-1.5.4/.github/workflows/release.yml0000644000175100001660000000104715003640462020557 0ustar runnerdocker--- name: "tagged-release" on: push: tags: - "v*" jobs: tagged-release: name: "Tagged Release" runs-on: "ubuntu-latest" steps: - name: Check out repository code uses: actions/checkout@v4 with: submodules: recursive - name: "Create source archive" run: | ./mkarchive.sh - uses: "marvinpinto/action-automatic-releases@v1.2.1" with: repo_token: "${{ secrets.GITHUB_TOKEN }}" prerelease: false files: | *.tar.gz xdp-tools-1.5.4/.github/workflows/selftests.yml0000644000175100001660000000432115003640462021151 0ustar runnerdockername: Selftests on: push: branches: [ main ] pull_request: branches: [ main ] jobs: selftest: runs-on: ubuntu-22.04 strategy: matrix: KERNEL_VERSION: - "6.13.7-200.fc41" - "6.12.8-200.fc41" - "6.10.12-200.fc40" - "6.6.14-200.fc39" - "6.1.9-200.fc37" - "5.16.8-200.fc35" - "5.11.0-156.fc34" - "5.6.19-300.fc32" LLVM_VERSION: - 16 - 17 - 18 - 19 - 20 fail-fast: false env: KERNEL_VERSION: ${{ matrix.KERNEL_VERSION }} LLVM_VERSION: ${{ matrix.LLVM_VERSION }} CLANG: clang-${{ matrix.LLVM_VERSION }} LLVM_STRIP: llvm-strip-${{ matrix.LLVM_VERSION }} # can't use unshare on old kernels DID_UNSHARE: ${{ (startsWith(matrix.KERNEL_VERSION, '5.6') || startsWith(matrix.KERNEL_VERSION, '5.11')) && 1 || 0 }} steps: - name: Check out repository code uses: actions/checkout@v4 with: submodules: recursive - name: Prepare packages run: | sudo apt-get update sudo apt-get install zstd binutils-dev elfutils libpcap-dev libelf-dev gcc-multilib pkg-config wireshark tshark bpfcc-tools python3 python3-pip python3-setuptools qemu-kvm rpm2cpio libdw-dev libdwarf-dev libcap-ng-dev socat - name: Prepare Clang run: | wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-$LLVM_VERSION main" | sudo tee -a /etc/apt/sources.list sudo apt-get -qq update sudo apt-get -qq -y install clang-$LLVM_VERSION lld-$LLVM_VERSION llvm-$LLVM_VERSION - name: Install latest bpftool run: | git clone --depth=1 --recurse-submodules https://github.com/libbpf/bpftool bpftool make -C bpftool/src sudo make install -C bpftool/src prefix=/usr - name: Compile run: make - name: Prepare test tools run: .github/scripts/prepare_test_tools.sh - name: Prepare test kernel run: 
.github/scripts/prepare_test_kernel.sh - name: Run tests run: .github/scripts/run_tests_in_vm.sh xdp-tools-1.5.4/xdp-bench/0000755000175100001660000000000015003640462014665 5ustar runnerdockerxdp-tools-1.5.4/xdp-bench/xdp_redirect_basic.c0000644000175100001660000001144415003640462020652 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 John Fastabend */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "logging.h" #include "xdp-bench.h" #include "xdp_sample.h" #include "xdp_redirect_basic.skel.h" static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_CNT | SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI; DEFINE_SAMPLE_INIT(xdp_redirect_basic); const struct redirect_opts defaults_redirect_basic = { .mode = XDP_MODE_NATIVE, .interval = 2 }; int do_redirect_basic(const void *cfg, __unused const char *pin_root_path) { const struct redirect_opts *opt = cfg; struct xdp_program *xdp_prog = NULL, *dummy_prog = NULL; DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct bpf_program *prog = NULL; struct xdp_redirect_basic *skel; char str[2 * IF_NAMESIZE + 1]; int ret = EXIT_FAIL_OPTION; if (opt->extended) sample_switch_mode(); if (opt->mode == XDP_MODE_SKB) /* devmap_xmit tracepoint not available */ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI); if (opt->stats) mask |= SAMPLE_REDIRECT_CNT; skel = xdp_redirect_basic__open(); if (!skel) { pr_warn("Failed to xdp_redirect_basic__open: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end; } ret = sample_init_pre_load(skel, opt->iface_in.ifname); if (ret < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } skel->rodata->from_match[0] = opt->iface_in.ifindex; skel->rodata->to_match[0] = opt->iface_out.ifindex; skel->rodata->ifindex_out = opt->iface_out.ifindex; /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(skel->obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); opts.obj = skel->obj; opts.prog_name = (opt->load_mode == BASIC_LOAD_BYTES) ? "xdp_redirect_load_bytes_prog" : "xdp_redirect_prog"; xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { ret = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-ret)); goto end_destroy; } /* We always set the frags support bit: nothing the program does is * incompatible with multibuf, and it's perfectly fine to load a program * with frags support on an interface with a small MTU. We don't risk * setting any flags the kernel will balk at, either, since libxdp will * do the feature probing for us and skip the flag if the kernel doesn't * support it. * * The function below returns EOPNOTSUPP if libbpf is too old to support * setting the flags, but we just ignore that, since in such a case the * best we can do is just attempt to run without the frags support.
*/ xdp_program__set_xdp_frags_support(xdp_prog, true); ret = xdp_program__attach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); if (ret < 0) { pr_warn("Failed to attach XDP program: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } ret = sample_init(skel, mask, opt->iface_in.ifindex, opt->iface_out.ifindex); if (ret < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } opts.obj = NULL; opts.prog_name = "xdp_pass"; opts.find_filename = "xdp-dispatcher.o"; dummy_prog = xdp_program__create(&opts); if (!dummy_prog) { pr_warn("Failed to load dummy program: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_detach; } xdp_program__set_xdp_frags_support(dummy_prog, true); ret = xdp_program__attach(dummy_prog, opt->iface_out.ifindex, opt->mode, 0); if (ret < 0) { pr_warn("Failed to attach dummy program: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_detach; } ret = EXIT_FAIL; safe_strncpy(str, get_driver_name(opt->iface_in.ifindex), sizeof(str)); pr_info("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n", opt->iface_in.ifname, opt->iface_in.ifindex, str, opt->iface_out.ifname, opt->iface_out.ifindex, get_driver_name(opt->iface_out.ifindex)); ret = sample_run(opt->interval, NULL, NULL); if (ret < 0) { pr_warn("Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = EXIT_OK; end_detach: if (dummy_prog) xdp_program__detach(dummy_prog, opt->iface_out.ifindex, opt->mode, 0); xdp_program__detach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); end_destroy: xdp_redirect_basic__destroy(skel); end: sample_teardown(); return ret; } xdp-tools-1.5.4/xdp-bench/README.org0000644000175100001660000006245015003640462016342 0ustar runnerdocker#+EXPORT_FILE_NAME: xdp-bench #+TITLE: xdp-bench #+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"A simple XDP benchmarking tool" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. There's also a Makefile rule to export it. * XDP-bench - a simple XDP benchmarking tool XDP-bench is a benchmarking utility for exercising the different operation modes of XDP. It is intended to be a simple program demonstrating the various operating modes; these include dropping packets, hairpin forwarding (using the =XDP_TX= return code), and redirection using the various in-kernel packet redirection facilities. The drop and TX modes support various options to control whether packet data is touched (read or written) before being dropped or transmitted. The redirection modes support using the simple ifindex-based =bpf_redirect= helper, the =bpf_redirect_map= helper using a cpumap as its target, =bpf_redirect_map= using a devmap as its target, and the devmap's broadcast mode which allows redirecting to multiple devices. There is more information on the meaning of the output in both default (terse) and extended output mode, in the *Output Format Description* section below. 
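As a conceptual aid, the sketch below shows the two redirect flavours referred to above: the plain ifindex-based =bpf_redirect()= helper and the map-based =bpf_redirect_map()= helper with a devmap target. It is a minimal illustration, not code taken from xdp-bench itself; the =tx_port= map and the =ifindex_out= variable are names made up for the example.

#+begin_src c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* One-entry devmap holding the target interface for the map-based variant */
struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
        __uint(max_entries, 1);
} tx_port SEC(".maps");

/* Target ifindex for the plain variant; filled in by the loader */
const volatile int ifindex_out;

SEC("xdp")
int redirect_ifindex(struct xdp_md *ctx)
{
        /* The flavour exercised by the 'redirect' command:
         * the target is a plain interface index */
        return bpf_redirect(ifindex_out, 0);
}

SEC("xdp")
int redirect_devmap(struct xdp_md *ctx)
{
        /* The flavour exercised by the 'redirect-map' command:
         * the target device is looked up in slot 0 of the devmap */
        return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";
#+end_src

The cpumap variant used by the =redirect-cpu= command works the same way, except the target map is a =BPF_MAP_TYPE_CPUMAP= whose keys are CPU indices rather than interfaces.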
** Running xdp-bench The syntax for running xdp-bench is: #+begin_src sh Usage: xdp-bench COMMAND [options] COMMAND can be one of: drop - Drop all packets on an interface pass - Pass all packets to the network stack tx - Transmit packets back out on an interface (hairpin forwarding) redirect - XDP redirect using the bpf_redirect() helper redirect-cpu - XDP CPU redirect using BPF_MAP_TYPE_CPUMAP redirect-map - XDP redirect using BPF_MAP_TYPE_DEVMAP redirect-multi - XDP multi-redirect using BPF_MAP_TYPE_DEVMAP and the BPF_F_BROADCAST flag #+end_src Each command and its options are explained below. Or use =xdp-bench COMMAND --help= to see the options for each command. * The DROP command In this mode, =xdp-bench= installs an XDP program on an interface that simply drops all packets. There are options to control what to do with the packet before dropping it (touch the packet data or not), as well as which statistics to gather. This is a basic benchmark for the baseline (best-case) performance of XDP on an interface. The syntax for the =drop= command is: =xdp-bench drop [options] <ifname>= Where =<ifname>= is the name of the interface the XDP program should be installed on. The supported options are: ** -p, --packet-operation Specify which operation should be taken on the packet before dropping it. The following actions are available: #+begin_src sh no-touch - Drop the packet without touching the packet data read-data - Read a field in the packet header before dropping parse-ip - Parse the IP header field before dropping swap-macs - Swap the source and destination MAC addresses before dropping #+end_src Whether to touch the packet before dropping it can have a significant performance impact, as this requires bringing packet data into the CPU cache (and flushing it back out if writing). The default for this option is =no-touch=. ** -l, --load-mode Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: #+begin_src sh dpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions #+end_src This can be used to benchmark the various packet access modes supported by the kernel. The default for this option is =dpa=. ** -r, --rxq-stats If set, the XDP program will also gather statistics on which receive queue index each packet was received on. This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration, may or may not be equivalent). ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit.
** -h, --help Display a summary of the available options * The PASS command In this mode, =xdp-bench= installs an XDP program on an interface that passes all packets to the network stack after processing them (returning =XDP_PASS=). There are options to control what to do with the packet before passing it (touch the packet data or not), as well as which statistics to gather. This is a basic benchmark for the overhead of installing an XDP program on an interface while still running the regular network stack. The syntax for the =pass= command is: =xdp-bench pass [options] <ifname>= Where =<ifname>= is the name of the interface the XDP program should be installed on. The supported options are: ** -p, --packet-operation Specify which operation should be taken on the packet before passing it. The following actions are available: #+begin_src sh no-touch - Pass the packet without touching the packet data read-data - Read a field in the packet header before passing parse-ip - Parse the IP header field before passing swap-macs - Swap the source and destination MAC addresses before passing #+end_src The default for this option is =no-touch=. ** -l, --load-mode Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: #+begin_src sh dpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions #+end_src This can be used to benchmark the various packet access modes supported by the kernel. The default for this option is =dpa=. ** -r, --rxq-stats If set, the XDP program will also gather statistics on which receive queue index each packet was received on. This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration, may or may not be equivalent). ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit. ** -h, --help Display a summary of the available options * The TX command In this mode, =xdp-bench= installs an XDP program on an interface that performs so-called "hairpin forwarding", which means each packet is transmitted back out the same interface (using the =XDP_TX= return code). There are options to control what to do with the packet before transmitting it (touch the packet data or not), as well as which statistics to gather. The syntax for the =tx= command is: =xdp-bench tx [options] <ifname>= Where =<ifname>= is the name of the interface the XDP program should be installed on. The supported options are: ** -p, --packet-operation Specify which operation should be taken on the packet before transmitting it.
The following actions are available: #+begin_src sh no-touch - Transmit the packet without touching the packet data read-data - Read a field in the packet header before transmitting parse-ip - Parse the IP header field before transmitting swap-macs - Swap the source and destination MAC addresses before transmitting #+end_src To allow the packet to be successfully transmitted back to the sender, the MAC addresses have to be swapped, so that the source MAC matches the network device. However, this swapping carries a performance overhead, so this option allows it to be turned off. The default for this option is =swap-macs=. ** -l, --load-mode Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: #+begin_src sh dpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions #+end_src This can be used to benchmark the various packet access modes supported by the kernel. The default for this option is =dpa=. ** -r, --rxq-stats If set, the XDP program will also gather statistics on which receive queue index each packet was received on. This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration, may or may not be equivalent). ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit. ** -h, --help Display a summary of the available options * The REDIRECT command In this mode, =xdp-bench= sets up packet redirection between the two interfaces supplied on the command line, using the =bpf_redirect= BPF helper triggered on packet reception on the ingress interface. The syntax for the =redirect= command is: =xdp-bench redirect [options] <ifname_in> <ifname_out>= Where =<ifname_in>= is the name of the input interface from which packets will be redirected to the output interface =<ifname_out>=. The supported options are: ** -l, --load-mode Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: #+begin_src sh dpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions #+end_src This can be used to benchmark the various packet access modes supported by the kernel. The default for this option is =dpa=. ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -s, --stats Enable statistics for successful redirection. This option comes with a per-packet tracing overhead for recording all successful redirections. ** -e, --extended Start xdp-bench in "extended" output mode.
If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit. ** -h, --help Display a summary of the available options * The REDIRECT-CPU command In this mode, =xdp-bench= sets up packet redirection using the =bpf_redirect_map= BPF helper triggered on packet reception on the ingress interface, using a cpumap as its target. Hence, this tool can be used to redirect packets on an interface from one CPU to another. In addition to this, the tool also supports redirecting the packet to another output device when it is processed on the target CPU. The syntax for the =redirect-cpu= command is: =xdp-bench redirect-cpu [options] <ifname_in> -c 0 ... -c N= Where =<ifname_in>= is the name of the input interface from which packets will be redirected to the target CPU list specified using =-c=. The supported options are: ** -c, --cpu Specify a possible target CPU index. This option must be passed at least once, and can be passed multiple times to specify a list of CPUs. Which CPU is chosen for a given packet depends on the value of the =--program-mode= option, described below. ** -p, --program-mode Specify a program that embeds a predefined policy deciding how packets are redirected to different CPUs. The following options are available: #+begin_src sh no-touch - Redirect without touching packet data touch - Read packet data before redirecting round-robin - Cycle between target CPUs in a round-robin fashion (for each packet) l4-proto - Choose the target CPU based on the layer-4 protocol of the packet l4-filter - Like l4-proto, but drop UDP packets with destination port 9 (used by pktgen) l4-hash - Use source and destination IP hashing to pick target CPU l4-sport - Use modulo of source port to pick target CPU l4-dport - Use modulo of destination port to pick target CPU #+end_src The =no-touch= and =touch= modes always redirect packets to the same CPU (the first value supplied to =--cpu=). The =round-robin= and =l4-hash= modes distribute packets between all the CPUs supplied as =--cpu= arguments, while =l4-proto= and =l4-filter= send TCP and unrecognised packets to CPU index 0, UDP packets to CPU index 1 and ICMP packets to CPU index 2 (where the index refers to the order the actual CPUs are given on the command line). The default for this option is =l4-hash=. ** -r, --remote-action If this option is set, a separate program is installed into the cpumap, which will be invoked on the remote CPU after the packet is processed there. The action can be either =drop= or =pass=, which will drop the packet or pass it to the regular networking stack, respectively. Or it can be =redirect=, which will cause the packet to be redirected to another interface and transmitted out that interface on the remote CPU. If this option is set to =redirect=, the target device must be specified using =--redirect-device=. The default for this option is =disabled=.
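For illustration, a =redirect-cpu= invocation combining these options could look like the following (the interface names are hypothetical):

#+begin_src sh
# Steer packets arriving on eth0 to CPUs 0 and 1, and have the remote
# CPU redirect each packet out of eth1 after processing it
xdp-bench redirect-cpu eth0 -c 0 -c 1 --remote-action redirect --redirect-device eth1
#+end_src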
** -r, --redirect-device Specify the device to redirect the packet to when it is received on the target CPU. Note that this option can only be specified with =--remote-action redirect=. ** -q, --qsize Set the queue size for the per-CPU cpumap ring buffer used for redirecting packets from multiple CPUs to one CPU. The default value is 2048 packets. ** -x, --stress-mode Stress the cpumap implementation by deallocating and reallocating the cpumap ring buffer on each polling interval. ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -s, --stats Enable statistics for successful redirection. This option comes with a per-packet tracing overhead for recording all successful redirections. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit. ** -h, --help Display a summary of the available options * The REDIRECT-MAP command In this mode, =xdp-bench= sets up packet redirection between two interfaces supplied on the command line, using the =bpf_redirect_map()= BPF helper triggered on packet reception on the ingress interface, using a devmap as its target. The syntax for the =redirect-map= command is: =xdp-bench redirect-map [options] <ifname_in> <ifname_out>= Where =<ifname_in>= is the name of the input interface from which packets will be redirected to the output interface =<ifname_out>=. The supported options are: ** -X, --load-egress Load a program in the devmap entry used for redirection, so that it is invoked after the packet is redirected to the target device, before it is transmitted out of the output interface. The remote program will update the packet data so its source MAC address matches that of the destination interface. ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -s, --stats Enable statistics for successful redirection. This option comes with a per-packet tracing overhead for recording all successful redirections. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit.
** -h, --help Display a summary of the available options * The REDIRECT-MULTI command In this mode, =xdp-bench= sets up one-to-many packet redirection between interfaces supplied on the command line, using the =bpf_redirect_map= BPF helper triggered on packet reception on the ingress interface, using a devmap as its target. The packet is broadcast to all output interfaces specified on the command line, using devmap's packet broadcast feature. The syntax for the =redirect-multi= command is: =xdp-bench redirect-multi [options] <ifname_in> <ifname_out_1> ... <ifname_out_N>= Where =<ifname_in>= is the name of the input interface from which packets will be redirected to one or more output interfaces. The supported options are: ** -X, --load-egress Load a program in the devmap entry used for redirection, so that it is invoked after the packet is redirected to the target device, before it is transmitted out of the output interface. The remote program will update the packet data so its source MAC address matches that of the destination interface. ** -i, --interval Set the polling interval for collecting all statistics and displaying them to the output. The interval is specified in seconds. ** -s, --stats Enable statistics for successful redirection. This option comes with a per-packet tracing overhead for recording all successful redirections. ** -e, --extended Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\ while the program is running. See also the *Output Format Description* section below. ** -m, --mode Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the program in SKB mode is neither performant nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. ** -v, --verbose Enable verbose logging. Supply twice to enable verbose logging from the underlying =libxdp= and =libbpf= libraries. ** --version Show the application version and exit. ** -h, --help Display a summary of the available options * Output Format Description By default, redirect success statistics are disabled; use =--stats= to enable them. Terse output mode is the default; extended output mode can be activated using the =--extended= command line option. SIGQUIT (Ctrl + \\) can be used to switch the mode dynamically at runtime. Terse mode displays at most the following fields: #+begin_src sh rx/s Number of packets received per second redir/s Number of packets successfully redirected per second err,drop/s Aggregated count of errors per second (including dropped packets when not using the drop command) xmit/s Number of packets transmitted on the output device per second #+end_src Extended output mode displays at most the following fields: #+begin_src sh FIELD DESCRIPTION receive Displays the number of packets received and errors encountered Whenever an error or packet drop occurs, details of per CPU error and drop statistics will be expanded inline in terse mode.
pkt/s - Packets received per second drop/s - Packets dropped per second error/s - Errors encountered per second redirect - Displays the number of packets successfully redirected Errors encountered are expanded under redirect_err field Note that passing -s to enable it has a per packet overhead redir/s - Packets redirected successfully per second redirect_err Displays the number of packets that failed redirection The errno is expanded under this field with per CPU count The recognized errors are: EINVAL: Invalid redirection ENETDOWN: Device being redirected to is down EMSGSIZE: Packet length too large for device EOPNOTSUPP: Operation not supported ENOSPC: No space in ptr_ring of cpumap kthread error/s - Packets that failed redirection per second enqueue to cpu N Displays the number of packets enqueued to bulk queue of CPU N Expands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N Received packets can be associated with the CPU redirect program is enqueuing packets to. pkt/s - Packets enqueued per second from other CPU to CPU N drop/s - Packets dropped when trying to enqueue to CPU N bulk-avg - Average number of packets processed for each event kthread Displays the number of packets processed in CPUMAP kthread for each CPU Packets consumed from ptr_ring in kthread, and its xdp_stats (after calling CPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total and then per-CPU to associate it to each CPU's pinned CPUMAP kthread. pkt/s - Packets consumed per second from ptr_ring drop/s - Packets dropped per second in kthread sched - Number of times kthread called schedule() xdp_stats (also expands to per-CPU counts) pass/s - XDP_PASS count for CPUMAP program execution drop/s - XDP_DROP count for CPUMAP program execution redir/s - XDP_REDIRECT count for CPUMAP program execution xdp_exception Displays xdp_exception tracepoint events This can occur due to internal driver errors, unrecognized XDP actions and due to explicit user trigger by use of XDP_ABORTED Each action is expanded below this field with its count hit/s - Number of times the tracepoint was hit per second devmap_xmit Displays devmap_xmit tracepoint events This tracepoint is invoked for successful transmissions on output device but these statistics are not available for generic XDP mode, hence they will be omitted from the output when using SKB mode xmit/s - Number of packets that were transmitted per second drop/s - Number of packets that failed transmissions per second drv_err/s - Number of internal driver errors per second bulk-avg - Average number of packets processed for each event #+end_src * BUGS Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues * AUTHOR Earlier xdp-redirect tools were written by Jesper Dangaard Brouer and John Fastabend. They were then rewritten to support more features by Kumar Kartikeya Dwivedi, who also ported them to xdp-tools together with Toke Høiland-Jørgensen. This man page was written by Kumar Kartikeya Dwivedi and Toke Høiland-Jørgensen. xdp-tools-1.5.4/xdp-bench/xdp_redirect_basic.bpf.c0000644000175100001660000000400215003640462021410 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2016 John Fastabend * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include #include #include #include #include #ifndef HAVE_LIBBPF_BPF_PROGRAM__TYPE static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189; static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 190; #endif const volatile int ifindex_out; SEC("xdp") int xdp_redirect_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; struct datarec *rec; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); swap_src_dst_mac(data); return bpf_redirect(ifindex_out, 0); } SEC("xdp") int xdp_redirect_load_bytes_prog(struct xdp_md *ctx) { __u32 key = bpf_get_smp_processor_id(); int err, offset = 0; struct datarec *rec; struct ethhdr eth; err = bpf_xdp_load_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); swap_src_dst_mac(&eth); err = bpf_xdp_store_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; return bpf_redirect(ifindex_out, 0); } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-bench/xdp_redirect_cpumap.bpf.c0000644000175100001660000003643015003640462021626 0ustar runnerdocker/* XDP redirect to CPUs via cpumap (BPF_MAP_TYPE_CPUMAP) * * GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */ #include #include #include #include #include #include "hash_func01.h" /* Special map type that can XDP_REDIRECT frames to another CPU */ struct { __uint(type, BPF_MAP_TYPE_CPUMAP); __uint(key_size, sizeof(__u32)); __uint(value_size, sizeof(struct bpf_cpumap_val)); } cpu_map SEC(".maps"); /* Set of maps controlling available CPU, and for iterating through * selectable redirect CPUs.
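 * cpus_available maps an index to a configured CPU id (nr_cpus acts as a sentinel for "not configured"), cpus_count holds the number of configured entries at key 0, and cpus_iterator keeps per-CPU state for the round-robin program.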
*/ struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, __u32); __type(value, __u32); } cpus_available SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, __u32); __type(value, __u32); __uint(max_entries, 1); } cpus_count SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __type(key, __u32); __type(value, __u32); __uint(max_entries, 1); } cpus_iterator SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_DEVMAP); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(struct bpf_devmap_val)); __uint(max_entries, 1); } tx_port SEC(".maps"); char tx_mac_addr[ETH_ALEN]; /* Helper parse functions */ static __always_inline bool parse_eth(struct ethhdr *eth, void *data_end, __u16 *eth_proto, __u64 *l3_offset) { __u16 eth_type; __u64 offset; offset = sizeof(*eth); if ((void *)eth + offset > data_end) return false; eth_type = eth->h_proto; /* Skip non 802.3 Ethertypes */ if (__builtin_expect(bpf_ntohs(eth_type) < ETH_P_802_3_MIN, 0)) return false; /* Handle VLAN tagged packet */ if (eth_type == bpf_htons(ETH_P_8021Q) || eth_type == bpf_htons(ETH_P_8021AD)) { struct vlan_hdr *vlan_hdr; vlan_hdr = (void *)eth + offset; offset += sizeof(*vlan_hdr); if ((void *)eth + offset > data_end) return false; eth_type = vlan_hdr->h_vlan_encapsulated_proto; } /* Handle double VLAN tagged packet */ if (eth_type == bpf_htons(ETH_P_8021Q) || eth_type == bpf_htons(ETH_P_8021AD)) { struct vlan_hdr *vlan_hdr; vlan_hdr = (void *)eth + offset; offset += sizeof(*vlan_hdr); if ((void *)eth + offset > data_end) return false; eth_type = vlan_hdr->h_vlan_encapsulated_proto; } *eth_proto = bpf_ntohs(eth_type); *l3_offset = offset; return true; } static __always_inline __u16 get_port_ipv4_udp(struct xdp_md *ctx, __u64 nh_off, bool src) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct iphdr *iph = data + nh_off; struct udphdr *udph; if (iph + 1 > data_end) return 0; if (!(iph->protocol == IPPROTO_UDP)) return 0; udph = (void *)(iph + 1); if (udph + 1 > data_end) return 0; if (src) return bpf_ntohs(udph->source); else return bpf_ntohs(udph->dest); } static __always_inline __u16 get_port_ipv6_udp(struct xdp_md *ctx, __u64 nh_off, bool src) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ipv6hdr *ip6h = data + nh_off; struct udphdr *udph; if (ip6h + 1 > data_end) return 0; if (!(ip6h->nexthdr == IPPROTO_UDP)) return 0; udph = (void *)(ip6h + 1); if (udph + 1 > data_end) return 0; if (src) return bpf_ntohs(udph->source); else return bpf_ntohs(udph->dest); } static __always_inline __u16 get_port_ipv4_tcp(struct xdp_md *ctx, __u64 nh_off, bool src) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct iphdr *iph = data + nh_off; struct tcphdr *tcph; if (iph + 1 > data_end) return 0; if (!(iph->protocol == IPPROTO_TCP)) return 0; tcph = (void *)(iph + 1); if (tcph + 1 > data_end) return 0; if (src) return bpf_ntohs(tcph->source); else return bpf_ntohs(tcph->dest); } static __always_inline __u16 get_port_ipv6_tcp(struct xdp_md *ctx, __u64 nh_off, bool src) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ipv6hdr *ip6h = data + nh_off; struct tcphdr *tcph; if (ip6h + 1 > data_end) return 0; if (!(ip6h->nexthdr == IPPROTO_TCP)) return 0; tcph = (void *)(ip6h + 1); if (tcph + 1 > data_end) return 0; if (src) return bpf_ntohs(tcph->source); else return bpf_ntohs(tcph->dest); } static __always_inline int get_proto_ipv4(struct xdp_md *ctx,
__u64 nh_off) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct iphdr *iph = data + nh_off; if (iph + 1 > data_end) return 0; return iph->protocol; } static __always_inline int get_proto_ipv6(struct xdp_md *ctx, __u64 nh_off) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ipv6hdr *ip6h = data + nh_off; if (ip6h + 1 > data_end) return 0; return ip6h->nexthdr; } SEC("xdp") int cpumap_no_touch(struct xdp_md *ctx) { __u32 key = bpf_get_smp_processor_id(); struct datarec *rec; __u32 *cpu_selected; __u32 cpu_dest = 0; __u32 key0 = 0; /* Only use first entry in cpus_available */ cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0); if (!cpu_selected) return XDP_ABORTED; cpu_dest = *cpu_selected; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } SEC("xdp") int cpumap_touch_data(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; struct datarec *rec; __u32 *cpu_selected; __u32 cpu_dest = 0; __u32 key0 = 0; __u16 eth_type; /* Only use first entry in cpus_available */ cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0); if (!cpu_selected) return XDP_ABORTED; cpu_dest = *cpu_selected; /* Validate packet length is minimum Eth header size */ if (eth + 1 > data_end) return XDP_ABORTED; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); /* Read packet data, and use it (drop non 802.3 Ethertypes) */ eth_type = eth->h_proto; if (bpf_ntohs(eth_type) < ETH_P_802_3_MIN) { NO_TEAR_INC(rec->dropped); return XDP_DROP; } if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } SEC("xdp") int cpumap_round_robin(struct xdp_md *ctx) { __u32 key = bpf_get_smp_processor_id(); struct datarec *rec; __u32 cpu_dest = 0; __u32 key0 = 0; __u32 *cpu_selected; __u32 *cpu_iterator; __u32 *cpu_max; __u32 cpu_idx; cpu_max = bpf_map_lookup_elem(&cpus_count, &key0); if (!cpu_max) return XDP_ABORTED; cpu_iterator = bpf_map_lookup_elem(&cpus_iterator, &key0); if (!cpu_iterator) return XDP_ABORTED; cpu_idx = *cpu_iterator; *cpu_iterator += 1; if (*cpu_iterator == *cpu_max) *cpu_iterator = 0; cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_idx); if (!cpu_selected) return XDP_ABORTED; cpu_dest = *cpu_selected; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } SEC("xdp") int cpumap_l4_proto(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; __u8 ip_proto = IPPROTO_UDP; struct datarec *rec; __u16 eth_proto = 0; __u64 l3_offset = 0; __u32 cpu_dest = 0; __u32 *cpu_lookup; __u32 cpu_idx = 0; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset))) return XDP_PASS; /* Just skip */ /* Extract L4 protocol */ switch (eth_proto) { case ETH_P_IP: ip_proto = get_proto_ipv4(ctx, l3_offset); break; case ETH_P_IPV6: ip_proto = get_proto_ipv6(ctx, l3_offset);
break; case ETH_P_ARP: cpu_idx = 0; /* ARP packet handled on separate CPU */ break; default: cpu_idx = 0; } /* Choose CPU based on L4 protocol */ switch (ip_proto) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: cpu_idx = 2; break; case IPPROTO_TCP: cpu_idx = 0; break; case IPPROTO_UDP: cpu_idx = 1; break; default: cpu_idx = 0; } cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); if (!cpu_lookup) return XDP_ABORTED; cpu_dest = *cpu_lookup; if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } SEC("xdp") int cpumap_l4_filter(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; __u8 ip_proto = IPPROTO_UDP; struct datarec *rec; __u16 eth_proto = 0; __u64 l3_offset = 0; __u32 cpu_dest = 0; __u32 *cpu_lookup; __u32 cpu_idx = 0; __u16 dest_port; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset))) return XDP_PASS; /* Just skip */ /* Extract L4 protocol */ switch (eth_proto) { case ETH_P_IP: ip_proto = get_proto_ipv4(ctx, l3_offset); break; case ETH_P_IPV6: ip_proto = get_proto_ipv6(ctx, l3_offset); break; case ETH_P_ARP: cpu_idx = 0; /* ARP packet handled on separate CPU */ break; default: cpu_idx = 0; } /* Choose CPU based on L4 protocol */ switch (ip_proto) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: cpu_idx = 2; break; case IPPROTO_TCP: cpu_idx = 0; break; case IPPROTO_UDP: cpu_idx = 1; /* DDoS filter UDP port 9 (pktgen) */ dest_port = get_port_ipv4_udp(ctx, l3_offset, false); if (dest_port == 9) { NO_TEAR_INC(rec->dropped); return XDP_DROP; } break; default: cpu_idx = 0; } cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); if (!cpu_lookup) return XDP_ABORTED; cpu_dest = *cpu_lookup; if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } /* Hashing initval */ #define INITVAL 15485863 static __always_inline __u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, __u64 nh_off) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct iphdr *iph = data + nh_off; __u32 cpu_hash; if (iph + 1 > data_end) return 0; cpu_hash = iph->saddr + iph->daddr; cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol); return cpu_hash; } static __always_inline __u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, __u64 nh_off) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ipv6hdr *ip6h = data + nh_off; __u32 cpu_hash; if (ip6h + 1 > data_end) return 0; cpu_hash = ip6h->saddr.in6_u.u6_addr32[0] + ip6h->daddr.in6_u.u6_addr32[0]; cpu_hash += ip6h->saddr.in6_u.u6_addr32[1] + ip6h->daddr.in6_u.u6_addr32[1]; cpu_hash += ip6h->saddr.in6_u.u6_addr32[2] + ip6h->daddr.in6_u.u6_addr32[2]; cpu_hash += ip6h->saddr.in6_u.u6_addr32[3] + ip6h->daddr.in6_u.u6_addr32[3]; cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr); return cpu_hash; } /* Load-Balance traffic based on hashing IP-addrs + L4-proto. The * hashing scheme is symmetric, meaning swapping IP src/dest still hits * the same CPU.
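 * The symmetry comes from summing the source and destination addresses (for IPv6, all four 32-bit words of each) before hashing, so both directions of a flow produce the same hash input.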
*/ SEC("xdp") int cpumap_l4_hash(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; struct datarec *rec; __u16 eth_proto = 0; __u64 l3_offset = 0; __u32 cpu_dest = 0; __u32 cpu_idx = 0; __u32 *cpu_lookup; __u32 key0 = 0; __u32 *cpu_max; __u32 cpu_hash; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); cpu_max = bpf_map_lookup_elem(&cpus_count, &key0); if (!cpu_max) return XDP_ABORTED; if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset))) return XDP_PASS; /* Just skip */ /* Hash for IPv4 and IPv6 */ switch (eth_proto) { case ETH_P_IP: cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset); break; case ETH_P_IPV6: cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset); break; case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */ default: cpu_hash = 0; } /* Choose CPU based on hash */ cpu_idx = cpu_hash % *cpu_max; cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); if (!cpu_lookup) return XDP_ABORTED; cpu_dest = *cpu_lookup; if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } static __always_inline int cpumap_l4_port(struct xdp_md *ctx, bool src) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; __u8 ip_proto = IPPROTO_UDP; struct datarec *rec; __u16 eth_proto = 0; __u64 l3_offset = 0; __u32 cpu_dest = 0; __u32 *cpu_lookup; __u32 cpu_idx = 0; __u32 *cpu_max; __u32 key0 = 0; __u16 port; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); cpu_max = bpf_map_lookup_elem(&cpus_count, &key0); if (!cpu_max) return XDP_ABORTED; if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset))) return XDP_PASS; /* Just skip */ /* Extract L4 port (source or destination, depending on src) */ switch (eth_proto) { case ETH_P_IP: ip_proto = get_proto_ipv4(ctx, l3_offset); switch (ip_proto) { case IPPROTO_TCP: port = get_port_ipv4_tcp(ctx, l3_offset, src); break; case IPPROTO_UDP: port = get_port_ipv4_udp(ctx, l3_offset, src); break; default: port = 0; } break; case ETH_P_IPV6: ip_proto = get_proto_ipv6(ctx, l3_offset); switch (ip_proto) { case IPPROTO_TCP: port = get_port_ipv6_tcp(ctx, l3_offset, src); break; case IPPROTO_UDP: port = get_port_ipv6_udp(ctx, l3_offset, src); break; default: port = 0; } break; default: port = 0; } cpu_idx = port % *cpu_max; cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); if (!cpu_lookup) return XDP_ABORTED; cpu_dest = *cpu_lookup; if (cpu_dest >= nr_cpus) { NO_TEAR_INC(rec->issue); return XDP_ABORTED; } return bpf_redirect_map(&cpu_map, cpu_dest, 0); } SEC("xdp") int cpumap_l4_sport(struct xdp_md *ctx) { return cpumap_l4_port(ctx, true); } SEC("xdp") int cpumap_l4_dport(struct xdp_md *ctx) { return cpumap_l4_port(ctx, false); } SEC("xdp/cpumap") int cpumap_redirect(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; swap_src_dst_mac(data); return bpf_redirect_map(&tx_port, 0, 0); } SEC("xdp/cpumap") int cpumap_pass(struct xdp_md *ctx) { return XDP_PASS; } SEC("xdp/cpumap") int cpumap_drop(struct xdp_md *ctx) { return XDP_DROP; } SEC("xdp/devmap") int redirect_egress_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void
*)(long)ctx->data; struct ethhdr *eth = data; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; __builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN); return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-bench/tests/0000755000175100001660000000000015003640462016027 5ustar runnerdockerxdp-tools-1.5.4/xdp-bench/tests/test-xdp-bench.sh0000644000175100001660000001151415003640462021212 0ustar runnerdockerXDP_LOADER=${XDP_LOADER:-./xdp-loader} XDP_BENCH=${XDP_BENCH:-./xdp-bench} ALL_TESTS="test_drop test_pass test_tx test_xdp_load_bytes test_rxq_stats test_redirect test_redirect_cpu test_redirect_map test_redirect_map_egress test_redirect_multi test_redirect_multi_egress" test_basic() { action=$1 export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run $XDP_BENCH $action $NS -vv check_run $XDP_BENCH $action $NS -p read-data -vv check_run $XDP_BENCH $action $NS -p parse-ip -vv check_run $XDP_BENCH $action $NS -p swap-macs -vv check_run $XDP_BENCH $action $NS -m skb -vv check_run $XDP_BENCH $action $NS -e -vv } test_drop() { test_basic drop } test_pass() { test_basic pass } test_tx() { test_basic tx } test_xdp_load_bytes() { skip_if_missing_xdp_load_bytes export XDP_SAMPLE_IMMEDIATE_EXIT=1 for action in drop pass tx; do check_run $XDP_BENCH $action $NS -l load-bytes -vv check_run $XDP_BENCH $action $NS -p read-data -l load-bytes -vv check_run $XDP_BENCH $action $NS -p parse-ip -l load-bytes -vv check_run $XDP_BENCH $action $NS -p swap-macs -l load-bytes -vv check_run $XDP_BENCH $action $NS -m skb -l load-bytes -vv check_run $XDP_BENCH $action $NS -e -l load-bytes -vv done check_run ip link add dev btest0 type veth peer name btest1 check_run $XDP_BENCH redirect btest0 btest1 -l load-bytes -vv check_run $XDP_BENCH redirect btest0 btest1 -s -l load-bytes -vv check_run $XDP_BENCH redirect btest0 btest1 -m skb -l load-bytes -vv check_run $XDP_BENCH redirect btest0 btest1 -e -l load-bytes -vv ip link del dev btest0 } test_rxq_stats() { skip_if_missing_veth_rxq export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run $XDP_BENCH drop $NS -r -vv } test_redirect() { export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run ip link add dev btest0 type veth peer name btest1 check_run $XDP_BENCH redirect btest0 btest1 -vv check_run $XDP_BENCH redirect btest0 btest1 -s -vv check_run $XDP_BENCH redirect btest0 btest1 -m skb -vv check_run $XDP_BENCH redirect btest0 btest1 -e -vv ip link del dev btest0 } test_redirect_cpu() { skip_if_missing_cpumap_attach export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run ip link add dev btest0 type veth peer name btest1 check_run $XDP_BENCH redirect-cpu btest0 -c 0 -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -m skb -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -p touch -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -p round-robin -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -p l4-proto -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -p l4-filter -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -p l4-hash -vv is_progmap_supported || export LIBXDP_SKIP_DISPATCHER=1 check_run $XDP_BENCH redirect-cpu btest0 -c 0 -r drop -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -r pass -vv check_run $XDP_BENCH redirect-cpu btest0 -c 0 -r redirect -D btest1 -vv ip link del dev btest0 } test_redirect_map() { export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run ip link add dev btest0 type veth peer name btest1 check_run $XDP_BENCH redirect-map btest0 btest1 -vv check_run $XDP_BENCH redirect-map btest0 btest1 -s -vv check_run $XDP_BENCH redirect-map 
btest0 btest1 -m skb -vv check_run $XDP_BENCH redirect-map btest0 btest1 -e -vv ip link del dev btest0 } test_redirect_map_egress() { skip_if_missing_cpumap_attach export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run ip link add dev btest0 type veth peer name btest1 is_progmap_supported || export LIBXDP_SKIP_DISPATCHER=1 check_run $XDP_BENCH redirect-map btest0 btest1 -X -vv ip link del dev btest0 } test_redirect_multi() { export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run ip link add dev btest0 type veth peer name btest1 check_run ip link add dev btest2 type veth peer name btest3 check_run $XDP_BENCH redirect-multi btest0 btest1 btest2 btest3 -vv check_run $XDP_BENCH redirect-multi btest0 btest1 btest2 btest3 -s -vv check_run $XDP_BENCH redirect-multi btest0 btest1 btest2 btest3 -m skb -vv check_run $XDP_BENCH redirect-multi btest0 btest1 btest2 btest3 -e -vv ip link del dev btest0 ip link del dev btest2 } test_redirect_multi_egress() { skip_if_missing_cpumap_attach export XDP_SAMPLE_IMMEDIATE_EXIT=1 is_progmap_supported || export LIBXDP_SKIP_DISPATCHER=1 check_run ip link add dev btest0 type veth peer name btest1 check_run ip link add dev btest2 type veth peer name btest3 check_run $XDP_BENCH redirect-multi btest0 btest1 btest2 btest3 -X -vv ip link del dev btest0 ip link del dev btest2 } cleanup_tests() { ip link del dev btest0 >/dev/null 2>&1 ip link del dev btest2 >/dev/null 2>&1 $XDP_LOADER unload $NS --all >/dev/null 2>&1 $XDP_LOADER clean >/dev/null 2>&1 } xdp-tools-1.5.4/xdp-bench/xdp_basic.bpf.c0000644000175100001660000001243215003640462017535 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2016 John Fastabend * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ #include #include #include #include #include #include #ifndef HAVE_LIBBPF_BPF_PROGRAM__TYPE static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189; static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 190; #endif const volatile bool rxq_stats = 0; const volatile enum xdp_action action = XDP_DROP; static int parse_ip_header_load(struct xdp_md *ctx) { int eth_type, ip_type, err, offset = 0; struct ipv6hdr ipv6hdr; struct iphdr iphdr; struct ethhdr eth; err = bpf_xdp_load_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; eth_type = eth.h_proto; offset = sizeof(eth); if (eth_type == bpf_htons(ETH_P_IP)) { err = bpf_xdp_load_bytes(ctx, offset, &iphdr, sizeof(iphdr)); if (err) return err; ip_type = iphdr.protocol; if (ip_type < 0) return ip_type; } else if (eth_type == bpf_htons(ETH_P_IPV6)) { err = bpf_xdp_load_bytes(ctx, offset, &ipv6hdr, sizeof(ipv6hdr)); if (err) return err; ip_type = ipv6hdr.nexthdr; if (ip_type < 0) return ip_type; } return 0; } static int parse_ip_header(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct hdr_cursor nh = { .pos = data }; struct ipv6hdr *ipv6hdr; struct iphdr *iphdr; struct ethhdr *eth; int eth_type, ip_type; eth_type = parse_ethhdr(&nh, data_end, &eth); if (eth_type < 0) return eth_type; if (eth_type == bpf_htons(ETH_P_IP)) { ip_type = parse_iphdr(&nh, data_end, &iphdr); if (ip_type < 0) return ip_type; } else if (eth_type == bpf_htons(ETH_P_IPV6)) { ip_type = parse_ip6hdr(&nh, data_end, &ipv6hdr); if (ip_type < 0) return ip_type; } return 0; } static int record_stats(__u32 rxq_idx, bool success) { __u32 key = bpf_get_smp_processor_id(); struct datarec *rec; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return -1; NO_TEAR_INC(rec->processed); if (action == XDP_DROP && success) NO_TEAR_INC(rec->dropped); if (rxq_stats) { struct datarec *rxq_rec; rxq_rec = bpf_map_lookup_elem(&rxq_cnt, &rxq_idx); if (!rxq_rec) return -1; NO_TEAR_INC(rxq_rec->processed); if (action == XDP_DROP && success) NO_TEAR_INC(rxq_rec->dropped); } return 0; } SEC("xdp") int xdp_basic_prog(struct xdp_md *ctx) { if (record_stats(ctx->rx_queue_index, true)) return XDP_ABORTED; return action; } SEC("xdp") int xdp_read_data_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; int ret = action; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_ABORTED; if (bpf_ntohs(eth->h_proto) < ETH_P_802_3_MIN) ret = XDP_ABORTED; if (record_stats(ctx->rx_queue_index, ret==action)) return XDP_ABORTED; return ret; } SEC("xdp") int xdp_read_data_load_bytes_prog(struct xdp_md *ctx) { int err, offset = 0; struct ethhdr eth; int ret = action; err = bpf_xdp_load_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; if (bpf_ntohs(eth.h_proto) < ETH_P_802_3_MIN) ret = XDP_ABORTED; if (record_stats(ctx->rx_queue_index, ret==action)) return XDP_ABORTED; return ret; } SEC("xdp") int xdp_swap_macs_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_ABORTED; swap_src_dst_mac(data); if (record_stats(ctx->rx_queue_index, true)) return XDP_ABORTED; return action; } SEC("xdp") int xdp_swap_macs_load_bytes_prog(struct xdp_md *ctx) { int err, offset = 0; struct ethhdr eth;
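/* Load-bytes variant of the MAC swap: copy the Ethernet header into a local buffer, swap the addresses in the copy, then write the header back with bpf_xdp_store_bytes() instead of using direct packet access. */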
err = bpf_xdp_load_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; swap_src_dst_mac(&eth); err = bpf_xdp_store_bytes(ctx, offset, &eth, sizeof(eth)); if (err) return err; if (record_stats(ctx->rx_queue_index, true)) return XDP_ABORTED; return action; } SEC("xdp") int xdp_parse_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; int ret = action; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_ABORTED; if (parse_ip_header(ctx)) ret = XDP_ABORTED; if (record_stats(ctx->rx_queue_index, ret==action)) return XDP_ABORTED; return ret; } SEC("xdp") int xdp_parse_load_bytes_prog(struct xdp_md *ctx) { int ret = action; if (parse_ip_header_load(ctx)) ret = XDP_ABORTED; if (record_stats(ctx->rx_queue_index, ret==action)) return XDP_ABORTED; return ret; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-bench/xdp-bench.80000644000175100001660000006564115003640462016632 0ustar runnerdocker.TH "xdp-bench" "8" "NOVEMBER 19, 2024" "V1.5.4" "A simple XDP benchmarking tool" .SH "NAME" XDP-bench \- a simple XDP benchmarking tool .SH "SYNOPSIS" .PP XDP-bench is a benchmarking utility for exercising the different operation modes of XDP. It is intended to be a simple program demonstrating the various operating modes; these include dropping packets, hairpin forwarding (using the \fIXDP_TX\fP return code), and redirection using the various in-kernel packet redirection facilities. .PP The drop and TX modes support various options to control whether packet data is touched (read or written) before being dropped or transmitted. The redirection modes support using the simple ifindex-based \fIbpf_redirect\fP helper, the \fIbpf_redirect_map\fP helper using a cpumap as its target, \fIbpf_redirect_map\fP using a devmap as its target, and the devmap's broadcast mode which allows redirecting to multiple devices. .PP There is more information on the meaning of the output in both default (terse) and extended output mode, in the \fBOutput Format Description\fP section below. .SS "Running xdp-bench" .PP The syntax for running xdp-bench is: .RS .nf \fCUsage: xdp-bench COMMAND [options] COMMAND can be one of: drop - Drop all packets on an interface pass - Pass all packets to the network stack tx - Transmit packets back out on an interface (hairpin forwarding) redirect - XDP redirect using the bpf_redirect() helper redirect-cpu - XDP CPU redirect using BPF_MAP_TYPE_CPUMAP redirect-map - XDP redirect using BPF_MAP_TYPE_DEVMAP redirect-multi - XDP multi-redirect using BPF_MAP_TYPE_DEVMAP and the BPF_F_BROADCAST flag \fP .fi .RE .PP Each command and its options are explained below. Or use \fIxdp\-bench COMMAND \-\-help\fP to see the options for each command. .SH "The DROP command" .PP In this mode, \fIxdp\-bench\fP installs an XDP program on an interface that simply drops all packets. There are options to control what to do with the packet before dropping it (touch the packet data or not), as well as which statistics to gather. This is a basic benchmark for the baseline (best-case) performance of XDP on an interface. .PP The syntax for the \fIdrop\fP command is: .PP \fIxdp\-bench drop [options] <ifname>\fP .PP Where \fI<ifname>\fP is the name of the interface the XDP program should be installed on. .PP The supported options are: .SS "-p, --packet-operation <action>" .PP Specify which operation should be taken on the packet before dropping it.
The following actions are available: .RS .nf \fCno-touch - Drop the packet without touching the packet data read-data - Read a field in the packet header before dropping parse-ip - Parse the IP header field before dropping swap-macs - Swap the source and destination MAC addresses before dropping \fP .fi .RE .PP Whether to touch the packet before dropping it can have a significant performance impact as this requires bringing packet data into the CPU cache (and flushing it back out if writing). .PP The default for this option is \fIno\-touch\fP. .SS "-l, --load-mode " .PP Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: .RS .nf \fCdpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions \fP .fi .RE .PP This can be used to benchmark the various packet access modes supported by the kernel. .PP The default for this option is \fIdpa\fP. .SS "-r, --rxq-stats" .PP If set, the XDP program will also gather statistics on which receive queue index each packet was received on. This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration may or may not be equivalent). .SS "-i, --interval " .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-$\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The PASS command" .PP In this mode, \fIxdp\-bench\fP installs an XDP program on an interface that passes all packets to the network stack after processing them (returning \fIXDP_PASS\fP). There are options to control what to do with the packet before passing it (touch the packet data or not), as well as which statistics to gather. This is a basic benchmark for the overhead of installing an XDP program on an interface while still running the regular network stack. .PP The syntax for the \fIpass\fP command is: .PP \fIxdp\-bench pass [options] \fP .PP Where \fI\fP is the name of the interface the XDP program should be installed on. .PP The supported options are: .SS "-p, --packet-operation " .PP Specify which operation should be taken on the packet before passing it. The following actions are available: .RS .nf \fCno-touch - Pass the packet without touching the packet data read-data - Read a field in the packet header before passing parse-ip - Parse the IP header field before passing swap-macs - Swap the source and destination MAC addresses before passing \fP .fi .RE .PP The default for this option is \fIno\-touch\fP. .SS "-l, --load-mode " .PP Specify which mechanism xdp-bench should use to load (and store) the packet data. 
The following modes are available: .RS .nf \fCdpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions \fP .fi .RE .PP This can be used to benchmark the various packet access modes supported by the kernel. .PP The default for this option is \fIdpa\fP. .SS "-r, --rxq-stats" .PP If set, the XDP program will also gather statistics on which receive queue index each packet was received on. This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration may or may not be equivalent). .SS "-i, --interval " .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-$\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The TX command" .PP In this mode, \fIxdp\-bench\fP installs an XDP program on an interface that performs so-called "hairpin forwarding", which means each packet is transmitted back out the same interface (using the \fIXDP_TX\fP return code).. There are options to control what to do with the packet before transmitting it (touch the packet data or not), as well as which statistics to gather. .PP The syntax for the \fItx\fP command is: .PP \fIxdp\-bench tx [options] \fP .PP Where \fI\fP is the name of the interface the XDP program should be installed on. .PP The supported options are: .SS "-p, --packet-operation " .PP Specify which operation should be taken on the packet before transmitting it. The following actions are available: .RS .nf \fCno-touch - Transmit the packet without touching the packet data read-data - Read a field in the packet header before transmitting parse-ip - Parse the IP header field before transmitting swap-macs - Swap the source and destination MAC addresses before transmitting \fP .fi .RE .PP To allow the packet to be successfully transmitted back to the sender, the MAC addresses have to be swapped, so that the source MAC matches the network device. However, there is a performance overhead in doing swapping, so this option allows this function to be turned off. .PP The default for this option is \fIswap\-macs\fP. .SS "-l, --load-mode " .PP Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: .RS .nf \fCdpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions \fP .fi .RE .PP This can be used to benchmark the various packet access modes supported by the kernel. .PP The default for this option is \fIdpa\fP. .SS "-r, --rxq-stats" .PP If set, the XDP program will also gather statistics on which receive queue index each packet was received on. 
This is displayed in the extended output mode along with per-CPU data (which, depending on the hardware configuration, may or may not be equivalent). .SS "-i, --interval <seconds>" .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The REDIRECT command" .PP In this mode, \fIxdp\-bench\fP sets up packet redirection between the two interfaces supplied on the command line using the \fIbpf_redirect\fP BPF helper triggered on packet reception on the ingress interface. .PP The syntax for the \fIredirect\fP command is: .PP \fIxdp\-bench redirect [options] <ifname-in> <ifname-out>\fP .PP Where \fI<ifname-in>\fP is the name of the input interface from where packets will be redirected to the output interface \fI<ifname-out>\fP. .PP The supported options are: .SS "-l, --load-mode <mode>" .PP Specify which mechanism xdp-bench should use to load (and store) the packet data. The following modes are available: .RS .nf \fCdpa - Use traditional Direct Packet Access from the XDP program load-bytes - Use the xdp_load_bytes() and xdp_store_bytes() helper functions \fP .fi .RE .PP This can be used to benchmark the various packet access modes supported by the kernel. .PP The default for this option is \fIdpa\fP. .SS "-i, --interval <seconds>" .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-s, --stats" .PP Enable statistics for successful redirection. This option comes with a per packet tracing overhead, for recording all successful redirections. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The REDIRECT-CPU command" .PP In this mode, \fIxdp\-bench\fP sets up packet redirection using the \fIbpf_redirect_map\fP BPF helper triggered on packet reception on the ingress interface, using a cpumap as its target. Hence, this tool can be used to redirect packets on an interface from one CPU to another.
In addition to this, the tool then supports redirecting the packet to another output device when it is processed on the target CPU. .PP The syntax for the \fIredirect\-cpu\fP command is: .PP \fIxdp\-bench redirect\-cpu [options] <ifname-in> \-c 0 ... \-c N\fP .PP Where \fI<ifname-in>\fP is the name of the input interface from where packets will be redirected to the target CPU list specified using \fI\-c\fP. .PP The supported options are: .SS "-c, --cpu <cpu>" .PP Specify a possible target CPU index. This option must be passed at least once, and can be passed multiple times to specify a list of CPUs. Which CPU is chosen for a given packet depends on the value of the \fI\-\-program\-mode\fP option, described below. .SS "-p, --program-mode <mode>" .PP Specify a program that embeds a predefined policy deciding how packets are redirected to different CPUs. The following options are available: .RS .nf \fCno-touch - Redirect without touching packet data touch - Read packet data before redirecting round-robin - Cycle between target CPUs in a round-robin fashion (for each packet) l4-proto - Choose the target CPU based on the layer-4 protocol of packet l4-filter - Like l4-proto, but drop UDP packets with destination port 9 (used by pktgen) l4-hash - Use source and destination IP hashing to pick target CPU l4-sport - Use modulo of source port to pick target CPU l4-dport - Use modulo of destination port to pick target CPU \fP .fi .RE .PP The \fIno\-touch\fP and \fItouch\fP modes always redirect packets to the same CPU (the first value supplied to \fI\-\-cpu\fP). The \fIround\-robin\fP and \fIl4\-hash\fP modes distribute packets between all the CPUs supplied as \fI\-\-cpu\fP arguments, while \fIl4\-proto\fP and \fIl4\-filter\fP send TCP and unrecognised packets to CPU index 0, UDP packets to CPU index 1 and ICMP packets to CPU index 2 (where the index refers to the order the actual CPUs are given on the command line). .PP The default for this option is \fIl4\-hash\fP. .SS "-r, --remote-action <action>" .PP If this option is set, a separate program is installed into the cpumap, which will be invoked on the remote CPU after the packet is processed there. The action can be either \fIdrop\fP or \fIpass\fP, which will drop the packet or pass it to the regular networking stack, respectively. Or it can be \fIredirect\fP, which will cause the packet to be redirected to another interface and transmitted out that interface on the remote CPU. If this option is set to \fIredirect\fP, the target device must be specified using \fI\-\-redirect\-device\fP. .PP The default for this option is \fIdisabled\fP. .SS "-D, --redirect-device <ifname>" .PP Specify the device to redirect the packet to when it is received on the target CPU. Note that this option can only be specified with \fI\-\-remote\-action redirect\fP. .SS "-q, --qsize <packets>" .PP Set the queue size for the per-CPU cpumap ring buffer used for redirecting packets from multiple CPUs to one CPU. The default value is 2048 packets. .SS "-x, --stress-mode" .PP Stress the cpumap implementation by deallocating and reallocating the cpumap ring buffer on each polling interval. .SS "-i, --interval <seconds>" .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-s, --stats" .PP Enable statistics for successful redirection. This option comes with a per packet tracing overhead, for recording all successful redirections. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode.
The output mode can be switched by hitting C-\\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The REDIRECT-MAP command" .PP In this mode, \fIxdp\-bench\fP sets up packet redirection between two interfaces supplied on the command line using the \fIbpf_redirect_map()\fP BPF helper triggered on packet reception on the ingress interface, using a devmap as its target. .PP The syntax for the \fIredirect\-map\fP command is: .PP \fIxdp\-bench redirect\-map [options] <ifname-in> <ifname-out>\fP .PP Where \fI<ifname-in>\fP is the name of the input interface from where packets will be redirected to the output interface \fI<ifname-out>\fP. .PP The supported options are: .SS "-X, --load-egress" .PP Load a program in the devmap entry used for redirection, so that it is invoked after the packet is redirected to the target device, before it is transmitted out of the output interface. The remote program will update the packet data so its source MAC address matches the one of the destination interface. .SS "-i, --interval <seconds>" .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-s, --stats" .PP Enable statistics for successful redirection. This option comes with a per packet tracing overhead, for recording all successful redirections. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-\\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The REDIRECT-MULTI command" .PP In this mode, \fIxdp\-bench\fP sets up one-to-many packet redirection between interfaces supplied on the command line, using the \fIbpf_redirect_map\fP BPF helper triggered on packet reception on the ingress interface, using a devmap as its target. The packet is broadcast to all output interfaces specified on the command line, using devmap's packet broadcast feature. .PP The syntax for the \fIredirect\-multi\fP command is: .PP \fIxdp\-bench redirect\-multi [options] <ifname-in> <ifname-out0> ... <ifname-outN>\fP .PP Where \fI<ifname-in>\fP is the name of the input interface from where packets will be redirected to one or more output interface(s).
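.PP For example, the following invocation (the interface names are hypothetical) broadcasts every packet arriving on eth0 out of both eth1 and eth2, with redirect statistics enabled: .RS .nf \fC# xdp-bench redirect-multi eth0 eth1 eth2 -s \fP .fi .RE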
.PP The supported options are: .SS "-X, --load-egress" .PP Load a program in the devmap entry used for redirection, so that it is invoked after the packet is redirected to the target device, before it is transmitted out of the output interface. The remote program will update the packet data so its source MAC address matches the one of the destination interface. .SS "-i, --interval " .PP Set the polling interval for collecting all statistics and displaying them to the output. The unit of interval is in seconds. .SS "-s, --stats" .PP Enable statistics for successful redirection. This option comes with a per packet tracing overhead, for recording all successful redirections. .SS "-e, --extended" .PP Start xdp-bench in "extended" output mode. If not set, xdp-bench will start in "terse" mode. The output mode can be switched by hitting C-$\ while the program is running. See also the \fBOutput Format Description\fP section below. .SS "-m, --mode" .PP Selects the XDP program mode (native or skb). Note that native XDP mode is the default, and loading the redirect program in skb manner is neither performant, nor recommended. However, this option is useful if the interface driver lacks native XDP support, or when simply testing the tool. .SS "-v, --verbose" .PP Enable verbose logging. Supply twice to enable verbose logging from the underlying \fIlibxdp\fP and \fIlibbpf\fP libraries. .SS "--version" .PP Show the application version and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "Output Format Description" .PP By default, redirect success statistics are disabled, use \fI\-\-stats\fP to enable. The terse output mode is default, extended output mode can be activated using the \fI\-\-extended\fP command line option. .PP SIGQUIT (Ctrl + \\) can be used to switch the mode dynamically at runtime. .PP Terse mode displays at most the following fields: .RS .nf \fCrx/s Number of packets received per second redir/s Number of packets successfully redirected per second err,drop/s Aggregated count of errors per second (including dropped packets when not using the drop command) xmit/s Number of packets transmitted on the output device per second \fP .fi .RE .PP Extended output mode displays at most the following fields: .RS .nf \fCFIELD DESCRIPTION receive Displays the number of packets received and errors encountered Whenever an error or packet drop occurs, details of per CPU error and drop statistics will be expanded inline in terse mode. pkt/s - Packets received per second drop/s - Packets dropped per second error/s - Errors encountered per second redirect - Displays the number of packets successfully redirected Errors encountered are expanded under redirect_err field Note that passing -s to enable it has a per packet overhead redir/s - Packets redirected successfully per second redirect_err Displays the number of packets that failed redirection The errno is expanded under this field with per CPU count The recognized errors are: EINVAL: Invalid redirection ENETDOWN: Device being redirected to is down EMSGSIZE: Packet length too large for device EOPNOTSUPP: Operation not supported ENOSPC: No space in ptr_ring of cpumap kthread error/s - Packets that failed redirection per second enqueue to cpu N Displays the number of packets enqueued to bulk queue of CPU N Expands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N Received packets can be associated with the CPU redirect program is enqueuing packets to. 
pkt/s - Packets enqueued per second from other CPU to CPU N drop/s - Packets dropped when trying to enqueue to CPU N bulk-avg - Average number of packets processed for each event kthread Displays the number of packets processed in CPUMAP kthread for each CPU Packets consumed from ptr_ring in kthread, and its xdp_stats (after calling CPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total and then per-CPU to associate it to each CPU's pinned CPUMAP kthread. pkt/s - Packets consumed per second from ptr_ring drop/s - Packets dropped per second in kthread sched - Number of times kthread called schedule() xdp_stats (also expands to per-CPU counts) pass/s - XDP_PASS count for CPUMAP program execution drop/s - XDP_DROP count for CPUMAP program execution redir/s - XDP_REDIRECT count for CPUMAP program execution xdp_exception Displays xdp_exception tracepoint events This can occur due to internal driver errors, unrecognized XDP actions and due to explicit user trigger by use of XDP_ABORTED Each action is expanded below this field with its count hit/s - Number of times the tracepoint was hit per second devmap_xmit Displays devmap_xmit tracepoint events This tracepoint is invoked for successful transmissions on output device but these statistics are not available for generic XDP mode, hence they will be omitted from the output when using SKB mode xmit/s - Number of packets that were transmitted per second drop/s - Number of packets that failed transmissions per second drv_err/s - Number of internal driver errors per second bulk-avg - Average number of packets processed for each event \fP .fi .RE .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP Earlier xdp-redirect tools were written by Jesper Dangaard Brouer and John Fastabend. They were then rewritten to support more features by Kumar Kartikeya Dwivedi, who also ported them to xdp-tools together with Toke Høiland-Jørgensen. This man page was written by Kumar Kartikeya Dwivedi and Toke Høiland-Jørgensen. 
xdp-tools-1.5.4/xdp-bench/xdp_redirect_devmap_multi.bpf.c0000644000175100001660000000343715003640462023030 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 #include #include #include #include #include struct { __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, 32); } forward_map_general SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(struct bpf_devmap_val)); __uint(max_entries, 32); } forward_map_native SEC(".maps"); /* map to store egress interfaces mac addresses */ struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, __u32); __type(value, __be64); __uint(max_entries, 32); } mac_map SEC(".maps"); static int xdp_redirect_devmap_multi(struct xdp_md *ctx, void *forward_map) { __u32 key = bpf_get_smp_processor_id(); struct datarec *rec; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); return bpf_redirect_map(forward_map, 0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); } SEC("xdp") int redir_multi_general(struct xdp_md *ctx) { return xdp_redirect_devmap_multi(ctx, &forward_map_general); } SEC("xdp") int redir_multi_native(struct xdp_md *ctx) { return xdp_redirect_devmap_multi(ctx, &forward_map_native); } SEC("xdp/devmap") int xdp_devmap_prog(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = ctx->egress_ifindex; struct ethhdr *eth = data; __be64 *mac; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; mac = bpf_map_lookup_elem(&mac_map, &key); if (mac) __builtin_memcpy(eth->h_source, mac, ETH_ALEN); return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-bench/xdp_basic.c0000644000175100001660000001132215003640462016764 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 John Fastabend */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "logging.h" #include "xdp-bench.h" #include "xdp_sample.h" #include "xdp_basic.skel.h" static int mask = SAMPLE_RX_CNT | SAMPLE_EXCEPTION_CNT; DEFINE_SAMPLE_INIT(xdp_basic); const struct basic_opts defaults_drop = { .mode = XDP_MODE_NATIVE, .interval = 2 }; const struct basic_opts defaults_pass = { .mode = XDP_MODE_NATIVE, .interval = 2 }; const struct basic_opts defaults_tx = { .mode = XDP_MODE_NATIVE, .interval = 2, .program_mode = BASIC_SWAP_MACS }; static int do_basic(const struct basic_opts *opt, enum xdp_action action) { DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct xdp_program *xdp_prog = NULL; struct bpf_program *prog = NULL; int ret = EXIT_FAIL_OPTION; struct xdp_basic *skel; if (opt->extended) sample_switch_mode(); skel = xdp_basic__open(); if (!skel) { pr_warn("Failed to xdp_basic__open: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end; } ret = sample_init_pre_load(skel, opt->iface_in.ifname); if (ret < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } skel->rodata->action = action; if (action == XDP_DROP) mask |= SAMPLE_DROP_OK; if (opt->rxq_stats) { skel->rodata->rxq_stats = true; mask |= SAMPLE_RXQ_STATS; } /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(skel->obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && 
bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); switch (opt->program_mode) { case BASIC_NO_TOUCH: opts.prog_name = "xdp_basic_prog"; break; case BASIC_READ_DATA: opts.prog_name = (opt->load_mode == BASIC_LOAD_BYTES) ? "xdp_read_data_load_bytes_prog" : "xdp_read_data_prog"; break; case BASIC_PARSE_IPHDR: opts.prog_name = (opt->load_mode == BASIC_LOAD_BYTES) ? "xdp_parse_load_bytes_prog" : "xdp_parse_prog"; break; case BASIC_SWAP_MACS: opts.prog_name = (opt->load_mode == BASIC_LOAD_BYTES) ? "xdp_swap_macs_load_bytes_prog" : "xdp_swap_macs_prog"; break; } opts.obj = skel->obj; xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { ret = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-ret)); goto end_destroy; } /* We always set the frags support bit: nothing the program does is * incompatible with multibuf, and it's perfectly fine to load a program * with frags support on an interface with a small MTU. We don't risk * setting any flags the kernel will balk at, either, since libxdp will * do the feature probing for us and skip the flag if the kernel doesn't * support it. * * The function below returns EOPNOTSUPP if libbpf is too old to support * setting the flags, but we just ignore that, since in such a case the * best we can do is just attempt to run without the frags support. */ xdp_program__set_xdp_frags_support(xdp_prog, true); ret = xdp_program__attach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); if (ret < 0) { pr_warn("Failed to attach XDP program: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } ret = sample_init(skel, mask, 0, 0); if (ret < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = EXIT_FAIL; pr_info("%s packets on %s (ifindex %d; driver %s)\n", action == XDP_DROP ? "Dropping" : action == XDP_TX ?
"Hairpinning (XDP_TX)" : "Passing", opt->iface_in.ifname, opt->iface_in.ifindex, get_driver_name(opt->iface_in.ifindex)); ret = sample_run(opt->interval, NULL, NULL); if (ret < 0) { pr_warn("Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = EXIT_OK; end_detach: xdp_program__detach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); end_destroy: xdp_basic__destroy(skel); end: sample_teardown(); return ret; } int do_drop(const void *cfg, __unused const char *pin_root_path) { const struct basic_opts *opt = cfg; return do_basic(opt, XDP_DROP); } int do_pass(const void *cfg, __unused const char *pin_root_path) { const struct basic_opts *opt = cfg; return do_basic(opt, XDP_PASS); } int do_tx(const void *cfg, __unused const char *pin_root_path) { const struct basic_opts *opt = cfg; return do_basic(opt, XDP_TX); } xdp-tools-1.5.4/xdp-bench/xdp-bench.h0000644000175100001660000000464515003640462016717 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only #ifndef XDP_REDIRECT_H #define XDP_REDIRECT_H #include #include "params.h" #include "util.h" #define MAX_IFACE_NUM 32 int do_drop(const void *cfg, const char *pin_root_path); int do_pass(const void *cfg, const char *pin_root_path); int do_tx(const void *cfg, const char *pin_root_path); int do_redirect_basic(const void *cfg, const char *pin_root_path); int do_redirect_cpumap(const void *cfg, const char *pin_root_path); int do_redirect_devmap(const void *cfg, const char *pin_root_path); int do_redirect_devmap_multi(const void *cfg, const char *pin_root_path); enum basic_program_mode { BASIC_NO_TOUCH, BASIC_READ_DATA, BASIC_PARSE_IPHDR, BASIC_SWAP_MACS, }; enum basic_load_mode { BASIC_LOAD_DPA, BASIC_LOAD_BYTES, }; struct basic_opts { bool extended; bool rxq_stats; __u32 interval; enum xdp_attach_mode mode; enum basic_program_mode program_mode; enum basic_load_mode load_mode; struct iface iface_in; }; struct redirect_opts { bool stats; bool extended; __u32 interval; enum xdp_attach_mode mode; enum basic_load_mode load_mode; struct iface iface_in; struct iface iface_out; }; struct devmap_opts { bool stats; bool extended; bool load_egress; __u32 interval; enum xdp_attach_mode mode; struct iface iface_in; struct iface iface_out; }; struct devmap_multi_opts { bool stats; bool extended; bool load_egress; __u32 interval; enum xdp_attach_mode mode; struct iface *ifaces; }; enum cpumap_remote_action { ACTION_DISABLED, ACTION_DROP, ACTION_PASS, ACTION_REDIRECT, }; enum cpumap_program_mode { CPUMAP_NO_TOUCH, CPUMAP_TOUCH_DATA, CPUMAP_CPU_ROUND_ROBIN, CPUMAP_CPU_L4_PROTO, CPUMAP_CPU_L4_PROTO_FILTER, CPUMAP_CPU_L4_HASH, CPUMAP_CPU_L4_SPORT, CPUMAP_CPU_L4_DPORT, }; struct cpumap_opts { bool stats; bool extended; bool stress_mode; __u32 interval; __u32 qsize; struct u32_multi cpus; enum xdp_attach_mode mode; enum cpumap_remote_action remote_action; enum cpumap_program_mode program_mode; struct iface iface_in; struct iface redir_iface; }; extern const struct basic_opts defaults_drop; extern const struct basic_opts defaults_pass; extern const struct basic_opts defaults_tx; extern const struct redirect_opts defaults_redirect_basic; extern const struct cpumap_opts defaults_redirect_cpumap; extern const struct devmap_opts defaults_redirect_devmap; extern const struct devmap_multi_opts defaults_redirect_devmap_multi; #endif xdp-tools-1.5.4/xdp-bench/xdp_redirect_cpumap.c0000644000175100001660000002330215003640462021052 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2017 Jesper Dangaard Brouer, Red 
Hat, Inc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "logging.h" #include "xdp-bench.h" #include "xdp_sample.h" #include "xdp_redirect_cpumap.skel.h" static int map_fd; static int avail_fd; static int count_fd; static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | SAMPLE_CPUMAP_ENQUEUE_CNT | SAMPLE_CPUMAP_KTHREAD_CNT | SAMPLE_EXCEPTION_CNT; const struct cpumap_opts defaults_redirect_cpumap = { .mode = XDP_MODE_NATIVE, .interval = 2, .qsize = 2048, .program_mode = CPUMAP_CPU_L4_HASH, }; static const char *cpumap_prog_names[] = { "cpumap_no_touch", "cpumap_touch_data", "cpumap_round_robin", "cpumap_l4_proto", "cpumap_l4_filter", "cpumap_l4_hash", "cpumap_l4_sport", "cpumap_l4_dport", }; DEFINE_SAMPLE_INIT(xdp_redirect_cpumap); static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value, __u32 avail_idx, bool new) { __u32 curr_cpus_count = 0; __u32 key = 0; int ret; /* Add a CPU entry to cpumap, as this allocate a cpu entry in * the kernel for the cpu. */ ret = bpf_map_update_elem(map_fd, &cpu, value, 0); if (ret < 0) { pr_warn("Create CPU entry failed: %s\n", strerror(errno)); return ret; } /* Inform bpf_prog's that a new CPU is available to select * from via some control maps. */ ret = bpf_map_update_elem(avail_fd, &avail_idx, &cpu, 0); if (ret < 0) { pr_warn("Add to avail CPUs failed: %s\n", strerror(errno)); return ret; } /* When not replacing/updating existing entry, bump the count */ ret = bpf_map_lookup_elem(count_fd, &key, &curr_cpus_count); if (ret < 0) { pr_warn("Failed reading curr cpus_count: %s\n", strerror(errno)); return ret; } if (new) { curr_cpus_count++; ret = bpf_map_update_elem(count_fd, &key, &curr_cpus_count, 0); if (ret < 0) { pr_warn("Failed write curr cpus_count: %s\n", strerror(errno)); return ret; } } pr_debug("%s CPU: %u as idx: %u qsize: %d cpumap_prog_fd: %d (cpus_count: %u)\n", new ? "Add new" : "Replace", cpu, avail_idx, value->qsize, value->bpf_prog.fd, curr_cpus_count); return 0; } /* CPUs are zero-indexed. Thus, add a special sentinel default value * in map cpus_available to mark CPU index'es not configured */ static int mark_cpus_unavailable(void) { int ret, i, n_cpus = libbpf_num_possible_cpus(); __u32 invalid_cpu = n_cpus; for (i = 0; i < n_cpus; i++) { ret = bpf_map_update_elem(avail_fd, &i, &invalid_cpu, 0); if (ret < 0) { pr_warn("Failed marking CPU unavailable: %s\n", strerror(errno)); return ret; } } return 0; } /* Stress cpumap management code by concurrently changing underlying cpumap */ static void stress_cpumap(void *ctx) { struct bpf_cpumap_val *value = ctx; /* Changing qsize will cause kernel to free and alloc a new * bpf_cpu_map_entry, with an associated/complicated tear-down * procedure. 
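 * Each replacement means the old entry's kthread has to be stopped and
 * its queue drained before the new entry takes over; that teardown path
 * is what this stress test is meant to exercise.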
*/ value->qsize = 1024; create_cpu_entry(1, value, 0, false); value->qsize = 8; create_cpu_entry(1, value, 0, false); value->qsize = 16000; create_cpu_entry(1, value, 0, false); } static int set_cpumap_prog(struct xdp_redirect_cpumap *skel, enum cpumap_remote_action action, const struct iface *redir_iface) { struct bpf_devmap_val val = {}; __u32 key = 0; int err; switch (action) { case ACTION_DISABLED: return 0; case ACTION_DROP: return bpf_program__fd(skel->progs.cpumap_drop); case ACTION_PASS: return bpf_program__fd(skel->progs.cpumap_pass); case ACTION_REDIRECT: break; default: return -EINVAL; } if (!redir_iface->ifindex) { pr_warn("Must specify redirect device when using --remote-action 'redirect'\n"); return -EINVAL; } if (get_mac_addr(redir_iface->ifindex, skel->bss->tx_mac_addr) < 0) { pr_warn("Couldn't get MAC address for interface %s\n", redir_iface->ifname); return -EINVAL; } val.ifindex = redir_iface->ifindex; val.bpf_prog.fd = bpf_program__fd(skel->progs.redirect_egress_prog); err = bpf_map_update_elem(bpf_map__fd(skel->maps.tx_port), &key, &val, 0); if (err < 0) return -errno; return bpf_program__fd(skel->progs.cpumap_redirect); } int do_redirect_cpumap(const void *cfg, __unused const char *pin_root_path) { const struct cpumap_opts *opt = cfg; DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct xdp_program *xdp_prog = NULL; struct xdp_redirect_cpumap *skel; struct bpf_program *prog = NULL; struct bpf_map_info info = {}; struct bpf_cpumap_val value; __u32 infosz = sizeof(info); int ret = EXIT_FAIL_OPTION; int n_cpus, fd; size_t i; if (opt->extended) sample_switch_mode(); if (opt->stats) mask |= SAMPLE_REDIRECT_MAP_CNT; if (opt->redir_iface.ifindex) mask |= SAMPLE_DEVMAP_XMIT_CNT_MULTI; n_cpus = libbpf_num_possible_cpus(); /* Notice: Choosing the queue size is very important when the CPU is * configured with power-saving states. * * If the deepest state takes 133 usec to wake up from (133/10^6 s), and the * link speed is 10Gbit/s ((10*10^9/8) bytes/sec), how many bytes can * arrive within 133 usec at this speed? (10*10^9/8)*(133/10^6) = * 166250 bytes. With MTU-sized packets this is 110 packets, and with * minimum-sized Ethernet frames (84 bytes incl. MAC preamble + intergap) * it is 1979 packets. * * Set the default cpumap queue to 2048: the small-packet worst case, * plus 64 packets for the kthread wakeup call (due to xdp_do_flush), * comes to 2043 packets.
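 * (I.e. 166250/1500 ~= 110 full-MTU frames and 166250/84 ~= 1979
 * minimum-sized frames; 1979 + 64 = 2043, rounded up to the 2048
 * default as the next power of two.)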
* * Sysadmins can configure the system to avoid deep sleep via: * tuned-adm profile network-latency */ skel = xdp_redirect_cpumap__open(); if (!skel) { pr_warn("Failed to xdp_redirect_cpumap__open: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end; } /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(skel->obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); prog = bpf_object__find_program_by_name(skel->obj, cpumap_prog_names[opt->program_mode]); if (!prog) { pr_warn("Failed to find program '%s'\n", cpumap_prog_names[opt->program_mode]); goto end_destroy; } ret = sample_init_pre_load(skel, opt->iface_in.ifname); if (ret < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } if (bpf_map__set_max_entries(skel->maps.cpu_map, n_cpus) < 0) { pr_warn("Failed to set max entries for cpu_map map: %s", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_destroy; } if (bpf_map__set_max_entries(skel->maps.cpus_available, n_cpus) < 0) { pr_warn("Failed to set max entries for cpus_available map: %s", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_destroy; } ret = EXIT_FAIL_OPTION; skel->rodata->from_match[0] = opt->iface_in.ifindex; if (opt->redir_iface.ifindex) skel->rodata->to_match[0] = opt->redir_iface.ifindex; opts.obj = skel->obj; opts.prog_name = bpf_program__name(prog); xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { ret = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-ret)); goto end_destroy; } /* We always set the frags support bit: nothing the program does is * incompatible with multibuf, and it's perfectly fine to load a program * with frags support on an interface with a small MTU. We don't risk * setting any flags the kernel will balk at, either, since libxdp will * do the feature probing for us and skip the flag if the kernel doesn't * support it. * * The function below returns EOPNOTSUPP if libbpf is too old to support * setting the flags, but we just ignore that, since in such a case the * best we can do is just attempt to run without the frags support. */ xdp_program__set_xdp_frags_support(xdp_prog, true); ret = xdp_program__attach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); if (ret < 0) { pr_warn("Failed to attach XDP program: %s\n", strerror(-ret)); goto end_destroy; } ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz); if (ret < 0) { pr_warn("Failed bpf_obj_get_info_by_fd for cpumap: %s\n", strerror(errno)); goto end_detach; } skel->bss->cpumap_map_id = info.id; map_fd = bpf_map__fd(skel->maps.cpu_map); avail_fd = bpf_map__fd(skel->maps.cpus_available); count_fd = bpf_map__fd(skel->maps.cpus_count); ret = mark_cpus_unavailable(); if (ret < 0) { pr_warn("Unable to mark CPUs as unavailable\n"); goto end_detach; } ret = sample_init(skel, mask, opt->iface_in.ifindex, 0); if (ret < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } fd = set_cpumap_prog(skel, opt->remote_action, &opt->redir_iface); if (fd < 0) { ret = EXIT_FAIL_BPF; goto end_detach; } value.qsize = opt->qsize; value.bpf_prog.fd = fd; for (i = 0; i < opt->cpus.num_vals; i++) { if (create_cpu_entry(opt->cpus.vals[i], &value, i, true) < 0) { pr_warn("Cannot proceed, exiting\n"); ret = EXIT_FAIL; goto end_detach; } } ret = sample_run(opt->interval, opt->stress_mode ?
stress_cpumap : NULL, &value); if (ret < 0) { pr_warn("Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = EXIT_OK; end_detach: xdp_program__detach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); end_destroy: xdp_program__close(xdp_prog); xdp_redirect_cpumap__destroy(skel); end: sample_teardown(); return ret; } xdp-tools-1.5.4/xdp-bench/xdp-bench.c0000644000175100001660000002616415003640462016712 0ustar runnerdocker#define _GNU_SOURCE #include #include #include #include "xdp-bench.h" #include "params.h" #define PROG_NAME "xdp-bench" int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: xdp-bench COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " drop - Drop all packets on an interface\n" " pass - Pass all packets to the network stack\n" " tx - Transmit packets back out on an interface (hairpin forwarding)\n" " redirect - XDP redirect using the bpf_redirect() helper\n" " redirect-cpu - XDP CPU redirect using BPF_MAP_TYPE_CPUMAP\n" " redirect-map - XDP redirect using BPF_MAP_TYPE_DEVMAP\n" " redirect-multi - XDP multi-redirect using BPF_MAP_TYPE_DEVMAP and the BPF_F_BROADCAST flag\n" " help - show this help message\n" "\n" "Use 'xdp-bench COMMAND --help' to see options for each command\n"); return -1; } struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {NULL, 0} }; struct enum_val basic_program_modes[] = { {"no-touch", BASIC_NO_TOUCH}, {"read-data", BASIC_READ_DATA}, {"parse-ip", BASIC_PARSE_IPHDR}, {"swap-macs", BASIC_SWAP_MACS}, {NULL, 0} }; struct enum_val basic_load_modes[] = { {"dpa", BASIC_LOAD_DPA}, {"load-bytes", BASIC_LOAD_BYTES}, {NULL, 0} }; struct enum_val cpumap_remote_actions[] = { {"disabled", ACTION_DISABLED}, {"drop", ACTION_DROP}, {"pass", ACTION_PASS}, {"redirect", ACTION_REDIRECT}, {NULL, 0} }; struct enum_val cpumap_program_modes[] = { {"no-touch", CPUMAP_NO_TOUCH}, {"touch", CPUMAP_TOUCH_DATA}, {"round-robin", CPUMAP_CPU_ROUND_ROBIN}, {"l4-proto", CPUMAP_CPU_L4_PROTO}, {"l4-filter", CPUMAP_CPU_L4_PROTO_FILTER}, {"l4-hash", CPUMAP_CPU_L4_HASH}, {"l4-sport", CPUMAP_CPU_L4_SPORT}, {"l4-dport", CPUMAP_CPU_L4_DPORT}, {NULL, 0} }; struct prog_option basic_options[] = { DEFINE_OPTION("packet-operation", OPT_ENUM, struct basic_opts, program_mode, .short_opt = 'p', .metavar = "", .typearg = basic_program_modes, .help = "Action to take before dropping packet."), DEFINE_OPTION("program-mode", OPT_ENUM, struct basic_opts, program_mode, .typearg = basic_program_modes, .hidden = true), DEFINE_OPTION("load-mode", OPT_ENUM, struct basic_opts, load_mode, .short_opt = 'l', .metavar = "", .typearg = basic_load_modes, .help = "How to load (and store) data; default dpa"), DEFINE_OPTION("rxq-stats", OPT_BOOL, struct basic_opts, rxq_stats, .short_opt = 'r', .help = "Collect per-RXQ drop statistics"), DEFINE_OPTION("interval", OPT_U32, struct basic_opts, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval (default 2)"), DEFINE_OPTION("extended", OPT_BOOL, struct basic_opts, extended, .short_opt = 'e', .help = "Start running in extended output mode (C^\\ to toggle)"), DEFINE_OPTION("xdp-mode", OPT_ENUM, struct basic_opts, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("dev", OPT_IFNAME, struct basic_opts, iface_in, .positional = true, .metavar = "", .required = true, .help = "Load on device "), END_OPTIONS }; struct prog_option redirect_basic_options[] = { 
DEFINE_OPTION("load-mode", OPT_ENUM, struct redirect_opts, load_mode, .short_opt = 'l', .metavar = "", .typearg = basic_load_modes, .help = "How to load (and store) data; default dpa"), DEFINE_OPTION("interval", OPT_U32, struct redirect_opts, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval (default 2)"), DEFINE_OPTION("stats", OPT_BOOL, struct redirect_opts, stats, .short_opt = 's', .help = "Enable statistics for transmitted packets (not just errors)"), DEFINE_OPTION("extended", OPT_BOOL, struct redirect_opts, extended, .short_opt = 'e', .help = "Start running in extended output mode (C^\\ to toggle)"), DEFINE_OPTION("mode", OPT_ENUM, struct redirect_opts, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("dev_in", OPT_IFNAME, struct redirect_opts, iface_in, .positional = true, .metavar = "", .required = true, .help = "Redirect from device "), DEFINE_OPTION("dev_out", OPT_IFNAME, struct redirect_opts, iface_out, .positional = true, .metavar = "", .required = true, .help = "Redirect to device "), END_OPTIONS }; struct prog_option redirect_cpumap_options[] = { DEFINE_OPTION("cpu", OPT_U32_MULTI, struct cpumap_opts, cpus, .short_opt = 'c', .metavar = "", .required = true, .help = "Insert CPU into CPUMAP (can be specified multiple times)"), DEFINE_OPTION("dev", OPT_IFNAME, struct cpumap_opts, iface_in, .positional = true, .metavar = "", .required = true, .help = "Run on "), DEFINE_OPTION("program-mode", OPT_ENUM, struct cpumap_opts, program_mode, .short_opt = 'p', .metavar = "", .typearg = cpumap_program_modes, .help = "Redirect to CPUs using . Default l4-hash."), DEFINE_OPTION("remote-action", OPT_ENUM, struct cpumap_opts, remote_action, .short_opt = 'r', .metavar = "", .typearg = cpumap_remote_actions, .help = "Perform on the remote CPU. 
Default disabled."), DEFINE_OPTION("redirect-device", OPT_IFNAME, struct cpumap_opts, redir_iface, .short_opt = 'D', .metavar = "", .help = "Redirect packets to on remote CPU (when --remote-action is 'redirect')"), DEFINE_OPTION("qsize", OPT_U32, struct cpumap_opts, qsize, .short_opt = 'q', .metavar = "", .help = "CPUMAP queue size (default 2048)"), DEFINE_OPTION("stress-mode", OPT_BOOL, struct cpumap_opts, stress_mode, .short_opt = 'x', .help = "Stress the kernel CPUMAP setup and teardown code while running"), DEFINE_OPTION("interval", OPT_U32, struct cpumap_opts, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval (default 2)"), DEFINE_OPTION("stats", OPT_BOOL, struct cpumap_opts, stats, .short_opt = 's', .help = "Enable statistics for transmitted packets (not just errors)"), DEFINE_OPTION("extended", OPT_BOOL, struct basic_opts, extended, .short_opt = 'e', .help = "Start running in extended output mode (C^\\ to toggle)"), DEFINE_OPTION("xdp-mode", OPT_ENUM, struct cpumap_opts, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), END_OPTIONS }; struct prog_option redirect_devmap_options[] = { DEFINE_OPTION("load-egress", OPT_BOOL, struct devmap_opts, load_egress, .short_opt = 'X', .help = "Load an egress program into the devmap"), DEFINE_OPTION("interval", OPT_U32, struct devmap_opts, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval (default 2)"), DEFINE_OPTION("stats", OPT_BOOL, struct devmap_opts, stats, .short_opt = 's', .help = "Enable statistics for transmitted packets (not just errors)"), DEFINE_OPTION("extended", OPT_BOOL, struct devmap_opts, extended, .short_opt = 'e', .help = "Start running in extended output mode (C^\\ to toggle)"), DEFINE_OPTION("mode", OPT_ENUM, struct devmap_opts, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("dev_in", OPT_IFNAME, struct devmap_opts, iface_in, .positional = true, .metavar = "", .required = true, .help = "Redirect from device "), DEFINE_OPTION("dev_out", OPT_IFNAME, struct devmap_opts, iface_out, .positional = true, .metavar = "", .required = true, .help = "Redirect to device "), END_OPTIONS }; struct prog_option redirect_devmap_multi_options[] = { DEFINE_OPTION("load-egress", OPT_BOOL, struct devmap_multi_opts, load_egress, .short_opt = 'X', .help = "Load an egress program into the devmap"), DEFINE_OPTION("interval", OPT_U32, struct devmap_multi_opts, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval (default 2)"), DEFINE_OPTION("stats", OPT_BOOL, struct devmap_multi_opts, stats, .short_opt = 's', .help = "Enable statistics for transmitted packets (not just errors)"), DEFINE_OPTION("extended", OPT_BOOL, struct devmap_multi_opts, extended, .short_opt = 'e', .help = "Start running in extended output mode (C^\\ to toggle)"), DEFINE_OPTION("mode", OPT_ENUM, struct devmap_multi_opts, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("devs", OPT_IFNAME_MULTI, struct devmap_multi_opts, ifaces, .positional = true, .metavar = "", .min_num = 2, .max_num = MAX_IFACE_NUM, .required = true, .help = "Redirect from and to devices "), END_OPTIONS }; static const struct prog_command cmds[] = { { .name = "drop", .func = do_drop, .options = basic_options, .default_cfg = &defaults_drop, .doc = "Drop all packets on an interface" }, { .name = "pass", .func = do_pass, .options = basic_options, 
.default_cfg = &defaults_pass, .doc = "Pass all packets to the network stack" }, { .name = "tx", .func = do_tx, .options = basic_options, .default_cfg = &defaults_tx, .doc = "Transmit packets back out an interface (hairpin forwarding)" }, DEFINE_COMMAND_NAME("redirect", redirect_basic, "XDP redirect using the bpf_redirect() helper"), DEFINE_COMMAND_NAME("redirect-cpu", redirect_cpumap, "XDP CPU redirect using BPF_MAP_TYPE_CPUMAP"), DEFINE_COMMAND_NAME("redirect-map", redirect_devmap, "XDP redirect using BPF_MAP_TYPE_DEVMAP"), DEFINE_COMMAND_NAME( "redirect-multi", redirect_devmap_multi, "XDP multi-redirect using BPF_MAP_TYPE_DEVMAP and the BPF_F_BROADCAST flag"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct basic_opts basic; struct cpumap_opts cpumap; struct devmap_opts devmap; struct devmap_multi_opts devmap_multi; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, false); return do_help(NULL, NULL); } xdp-tools-1.5.4/xdp-bench/.gitignore0000644000175100001660000000001215003640462016646 0ustar runnerdockerxdp-bench xdp-tools-1.5.4/xdp-bench/hash_func01.h0000644000175100001660000000245015003640462017136 0ustar runnerdocker/* SPDX-License-Identifier: LGPL-2.1 * * Based on Paul Hsieh's (LGPG 2.1) hash function * From: http://www.azillionmonkeys.com/qed/hash.html */ #define get16bits(d) (*((const __u16 *) (d))) static __always_inline __u32 SuperFastHash(const char *data, int len, __u32 initval) { __u32 hash = initval; __u32 tmp; int rem; if (len <= 0 || data == NULL) return 0; rem = len & 3; len >>= 2; /* Main loop */ #pragma clang loop unroll(full) for (;len > 0; len--) { hash += get16bits (data); tmp = (get16bits (data+2) << 11) ^ hash; hash = (hash << 16) ^ tmp; data += 2*sizeof (__u16); hash += hash >> 11; } /* Handle end cases */ switch (rem) { case 3: hash += get16bits (data); hash ^= hash << 16; hash ^= ((signed char)data[sizeof (__u16)]) << 18; hash += hash >> 11; break; case 2: hash += get16bits (data); hash ^= hash << 11; hash += hash >> 17; break; case 1: hash += (signed char)*data; hash ^= hash << 10; hash += hash >> 1; } /* Force "avalanching" of final 127 bits */ hash ^= hash << 3; hash += hash >> 5; hash ^= hash << 4; hash += hash >> 17; hash ^= hash << 25; hash += hash >> 6; return hash; } xdp-tools-1.5.4/xdp-bench/xdp_redirect_devmap.bpf.c0000644000175100001660000000453715003640462021620 0ustar runnerdocker/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include #include #include #include #include /* The 2nd xdp prog on egress does not support skb mode, so we define two * maps, tx_port_general and tx_port_native. 
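 * The int-sized value is the legacy devmap entry format, accepted by
 * kernels that predate struct bpf_devmap_val; the struct format
 * additionally carries a bpf_prog.fd, which is what allows the egress
 * program below to be attached to each entry.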
*/ struct { __uint(type, BPF_MAP_TYPE_DEVMAP); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, 1); } tx_port_general SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_DEVMAP); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(struct bpf_devmap_val)); __uint(max_entries, 1); } tx_port_native SEC(".maps"); /* store egress interface mac address */ const volatile char tx_mac_addr[ETH_ALEN]; static __always_inline int xdp_redirect_devmap(struct xdp_md *ctx, void *redirect_map) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 key = bpf_get_smp_processor_id(); struct ethhdr *eth = data; struct datarec *rec; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_PASS; NO_TEAR_INC(rec->processed); swap_src_dst_mac(data); return bpf_redirect_map(redirect_map, 0, 0); } SEC("xdp") int redir_devmap_general(struct xdp_md *ctx) { return xdp_redirect_devmap(ctx, &tx_port_general); } SEC("xdp") int redir_devmap_native(struct xdp_md *ctx) { return xdp_redirect_devmap(ctx, &tx_port_native); } SEC("xdp/devmap") int xdp_redirect_devmap_egress(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; __u64 nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; __builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN); return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-bench/xdp_redirect_devmap.c0000644000175100001660000001471315003640462021047 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "logging.h" #include "xdp-bench.h" #include "xdp_sample.h" #include "xdp_redirect_devmap.skel.h" static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI; DEFINE_SAMPLE_INIT(xdp_redirect_devmap); const struct devmap_opts defaults_redirect_devmap = { .mode = XDP_MODE_NATIVE, .interval = 2 }; int do_redirect_devmap(const void *cfg, __unused const char *pin_root_path) { const struct devmap_opts *opt = cfg; struct xdp_program *xdp_prog = NULL, *dummy_prog = NULL; const char *prog_name = "redir_devmap_native"; DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct bpf_devmap_val devmap_val = {}; struct bpf_map *tx_port_map = NULL; struct xdp_redirect_devmap *skel; struct bpf_program *prog = NULL; char str[2 * IF_NAMESIZE + 1]; int ret = EXIT_FAIL_OPTION; bool tried = false; int key = 0; if (opt->extended) sample_switch_mode(); if (opt->mode == XDP_MODE_SKB) /* devmap_xmit tracepoint not available */ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI); if (opt->stats) mask |= SAMPLE_REDIRECT_CNT; restart: skel = xdp_redirect_devmap__open(); if (!skel) { pr_warn("Failed to xdp_redirect_devmap__open: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end; } /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(skel->obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); if (tried) { tx_port_map = skel->maps.tx_port_general; 
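/* Fallback attempt: the first try with struct bpf_devmap_val values
 * failed to attach, so retry with the legacy int-sized map and disable
 * the egress program, which depends on the struct-valued map. */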
bpf_program__set_autoload(skel->progs.xdp_redirect_devmap_egress, false); #ifdef HAVE_LIBBPF_BPF_MAP__SET_AUTOCREATE bpf_map__set_autocreate(skel->maps.tx_port_native, false); #else pr_warn("Libbpf is missing bpf_map__set_autocreate(), fallback won't work\n"); ret = EXIT_FAIL_BPF; goto end_destroy; #endif } else { #ifdef HAVE_LIBBPF_BPF_MAP__SET_AUTOCREATE bpf_map__set_autocreate(skel->maps.tx_port_general, false); #endif tx_port_map = skel->maps.tx_port_native; } ret = sample_init_pre_load(skel, opt->iface_in.ifname); if (ret < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } /* Load 2nd xdp prog on egress. */ if (opt->load_egress) { ret = get_mac_addr(opt->iface_out.ifindex, skel->rodata->tx_mac_addr); if (ret < 0) { pr_warn("Failed to get interface %s mac address: %s\n", opt->iface_out.ifname, strerror(-ret)); ret = EXIT_FAIL; goto end_destroy; } } skel->rodata->from_match[0] = opt->iface_in.ifindex; skel->rodata->to_match[0] = opt->iface_out.ifindex; opts.obj = skel->obj; opts.prog_name = prog_name; xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { ret = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-ret)); goto end_destroy; } /* We always set the frags support bit: nothing the program does is * incompatible with multibuf, and it's perfectly fine to load a program * with frags support on an interface with a small MTU. We don't risk * setting any flags the kernel will balk at, either, since libxdp will * do the feature probing for us and skip the flag if the kernel doesn't * support it. * * The function below returns EOPNOTSUPP if libbpf is too old to support * setting the flags, but we just ignore that, since in such a case the * best we can do is just attempt to run without the frags support. */ xdp_program__set_xdp_frags_support(xdp_prog, true); ret = xdp_program__attach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); if (ret < 0) { /* First try with struct bpf_devmap_val as value for generic * mode, then fall back to sizeof(int) for older kernels.
*/ if (!opt->load_egress && !tried) { pr_warn("Attempting fallback to int-sized devmap\n"); prog_name = "redir_devmap_general"; tried = true; xdp_program__close(xdp_prog); xdp_redirect_devmap__destroy(skel); sample_teardown(); xdp_prog = NULL; goto restart; } pr_warn("Failed to attach XDP program: %s\n", strerror(-ret)); ret = EXIT_FAIL_XDP; goto end_destroy; } ret = sample_init(skel, mask, opt->iface_in.ifindex, opt->iface_out.ifindex); if (ret < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } opts.obj = NULL; opts.prog_name = "xdp_pass"; opts.find_filename = "xdp-dispatcher.o"; dummy_prog = xdp_program__create(&opts); if (!dummy_prog) { pr_warn("Failed to load dummy program: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_detach; } xdp_program__set_xdp_frags_support(dummy_prog, true); ret = xdp_program__attach(dummy_prog, opt->iface_out.ifindex, opt->mode, 0); if (ret < 0) { pr_warn("Failed to attach dummy program: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_detach; } devmap_val.ifindex = opt->iface_out.ifindex; if (opt->load_egress) devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_devmap_egress); ret = bpf_map_update_elem(bpf_map__fd(tx_port_map), &key, &devmap_val, 0); if (ret < 0) { pr_warn("Failed to update devmap value: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_detach; } ret = EXIT_FAIL; safe_strncpy(str, get_driver_name(opt->iface_in.ifindex), sizeof(str)); pr_info("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n", opt->iface_in.ifname, opt->iface_in.ifindex, str, opt->iface_out.ifname, opt->iface_out.ifindex, get_driver_name(opt->iface_out.ifindex)); ret = sample_run(opt->interval, NULL, NULL); if (ret < 0) { pr_warn("Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_destroy; } ret = EXIT_OK; end_detach: if (dummy_prog) xdp_program__detach(dummy_prog, opt->iface_out.ifindex, opt->mode, 0); xdp_program__detach(xdp_prog, opt->iface_in.ifindex, opt->mode, 0); end_destroy: xdp_program__close(xdp_prog); xdp_program__close(dummy_prog); xdp_redirect_devmap__destroy(skel); end: sample_teardown(); return ret; } xdp-tools-1.5.4/xdp-bench/Makefile0000644000175100001660000000114115003640462016322 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) XDP_TARGETS := xdp_redirect_basic.bpf xdp_redirect_cpumap.bpf xdp_redirect_devmap.bpf \ xdp_redirect_devmap_multi.bpf xdp_basic.bpf BPF_SKEL_TARGETS := $(XDP_TARGETS) # Don't install skeleton object files XDP_OBJ_INSTALL := TOOL_NAME := xdp-bench MAN_PAGE := xdp-bench.8 TEST_FILE := tests/test-xdp-bench.sh USER_TARGETS := xdp-bench USER_EXTRA_C := xdp_redirect_basic.c xdp_redirect_cpumap.c xdp_redirect_devmap.c \ xdp_redirect_devmap_multi.c xdp_basic.c EXTRA_USER_DEPS := xdp-bench.h LIB_DIR = ../lib include $(LIB_DIR)/common.mk xdp-tools-1.5.4/xdp-bench/xdp_redirect_devmap_multi.c0000644000175100001660000001362515003640462022262 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "logging.h" #include "xdp_sample.h" #include "xdp-bench.h" #include "xdp_redirect_devmap_multi.skel.h" static int ifaces[MAX_IFACE_NUM] = {}; static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_SKIP_HEADING; 
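/* Baseline stats mask; do_redirect_devmap_multi() below adjusts it at
 * runtime: the devmap_xmit counters are cleared in skb mode (where the
 * tracepoint does not fire), and SAMPLE_REDIRECT_CNT is added when
 * --stats is given. */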
DEFINE_SAMPLE_INIT(xdp_redirect_devmap_multi); static int update_mac_map(struct bpf_map *map) { int mac_map_fd = bpf_map__fd(map); unsigned char mac_addr[6]; unsigned int ifindex; int i, ret = -1; for (i = 0; ifaces[i] > 0; i++) { ifindex = ifaces[i]; ret = get_mac_addr(ifindex, mac_addr); if (ret < 0) { pr_warn("get interface %d mac failed\n", ifindex); return ret; } ret = bpf_map_update_elem(mac_map_fd, &ifindex, mac_addr, 0); if (ret < 0) { pr_warn("Failed to update mac address for ifindex %d\n", ifindex); return ret; } } return 0; } const struct devmap_multi_opts defaults_redirect_devmap_multi = { .mode = XDP_MODE_NATIVE, .interval = 2 }; int do_redirect_devmap_multi(const void *cfg, __unused const char *pin_root_path) { const struct devmap_multi_opts *opt = cfg; const char *prog_name = "redir_multi_native"; DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct xdp_redirect_devmap_multi *skel; struct bpf_devmap_val devmap_val = {}; struct xdp_program *xdp_prog = NULL; struct bpf_map *forward_map = NULL; bool first = true, tried = false; struct bpf_program *prog = NULL; int ret = EXIT_FAIL_OPTION; struct iface *iface; int i; if (opt->extended) sample_switch_mode(); if (opt->mode == XDP_MODE_SKB) /* devmap_xmit tracepoint not available */ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI); if (opt->stats) mask |= SAMPLE_REDIRECT_CNT; restart: skel = xdp_redirect_devmap_multi__open(); if (!skel) { pr_warn("Failed to xdp_redirect_devmap_multi__open: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end; } /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(skel->obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); if (tried) { forward_map = skel->maps.forward_map_general; bpf_program__set_autoload(skel->progs.xdp_devmap_prog, false); #ifdef HAVE_LIBBPF_BPF_MAP__SET_AUTOCREATE bpf_map__set_autocreate(skel->maps.forward_map_native, false); #else pr_warn("Libbpf is missing bpf_map__set_autocreate(), fallback won't work\n"); ret = EXIT_FAIL_BPF; goto end_destroy; #endif } else { #ifdef HAVE_LIBBPF_BPF_MAP__SET_AUTOCREATE bpf_map__set_autocreate(skel->maps.forward_map_general, false); #endif forward_map = skel->maps.forward_map_native; } ret = sample_init_pre_load(skel, NULL); if (ret < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } ret = EXIT_FAIL_OPTION; /* opt parsing enforces num <= MAX_IFACES_NUM */ for (i = 0, iface = opt->ifaces; iface; i++, iface = iface->next) { skel->rodata->from_match[i] = iface->ifindex; skel->rodata->to_match[i] = iface->ifindex; } opts.obj = skel->obj; opts.prog_name = prog_name; xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { ret = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-ret)); goto end_destroy; } for (iface = opt->ifaces; iface; iface = iface->next) { pr_debug("Loading program on interface %s\n", iface->ifname); ret = xdp_program__attach(xdp_prog, iface->ifindex, opt->mode, 0); if (ret) { if (first) { if (!opt->load_egress && !tried) { pr_warn("Attempting fallback to int-sized devmap\n"); prog_name = "redir_multi_general"; tried = true; xdp_program__close(xdp_prog); xdp_redirect_devmap_multi__destroy(skel); sample_teardown(); xdp_prog = NULL; goto restart; } pr_warn("Failed to attach XDP program to iface %s: %s\n", iface->ifname, strerror(-ret)); goto end_destroy; } pr_warn("Failed to attach 
XDP program to iface %s: %s\n", iface->ifname, strerror(-ret)); goto end_detach; } /* Add all the interfaces to the forward group, and attach * the egress devmap program if it exists */ devmap_val.ifindex = iface->ifindex; if (opt->load_egress) devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_devmap_prog); ret = bpf_map_update_elem(bpf_map__fd(forward_map), &iface->ifindex, &devmap_val, 0); if (ret < 0) { pr_warn("Failed to update devmap value: %s\n", strerror(errno)); ret = EXIT_FAIL_BPF; goto end_detach; } first = false; } if (opt->load_egress) { /* Update mac_map with all egress interfaces' mac addr */ if (update_mac_map(skel->maps.mac_map) < 0) { pr_warn("Updating mac address failed\n"); ret = EXIT_FAIL; goto end_detach; } } ret = sample_init(skel, mask, 0, 0); if (ret < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = sample_run(opt->interval, NULL, NULL); if (ret < 0) { pr_warn("Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_detach; } ret = EXIT_OK; end_detach: for (iface = opt->ifaces; iface; iface = iface->next) xdp_program__detach(xdp_prog, iface->ifindex, opt->mode, 0); end_destroy: xdp_program__close(xdp_prog); xdp_redirect_devmap_multi__destroy(skel); end: sample_teardown(); return ret; } xdp-tools-1.5.4/LICENSE0000644000175100001660000000037615003640462014030 0ustar runnerdockerThe code in this repository is licensed by a mix of GPL-2.0, LGPL-2.1 and BSD-2-Clause licenses, as indicated by the SPDX license headers in individual source files. The full text of these licenses is available in the files in the LICENSES subdirectory. xdp-tools-1.5.4/xdp-forward/0000755000175100001660000000000015003640462015252 5ustar runnerdockerxdp-tools-1.5.4/xdp-forward/README.org0000644000175100001660000001600115003640462016716 0ustar runnerdocker#+EXPORT_FILE_NAME: xdp-forward #+TITLE: xdp-forward #+OPTIONS: ^:nil #+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"XDP program loader" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. There's also a Makefile rule to export it. * xdp-forward - the XDP forwarding plane xdp-forward is an XDP forwarding plane, which will accelerate packet forwarding using XDP. To use it, simply load it on the set of interfaces to accelerate forwarding between. The userspace component of xdp-forward will then configure and load XDP programs on those interfaces, and forward packets between them using XDP_REDIRECT, using the kernel routing table or netfilter flowtable to determine the destination for each packet. Any packets that xdp-forward does not know how to forward will be passed up to the networking stack and handled by the kernel like normal. Depending on the mode xdp-forward is loaded in, this leads to different forwarding behaviours. See the section on *Operating modes* below. ** Running xdp-forward The syntax for running xdp-forward is: #+begin_src sh xdp-forward COMMAND [options] Where COMMAND can be one of: load - Load the XDP forwarding plane unload - Unload the XDP forwarding plane help - show the list of available commands #+end_src Each command, and its options, are explained below. Or use =xdp-forward COMMAND --help= to see the options for each command. * The LOAD command The =load= command loads the XDP forwarding plane on a list of interfaces.
The syntax for the =load= command is: =xdp-forward load [options] <ifname...>= Where =<ifname...>= is the name of the set of interfaces to forward packets between. An XDP program will be loaded on each interface, configured to forward packets to all other interfaces in the set (using the kernel routing table to determine the destination interface of each packet). The supported options are: ** -f, --fwd-mode <mode> Specifies which forwarding mode =xdp-forward= should operate in. Depending on the mode selected, =xdp-forward= will perform forwarding in different ways, which can lead to different behaviour, including which subset of kernel configuration (such as firewall rules) is respected during forwarding. See the section *FORWARDING MODES* below for a full description of each mode. ** -F, --fib-mode <mode> Specifies how =xdp-forward= performs routing table lookups in the Linux kernel. See the section *FIB MODES* below for a full description of each mode. ** -m, --mode <mode> Specifies which mode the XDP program should be loaded in. The valid values are 'native', which is the default in-driver XDP mode, 'skb', which causes the so-called /skb mode/ (also known as /generic XDP/) to be used, 'hw' which causes the program to be offloaded to the hardware, or 'unspecified' which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using 'unspecified' can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is 'native'. Note that hardware with support for the 'hw' mode is rare: Solarflare cards (using the 'sfc' driver) are the only devices with support for this in the mainline Linux kernel. ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * The UNLOAD command The =unload= command is used for unloading programs from an interface. The syntax for the =unload= command is: =xdp-forward unload [options] <ifname...>= Where =<ifname...>= is the list of interfaces to unload the XDP forwarding plane from. Note that while =xdp-forward= will examine the XDP programs loaded on each interface and make sure to only unload its own program, it will not check that the list of supplied interfaces is the same as the one supplied during load. As such, it is possible to perform a partial unload by supplying a different list of interfaces, which may lead to unexpected behaviour. The supported options are: ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * FORWARDING MODES The =xdp-forward= utility supports the following forwarding modes (selected by the =--fwd-mode= parameter to =xdp-forward load=). ** fib (default) In the =fib= forwarding mode, =xdp-forward= will perform a lookup in the kernel routing table (or FIB) for each packet, and forward packets between the configured interfaces based on the result of the lookup. Any packet where the lookup fails will be passed up to the stack. This includes packets that require neighbour discovery for the next hop, meaning that packets will periodically pass up the kernel stack for next hop discovery (initially, and when the nexthop entry expires). Note that no checks other than the FIB lookup are performed; in particular, this completely bypasses the netfilter subsystem, so firewall rules will not be checked before forwarding.
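As a rough illustration of what =fib= mode does per packet, the sketch below shows an IPv4-only version of the lookup-and-redirect logic. This is *not* the actual program source: the =xdp_tx_ports= map layout and the =ip_decrease_ttl()= helper are borrowed from =xdp_flowtable.bpf.c= in this directory, and IPv6, VLAN and most error handling are omitted.

#+begin_src c
/* Illustrative sketch only -- not the actual xdp-forward program source. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define AF_INET 2 /* defined locally, as in xdp_flowtable.bpf.c */

/* Devmap of egress interfaces, populated by the userspace loader
 * (layout borrowed from xdp_flowtable.bpf.c) */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__uint(max_entries, 64);
} xdp_tx_ports SEC(".maps");

/* Decrement TTL and update the IP checksum incrementally
 * (same helper as in xdp_flowtable.bpf.c) */
static __always_inline int ip_decrease_ttl(struct iphdr *iph)
{
	__u32 check = (__u32)iph->check;

	check += (__u32)bpf_htons(0x0100);
	iph->check = (__sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}

SEC("xdp")
int xdp_fwd_fib_sketch(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct bpf_fib_lookup fib = { .ifindex = ctx->ingress_ifindex };
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* Only handle plain IPv4 here; everything else goes to the stack */
	if ((void *)(eth + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;
	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end || iph->ttl <= 1)
		return XDP_PASS;

	fib.family      = AF_INET;
	fib.tos         = iph->tos;
	fib.l4_protocol = iph->protocol;
	fib.tot_len     = bpf_ntohs(iph->tot_len);
	fib.ipv4_src    = iph->saddr;
	fib.ipv4_dst    = iph->daddr;

	/* flags == 0 is 'full' mode; 'direct' passes BPF_FIB_LOOKUP_DIRECT.
	 * Any non-success result (no route, unresolved neighbour, ...) makes
	 * us punt to the stack, which also triggers neighbour discovery. */
	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) != BPF_FIB_LKUP_RET_SUCCESS)
		return XDP_PASS;

	/* Only redirect if the egress interface is one of our configured ports */
	if (!bpf_map_lookup_elem(&xdp_tx_ports, &fib.ifindex))
		return XDP_PASS;

	ip_decrease_ttl(iph);
	__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
	__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
	return bpf_redirect_map(&xdp_tx_ports, fib.ifindex, 0);
}

char _license[] SEC("license") = "GPL";
#+end_src

The =direct= FIB mode described below differs only in passing =BPF_FIB_LOOKUP_DIRECT= as the flags argument to =bpf_fib_lookup()=.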
** flowtable The =flowtable= operating mode offloads netfilter sw flowtable logic in the XDP layer if the hardware flowtable is not available. At the moment =xdp-forward= is able to offload just TCP or UDP netfilter flowtable entries to XDP. The user is supposed to configure the flowtable separately. * FIB MODES The =xdp-forward= utility supports the following fib modes (selected by the =--fib-mode= parameter to =xdp-forward load=. ** full (default) In the =full= operating mode, =xdp-forward= will perform a full lookup in the kernel routing table (or FIB) for each packet, and forward packets between the configured interfaces based on the result of the lookup. In particular, it will apply any policy routing rules configured by the user. ** direct The =direct= mode functions like =full=, except it passes the =BPF_FIB_LOOKUP_DIRECT= flag to the FIB lookup routine. This means that any policy routing rules configured will be skipped during the lookup, which can improve performance (but won't obey the policy of those rules, obviously). * Examples In order to enable flowtable offloading for tcp and udp traffic between NICs n0 and n1, issue the following commands: #+begin_src sh #nft -f /dev/stdin < in pre-routing chain chain prerouting { type nat hook prerouting priority filter; policy accept; iifname == "${NS_NAMES[0]}" meta nfproto ipv4 tcp dport 12345 dnat ip to ${ALL_INSIDE_IP4[-1]}:10000 iifname == "${NS_NAMES[0]}" meta nfproto ipv6 tcp dport 12345 dnat ip6 to [${ALL_INSIDE_IP6[-1]}]:10000 } # enable SNAT of the client ip via masquerading in post-routing chain chain postrouting { type nat hook postrouting priority filter; policy accept; oifname "${NS_NAMES[-1]}" masquerade } } table inet filter { flowtable ft { hook ingress priority filter devices = { ${NS_NAMES[0]}, ${NS_NAMES[-1]} } } chain forward { type filter hook forward priority filter meta l4proto { tcp } flow add @ft } } EOF # check if bpf flowtable lookup is available skip_if_missing_kernel_symbol bpf_xdp_flow_lookup # Add some nft rules to check {dnat/snat} is done properly in # the main namespace check_run ip netns exec ${NS_NAMES[-1]} nft -f /dev/stdin </dev/null 2>&1 { $XDP_FORWARD unload ${NS_NAMES[@]} $XDP_LOADER unload $NS --all check_run ip netns exec ${NS_NAMES[-1]} nft flush ruleset check_run nft flush ruleset ip link del dev veth-forw-test } >/dev/null 2>&1 } xdp-tools-1.5.4/xdp-forward/xdp_flowtable.bpf.c0000644000175100001660000003660115003640462021024 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* Original xdp_fwd sample Copyright (c) 2017-18 David Ahern */ #include #include #include #include #define AF_INET 2 #define AF_INET6 10 #define IPV6_FLOWINFO_MASK bpf_htons(0x0FFFFFFF) #define IP_MF 0x2000 /* "More Fragments" */ #define IP_OFFSET 0x1fff /* "Fragment Offset" */ #define CSUM_MANGLED_0 ((__sum16)0xffff) #define BIT(x) (1 << (x)) struct { __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, 64); } xdp_tx_ports SEC(".maps"); struct bpf_flowtable_opts { __s32 error; }; struct flow_offload_tuple_rhash * bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *, struct bpf_flowtable_opts *, __u32) __ksym; /* from include/net/ip.h */ static __always_inline int ip_decrease_ttl(struct iphdr *iph) { __u32 check = (__u32)iph->check; check += (__u32)bpf_htons(0x0100); iph->check = (__sum16)(check + (check >= 0xFFFF)); return --iph->ttl; } static __always_inline __u32 csum_add(__u32 csum, __u32 addend) { __u32 res = csum + addend; return 
res + (res < addend); } static __always_inline __u16 csum_fold(__u32 csum) { csum = (csum & 0xffff) + (csum >> 16); csum = (csum & 0xffff) + (csum >> 16); return ~csum; } static __always_inline __u16 csum_replace4(__u32 csum, __u32 from, __u32 to) { __u32 tmp = csum_add(~csum, ~from); return csum_fold(csum_add(tmp, to)); } static __always_inline __u16 csum_replace16(__u32 csum, __u32 *from, __u32 *to) { __u32 diff[] = { ~from[0], ~from[1], ~from[2], ~from[3], to[0], to[1], to[2], to[3], }; csum = bpf_csum_diff(0, 0, diff, sizeof(diff), ~csum); return csum_fold(csum); } static __always_inline int xdp_flowtable_check_tcp_state(void *ports, void *data_end, __u8 proto) { if (proto == IPPROTO_TCP) { struct tcphdr *tcph = ports; if (tcph + 1 > data_end) return -1; if (tcph->fin || tcph->rst) return -1; } return 0; } static __always_inline void xdp_flowtable_update_port_csum(struct flow_ports *ports, void *data_end, __u8 proto, __be16 port, __be16 nat_port) { switch (proto) { case IPPROTO_TCP: { struct tcphdr *tcph = (struct tcphdr *)ports; if (tcph + 1 > data_end) break; tcph->check = csum_replace4((__u32)tcph->check, (__u32)port, (__u32)nat_port); break; } case IPPROTO_UDP: { struct udphdr *udph = (struct udphdr *)ports; if (udph + 1 > data_end) break; if (!udph->check) break; udph->check = csum_replace4((__u32)udph->check, (__u32)port, (__u32)nat_port); if (!udph->check) udph->check = CSUM_MANGLED_0; break; } default: break; } } static __always_inline void xdp_flowtable_snat_port(const struct flow_offload *flow, struct flow_ports *ports, void *data_end, __u8 proto, enum flow_offload_tuple_dir dir) { __be16 port, nat_port; if (ports + 1 > data_end) return; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: port = ports->source; /* For original direction (FLOW_OFFLOAD_DIR_ORIGINAL): * - tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port contains * the source port used for the traffic transmitted by the * host. * - tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port contains * the destination port used for the traffic transmitted by * the host. */ bpf_core_read(&nat_port, bpf_core_type_size(nat_port), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); ports->source = nat_port; break; case FLOW_OFFLOAD_DIR_REPLY: /* For reply direction (FLOW_OFFLOAD_DIR_REPLY): * - tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port * contains source port used for the traffic received by the * host. * - tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port * contains the destination port used for the traffic * received by the host. */ port = ports->dest; bpf_core_read(&nat_port, bpf_core_type_size(nat_port), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); ports->dest = nat_port; break; default: return; } xdp_flowtable_update_port_csum(ports, data_end, proto, port, nat_port); } static __always_inline void xdp_flowtable_dnat_port(const struct flow_offload *flow, struct flow_ports *ports, void *data_end, __u8 proto, enum flow_offload_tuple_dir dir) { __be16 port, nat_port; if (ports + 1 > data_end) return; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: /* For original direction (FLOW_OFFLOAD_DIR_ORIGINAL): * - tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port contains * the source port used for the traffic transmitted by the * host. * - tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port contains * the destination port used for the traffic transmitted by * the host. 
*/ port = ports->dest; bpf_core_read(&nat_port, bpf_core_type_size(nat_port), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port); ports->dest = nat_port; break; case FLOW_OFFLOAD_DIR_REPLY: /* For reply direction (FLOW_OFFLOAD_DIR_REPLY): * - tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port * contains the source port used for the traffic received by * the host. * - tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port * contains destination port used for the traffic received by * the host. */ port = ports->source; bpf_core_read(&nat_port, bpf_core_type_size(nat_port), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port); ports->source = nat_port; break; default: return; } xdp_flowtable_update_port_csum(ports, data_end, proto, port, nat_port); } static __always_inline void xdp_flowtable_update_ipv4_csum(struct iphdr *iph, void *data_end, __be32 addr, __be32 nat_addr) { switch (iph->protocol) { case IPPROTO_TCP: { struct tcphdr *tcph = (struct tcphdr *)(iph + 1); if (tcph + 1 > data_end) break; tcph->check = csum_replace4((__u32)tcph->check, addr, nat_addr); break; } case IPPROTO_UDP: { struct udphdr *udph = (struct udphdr *)(iph + 1); if (udph + 1 > data_end) break; if (!udph->check) break; udph->check = csum_replace4((__u32)udph->check, addr, nat_addr); if (!udph->check) udph->check = CSUM_MANGLED_0; break; } default: break; } } static __always_inline void xdp_flowtable_snat_ip(const struct flow_offload *flow, struct iphdr *iph, void *data_end, enum flow_offload_tuple_dir dir) { __be32 addr, nat_addr; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: addr = iph->saddr; bpf_core_read(&nat_addr, bpf_core_type_size(nat_addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr); iph->saddr = nat_addr; break; case FLOW_OFFLOAD_DIR_REPLY: addr = iph->daddr; bpf_core_read(&nat_addr, bpf_core_type_size(nat_addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr); iph->daddr = nat_addr; break; default: return; } iph->check = csum_replace4((__u32)iph->check, addr, nat_addr); xdp_flowtable_update_ipv4_csum(iph, data_end, addr, nat_addr); } static __always_inline void xdp_flowtable_get_dnat_ip(__be32 *addr, const struct flow_offload *flow, enum flow_offload_tuple_dir dir) { switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: bpf_core_read(addr, sizeof(*addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr); break; case FLOW_OFFLOAD_DIR_REPLY: bpf_core_read(addr, sizeof(*addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr); break; default: break; } } static __always_inline void xdp_flowtable_dnat_ip(const struct flow_offload *flow, struct iphdr *iph, void *data_end, enum flow_offload_tuple_dir dir) { __be32 addr, nat_addr; xdp_flowtable_get_dnat_ip(&nat_addr, flow, dir); switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: addr = iph->daddr; iph->daddr = nat_addr; break; case FLOW_OFFLOAD_DIR_REPLY: addr = iph->saddr; iph->saddr = nat_addr; break; default: return; } iph->check = csum_replace4((__u32)iph->check, addr, nat_addr); xdp_flowtable_update_ipv4_csum(iph, data_end, addr, nat_addr); } static __always_inline void xdp_flowtable_update_ipv6_csum(struct ipv6hdr *ip6h, void *data_end, struct in6_addr *addr, struct in6_addr *nat_addr) { switch (ip6h->nexthdr) { case IPPROTO_TCP: { struct tcphdr *tcph = (struct tcphdr *)(ip6h + 1); if (tcph + 1 > data_end) break; tcph->check = csum_replace16((__u32)tcph->check, addr->in6_u.u6_addr32, nat_addr->in6_u.u6_addr32); break; } case IPPROTO_UDP: { struct udphdr *udph = (struct udphdr *)(ip6h + 1); if 
(udph + 1 > data_end) break; if (!udph->check) break; udph->check = csum_replace16((__u32)udph->check, addr->in6_u.u6_addr32, nat_addr->in6_u.u6_addr32); if (!udph->check) udph->check = CSUM_MANGLED_0; break; } default: break; } } static __always_inline void xdp_flowtable_snat_ipv6(const struct flow_offload *flow, struct ipv6hdr *ip6h, void *data_end, enum flow_offload_tuple_dir dir) { struct in6_addr addr, nat_addr; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: addr = ip6h->saddr; bpf_core_read(&nat_addr, bpf_core_type_size(nat_addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6); ip6h->saddr = nat_addr; break; case FLOW_OFFLOAD_DIR_REPLY: addr = ip6h->daddr; bpf_core_read(&nat_addr, bpf_core_type_size(nat_addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6); ip6h->daddr = nat_addr; break; default: return; } xdp_flowtable_update_ipv6_csum(ip6h, data_end, &addr, &nat_addr); } static __always_inline void xdp_flowtable_get_dnat_ipv6(struct in6_addr *addr, const struct flow_offload *flow, enum flow_offload_tuple_dir dir) { switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: bpf_core_read(addr, sizeof(*addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6); break; case FLOW_OFFLOAD_DIR_REPLY: bpf_core_read(addr, sizeof(*addr), &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6); break; default: break; } } static __always_inline void xdp_flowtable_dnat_ipv6(const struct flow_offload *flow, struct ipv6hdr *ip6h, void *data_end, enum flow_offload_tuple_dir dir) { struct in6_addr addr, nat_addr; xdp_flowtable_get_dnat_ipv6(&nat_addr, flow, dir); switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: addr = ip6h->daddr; ip6h->daddr = nat_addr; break; case FLOW_OFFLOAD_DIR_REPLY: addr = ip6h->saddr; ip6h->saddr = nat_addr; break; default: return; } xdp_flowtable_update_ipv6_csum(ip6h, data_end, &addr, &nat_addr); } static __always_inline void xdp_flowtable_forward_ip(const struct flow_offload *flow, void *data, void *data_end, struct flow_ports *ports, enum flow_offload_tuple_dir dir, unsigned long flags) { struct iphdr *iph = data + sizeof(struct ethhdr); if (iph + 1 > data_end) return; if (flags & BIT(NF_FLOW_SNAT)) { xdp_flowtable_snat_port(flow, ports, data_end, iph->protocol, dir); xdp_flowtable_snat_ip(flow, iph, data_end, dir); } if (flags & BIT(NF_FLOW_DNAT)) { xdp_flowtable_dnat_port(flow, ports, data_end, iph->protocol, dir); xdp_flowtable_dnat_ip(flow, iph, data_end, dir); } ip_decrease_ttl(iph); } static __always_inline void xdp_flowtable_forward_ipv6(const struct flow_offload *flow, void *data, void *data_end, struct flow_ports *ports, enum flow_offload_tuple_dir dir, unsigned long flags) { struct ipv6hdr *ip6h = data + sizeof(struct ethhdr); if (ip6h + 1 > data_end) return; if (flags & BIT(NF_FLOW_SNAT)) { xdp_flowtable_snat_port(flow, ports, data_end, ip6h->nexthdr, dir); xdp_flowtable_snat_ipv6(flow, ip6h, data_end, dir); } if (flags & BIT(NF_FLOW_DNAT)) { xdp_flowtable_dnat_port(flow, ports, data_end, ip6h->nexthdr, dir); xdp_flowtable_dnat_ipv6(flow, ip6h, data_end, dir); } ip6h->hop_limit--; } static __always_inline int xdp_flowtable_flags(struct xdp_md *ctx, __u32 fib_flags) { void *data_end = (void *)(long)ctx->data_end; struct flow_offload_tuple_rhash *tuplehash; struct bpf_fib_lookup tuple = { .ifindex = ctx->ingress_ifindex, }; void *data = (void *)(long)ctx->data; struct bpf_flowtable_opts opts = {}; enum flow_offload_tuple_dir dir; struct ethhdr *eth = data; struct flow_offload *flow; struct flow_ports *ports; unsigned long flags; if (eth + 1 > 
data_end) return XDP_PASS; switch (eth->h_proto) { case bpf_htons(ETH_P_IP): { struct iphdr *iph = data + sizeof(*eth); ports = (struct flow_ports *)(iph + 1); if (ports + 1 > data_end) return XDP_PASS; /* ip fragmented traffic */ if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) return XDP_PASS; /* ip options */ if (iph->ihl * 4 != sizeof(*iph)) return XDP_PASS; if (iph->ttl <= 1) return XDP_PASS; if (xdp_flowtable_check_tcp_state(ports, data_end, iph->protocol) < 0) return XDP_PASS; tuple.family = AF_INET; tuple.tos = iph->tos; tuple.l4_protocol = iph->protocol; tuple.tot_len = bpf_ntohs(iph->tot_len); tuple.ipv4_src = iph->saddr; tuple.ipv4_dst = iph->daddr; tuple.sport = ports->source; tuple.dport = ports->dest; break; } case bpf_htons(ETH_P_IPV6): { struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src; struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst; struct ipv6hdr *ip6h = data + sizeof(*eth); ports = (struct flow_ports *)(ip6h + 1); if (ports + 1 > data_end) return XDP_PASS; if (ip6h->hop_limit <= 1) return XDP_PASS; if (xdp_flowtable_check_tcp_state(ports, data_end, ip6h->nexthdr) < 0) return XDP_PASS; tuple.family = AF_INET6; tuple.l4_protocol = ip6h->nexthdr; tuple.tot_len = bpf_ntohs(ip6h->payload_len); *src = ip6h->saddr; *dst = ip6h->daddr; tuple.sport = ports->source; tuple.dport = ports->dest; break; } default: return XDP_PASS; } tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts)); if (!tuplehash) return XDP_PASS; flow = container_of(tuplehash, struct flow_offload, tuplehash); if (bpf_core_read(&flags, sizeof(flags), &flow->flags)) return XDP_PASS; if (tuplehash->tuple.xmit_type != FLOW_OFFLOAD_XMIT_NEIGH) return XDP_PASS; dir = tuplehash->tuple.dir; if (dir >= FLOW_OFFLOAD_DIR_MAX) return XDP_PASS; /* update the destination address in case of dnatting before * performing the route lookup */ if (tuple.family == AF_INET6) { struct in6_addr *dst_addr = (struct in6_addr *)&tuple.ipv6_dst; xdp_flowtable_get_dnat_ipv6(dst_addr, flow, dir); } else { xdp_flowtable_get_dnat_ip(&tuple.ipv4_dst, flow, dir); } if (bpf_fib_lookup(ctx, &tuple, sizeof(tuple), fib_flags) != BPF_FIB_LKUP_RET_SUCCESS) return XDP_PASS; /* Verify egress index has been configured as TX-port */ if (!bpf_map_lookup_elem(&xdp_tx_ports, &tuple.ifindex)) return XDP_PASS; if (tuple.family == AF_INET6) xdp_flowtable_forward_ipv6(flow, data, data_end, ports, dir, flags); else xdp_flowtable_forward_ip(flow, data, data_end, ports, dir, flags); __builtin_memcpy(eth->h_dest, tuple.dmac, ETH_ALEN); __builtin_memcpy(eth->h_source, tuple.smac, ETH_ALEN); return bpf_redirect_map(&xdp_tx_ports, tuple.ifindex, 0); } SEC("xdp") int xdp_fwd_flow_full(struct xdp_md *ctx) { return xdp_flowtable_flags(ctx, 0); } SEC("xdp") int xdp_fwd_flow_direct(struct xdp_md *ctx) { return xdp_flowtable_flags(ctx, BPF_FIB_LOOKUP_DIRECT); } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-forward/xdp-forward.80000644000175100001660000001576115003640462017612 0ustar runnerdocker.TH "xdp-forward" "8" "OCTOBER 11, 2024" "V1.5.4" "XDP program loader" .SH "NAME" xdp-forward \- the XDP forwarding plane .SH "SYNOPSIS" .PP xdp-forward is an XDP forwarding plane, which will accelerate packet forwarding using XDP. To use it, simply load it on the set of interfaces to accelerate forwarding between. 
The userspace component of xdp-forward will then configure and load XDP programs on those interfaces, and forward packets between them using XDP_REDIRECT, using the kernel routing table or netfilter flowtable to determine the destination for each packet. .PP Any packets that xdp-forward does not know how to forward will be passed up to the networking stack and handled by the kernel like normal. Depending on the mode xdp-forward is loaded in, this leads to different forwarding behaviours. See the section \fBFORWARDING MODES\fP below. .SS "Running xdp-forward" .PP The syntax for running xdp-forward is: .RS .nf \fCxdp-forward COMMAND [options] Where COMMAND can be one of: load - Load the XDP forwarding plane unload - Unload the XDP forwarding plane help - show the list of available commands \fP .fi .RE .PP Each command and its options are explained below. Or use \fIxdp\-forward COMMAND \-\-help\fP to see the options for each command. .SH "The LOAD command" .PP The \fIload\fP command loads the XDP forwarding plane on a list of interfaces. .PP The syntax for the \fIload\fP command is: .PP \fIxdp\-forward load [options] <ifname...>\fP .PP Where \fI<ifname...>\fP is the name of the set of interfaces to forward packets between. An XDP program will be loaded on each interface, configured to forward packets to all other interfaces in the set (using the kernel routing table to determine the destination interface of each packet). .PP The supported options are: .SS "-f, --fwd-mode <mode>" .PP Specifies which forwarding mode \fIxdp\-forward\fP should operate in. Depending on the mode selected, \fIxdp\-forward\fP will perform forwarding in different ways, which can lead to different behaviour, including which subset of kernel configuration (such as firewall rules) is respected during forwarding. See the section \fBFORWARDING MODES\fP below for a full description of each mode. .SS "-F, --fib-mode <mode>" .PP Specifies how \fIxdp\-forward\fP performs routing table lookups in the Linux kernel. See the section \fBFIB MODES\fP below for a full description of each mode. .SS "-m, --mode <mode>" .PP Specifies which mode the XDP program should be loaded in. The valid values are 'native', which is the default in-driver XDP mode, 'skb', which causes the so-called \fIskb mode\fP (also known as \fIgeneric XDP\fP) to be used, 'hw' which causes the program to be offloaded to the hardware, or 'unspecified' which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using 'unspecified' can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is 'native'. Note that hardware with support for the 'hw' mode is rare: Solarflare cards (using the 'sfc' driver) are the only devices with support for this in the mainline Linux kernel. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options. .SH "The UNLOAD command" .PP The \fIunload\fP command is used for unloading programs from an interface. .PP The syntax for the \fIunload\fP command is: .PP \fIxdp\-forward unload [options] <ifname...>\fP .PP Where \fI<ifname...>\fP is the list of interfaces to unload the XDP forwarding plane from. Note that while \fIxdp\-forward\fP will examine the XDP programs loaded on each interface and make sure to only unload its own program, it will not check that the list of supplied interfaces is the same as the one supplied during load.
As such, it is possible to perform a partial unload by supplying a different list of interfaces, which may lead to unexpected behaviour. .PP The supported options are: .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options. .SH "FORWARDING MODES" .PP The \fIxdp\-forward\fP utility supports the following forwarding modes (selected by the \fI\-\-fwd\-mode\fP parameter to \fIxdp\-forward load\fP). .SS "fib (default)" .PP In the \fIfib\fP forwarding mode, \fIxdp\-forward\fP will perform a lookup in the kernel routing table (or FIB) for each packet, and forward packets between the configured interfaces based on the result of the lookup. Any packet where the lookup fails will be passed up to the stack. This includes packets that require neighbour discovery for the next hop, meaning that packets will periodically pass up the kernel stack for next hop discovery (initially, and when the nexthop entry expires). .PP Note that no checks other than the FIB lookup are performed; in particular, this completely bypasses the netfilter subsystem, so firewall rules will not be checked before forwarding. .SS "flowtable" .PP The \fIflowtable\fP operating mode offloads the netfilter software flowtable logic to the XDP layer if a hardware flowtable is not available. At the moment, \fIxdp\-forward\fP is only able to offload TCP and UDP netfilter flowtable entries to XDP. The user is expected to configure the flowtable separately. .SH "FIB MODES" .PP The \fIxdp\-forward\fP utility supports the following FIB modes (selected by the \fI\-\-fib\-mode\fP parameter to \fIxdp\-forward load\fP). .SS "full (default)" .PP In the \fIfull\fP operating mode, \fIxdp\-forward\fP will perform a full lookup in the kernel routing table (or FIB) for each packet, and forward packets between the configured interfaces based on the result of the lookup. In particular, it will apply any policy routing rules configured by the user. .SS "direct" .PP The \fIdirect\fP mode functions like \fIfull\fP, except it passes the \fIBPF_FIB_LOOKUP_DIRECT\fP flag to the FIB lookup routine. This means that any policy routing rules configured will be skipped during the lookup, which can improve performance (but won't obey the policy of those rules, obviously).
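.PP
In terms of implementation, the only difference between the two FIB modes is the flags value passed to the kernel FIB lookup helper; the bundled BPF programs select between the two like this (excerpt from the xdp-forward BPF source):
.RS
.nf
\fCSEC("xdp")
int xdp_fwd_fib_full(struct xdp_md *ctx)
{
        return xdp_fwd_flags(ctx, 0);
}

SEC("xdp")
int xdp_fwd_fib_direct(struct xdp_md *ctx)
{
        return xdp_fwd_flags(ctx, BPF_FIB_LOOKUP_DIRECT);
}
\fP
.fi
.RE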
.SH "Examples" .PP In order to enable flowtable offloading for tcp and udp traffic between NICs n0 and n1, issue the following commands: .RS .nf \fC#nft -f /dev/stdin < #include #include #include #include "params.h" #include "util.h" #include "logging.h" #include "compat.h" #include "xdp_forward.skel.h" #include "xdp_flowtable.skel.h" #include "xdp_flowtable_sample.skel.h" #define MAX_IFACE_NUM 32 #define PROG_NAME "xdp-forward" int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: xdp-forward COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " load - Load the XDP forwarding plane\n" " unload - Unload the XDP forwarding plane\n" " help - show this help message\n" "\n" "Use 'xdp-forward COMMAND --help' to see options for each command\n"); return -1; } struct enum_val xdp_modes[] = { { "native", XDP_MODE_NATIVE }, { "skb", XDP_MODE_SKB }, { NULL, 0 } }; enum fwd_mode { FWD_FIB, FWD_FLOWTABLE, }; struct enum_val fwd_modes[] = { { "fib", FWD_FIB }, { "flowtable", FWD_FLOWTABLE }, { NULL, 0 } }; enum fib_mode { FIB_DIRECT, FIB_FULL, }; struct enum_val fib_modes[] = { { "direct", FIB_DIRECT }, { "full", FIB_FULL }, { NULL, 0 } }; static int find_prog(struct iface *iface, bool detach) { struct xdp_program *prog = NULL; enum xdp_attach_mode mode; struct xdp_multiprog *mp; int ret = -ENOENT; mp = xdp_multiprog__get_from_ifindex(iface->ifindex); if (!mp) return ret; if (xdp_multiprog__is_legacy(mp)) { prog = xdp_multiprog__main_prog(mp); goto check; } while ((prog = xdp_multiprog__next_prog(prog, mp))) { check: if (!strcmp(xdp_program__name(prog), "xdp_fwd_fib_full") || !strcmp(xdp_program__name(prog), "xdp_fwd_fib_direct") || !strcmp(xdp_program__name(prog), "xdp_fwd_flow_full") || !strcmp(xdp_program__name(prog), "xdp_fwd_flow_direct")) { mode = xdp_multiprog__attach_mode(mp); ret = 0; if (detach) { ret = xdp_program__detach(prog, iface->ifindex, mode, 0); if (ret) pr_warn("Couldn't detach XDP program from interface %s: %s\n", iface->ifname, strerror(errno)); break; } } } xdp_multiprog__close(mp); return ret; } struct load_opts { enum fwd_mode fwd_mode; enum fib_mode fib_mode; enum xdp_attach_mode xdp_mode; struct iface *ifaces; } defaults_load = { .fwd_mode = FWD_FIB, .fib_mode = FIB_FULL, }; struct prog_option load_options[] = { DEFINE_OPTION("fwd-mode", OPT_ENUM, struct load_opts, fwd_mode, .short_opt = 'f', .typearg = fwd_modes, .metavar = "", .help = "Forward mode to run in; see man page. Default fib"), DEFINE_OPTION("fib-mode", OPT_ENUM, struct load_opts, fib_mode, .short_opt = 'F', .typearg = fib_modes, .metavar = "", .help = "Fib mode to run in; see man page. 
Default full"), DEFINE_OPTION("xdp-mode", OPT_ENUM, struct load_opts, xdp_mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("devs", OPT_IFNAME_MULTI, struct load_opts, ifaces, .positional = true, .metavar = "", .min_num = 1, .max_num = MAX_IFACE_NUM, .required = 1, .help = "Redirect from and to devices "), END_OPTIONS }; static bool sample_probe_bpf_xdp_flow_lookup(void) { struct xdp_flowtable_sample *skel; bool res; skel = xdp_flowtable_sample__open_and_load(); res = !!skel; xdp_flowtable_sample__destroy(skel); return res; } static int do_load(const void *cfg, __unused const char *pin_root_path) { DECLARE_LIBBPF_OPTS(xdp_program_opts, opts); struct xdp_program *xdp_prog = NULL; const struct load_opts *opt = cfg; struct bpf_program *prog = NULL; struct bpf_map *map = NULL; struct bpf_object *obj; int ret = EXIT_FAILURE; struct iface *iface; void *skel; switch (opt->fwd_mode) { case FWD_FIB: opts.prog_name = opt->fib_mode == FIB_DIRECT ? "xdp_fwd_fib_direct" : "xdp_fwd_fib_full"; break; case FWD_FLOWTABLE: opts.prog_name = opt->fib_mode == FIB_DIRECT ? "xdp_fwd_flow_direct" : "xdp_fwd_flow_full"; break; default: goto end; } if (opt->fwd_mode == FWD_FLOWTABLE) { struct xdp_flowtable *xdp_flowtable_skel; if (!sample_probe_bpf_xdp_flow_lookup()) { pr_warn("The kernel does not support the bpf_xdp_flow_lookup() kfunc\n"); goto end; } xdp_flowtable_skel = xdp_flowtable__open(); if (!xdp_flowtable_skel) { pr_warn("Failed to load skeleton: %s\n", strerror(errno)); goto end; } map = xdp_flowtable_skel->maps.xdp_tx_ports; obj = xdp_flowtable_skel->obj; skel = (void *)xdp_flowtable_skel; } else { struct xdp_forward *xdp_forward_skel = xdp_forward__open(); if (!xdp_forward_skel) { pr_warn("Failed to load skeleton: %s\n", strerror(errno)); goto end; } map = xdp_forward_skel->maps.xdp_tx_ports; obj = xdp_forward_skel->obj; skel = (void *)xdp_forward_skel; } /* Make sure we only load the one XDP program we are interested in */ while ((prog = bpf_object__next_program(obj, prog)) != NULL) if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP && bpf_program__expected_attach_type(prog) == BPF_XDP) bpf_program__set_autoload(prog, false); opts.obj = obj; xdp_prog = xdp_program__create(&opts); if (!xdp_prog) { pr_warn("Couldn't open XDP program: %s\n", strerror(errno)); goto end_destroy; } /* We always set the frags support bit: nothing the program does is * incompatible with multibuf, and it's perfectly fine to load a program * with frags support on an interface with a small MTU. We don't risk * setting any flags the kernel will balk at, either, since libxdp will * do the feature probing for us and skip the flag if the kernel doesn't * support it. * * The function below returns EOPNOTSUPP it libbpf is too old to support * setting the flags, but we just ignore that, since in such a case the * best we can do is just attempt to run without the frags support. 
*/ xdp_program__set_xdp_frags_support(xdp_prog, true); for (iface = opt->ifaces; iface; iface = iface->next) { if (find_prog(iface, false) != -ENOENT) { pr_warn("Already attached to %s, not reattaching\n", iface->ifname); continue; } ret = xdp_program__attach(xdp_prog, iface->ifindex, opt->xdp_mode, 0); if (ret) { pr_warn("Failed to attach XDP program to iface %s: %s\n", iface->ifname, strerror(-ret)); goto end_detach; } ret = bpf_map_update_elem(bpf_map__fd(map), &iface->ifindex, &iface->ifindex, 0); if (ret) { pr_warn("Failed to update devmap value: %s\n", strerror(errno)); goto end_detach; } pr_info("Loaded on interface %s\n", iface->ifname); } ret = EXIT_SUCCESS; end_destroy: if (opt->fwd_mode == FWD_FLOWTABLE) xdp_flowtable__destroy(skel); else xdp_forward__destroy(skel); end: return ret; end_detach: ret = EXIT_FAILURE; for (iface = opt->ifaces; iface; iface = iface->next) xdp_program__detach(xdp_prog, iface->ifindex, opt->xdp_mode, 0); goto end_destroy; } struct unload_opts { struct iface *ifaces; } defaults_unload = {}; struct prog_option unload_options[] = { DEFINE_OPTION("devs", OPT_IFNAME_MULTI, struct unload_opts, ifaces, .positional = true, .metavar = "", .min_num = 1, .max_num = MAX_IFACE_NUM, .help = "Redirect from and to devices "), END_OPTIONS }; static int do_unload(const void *cfg, __unused const char *pin_root_path) { const struct unload_opts *opt = cfg; int ret = EXIT_SUCCESS; struct iface *iface; for (iface = opt->ifaces; iface; iface = iface->next) { if (find_prog(iface, true)) { pr_warn("Couldn't find program on interface %s\n", iface->ifname); ret = EXIT_FAILURE; } pr_info("Unloaded from interface %s\n", iface->ifname); } return ret; } static const struct prog_command cmds[] = { DEFINE_COMMAND(load, "Load XDP forwarding plane"), DEFINE_COMMAND(unload, "Unload XDP forwarding plane"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct load_opts load; struct unload_opts unload; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, false); return do_help(NULL, NULL); } xdp-tools-1.5.4/xdp-forward/xdp_flowtable_sample.bpf.c0000644000175100001660000000152115003640462022356 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* Original xdp_fwd sample Copyright (c) 2017-18 David Ahern */ #include #include #include #include #define AF_INET 2 struct bpf_flowtable_opts { __s32 error; }; struct flow_offload_tuple_rhash * bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *, struct bpf_flowtable_opts *, __u32) __ksym; SEC("xdp") int xdp_fwd_flowtable_sample(struct xdp_md *ctx) { struct flow_offload_tuple_rhash *tuplehash; struct bpf_flowtable_opts opts = {}; struct bpf_fib_lookup tuple = { .family = AF_INET, .ifindex = ctx->ingress_ifindex, }; tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts)); if (!tuplehash) return XDP_DROP; return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-forward/xdp_forward.bpf.c0000644000175100001660000001011515003640462020501 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* Original xdp_fwd sample Copyright (c) 2017-18 David Ahern */ #include #include #include #include #define AF_INET 2 #define AF_INET6 10 #define IPV6_FLOWINFO_MASK bpf_htons(0x0FFFFFFF) struct { __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, 64); } xdp_tx_ports SEC(".maps"); /* from include/net/ip.h */ static 
__always_inline int ip_decrease_ttl(struct iphdr *iph) { __u32 check = (__u32)iph->check; check += (__u32)bpf_htons(0x0100); iph->check = (__sum16)(check + (check >= 0xFFFF)); return --iph->ttl; } static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, __u32 flags) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct bpf_fib_lookup fib_params; struct ethhdr *eth = data; struct ipv6hdr *ip6h; struct iphdr *iph; __u16 h_proto; __u64 nh_off; int rc; nh_off = sizeof(*eth); if (data + nh_off > data_end) return XDP_DROP; __builtin_memset(&fib_params, 0, sizeof(fib_params)); h_proto = eth->h_proto; if (h_proto == bpf_htons(ETH_P_IP)) { iph = data + nh_off; if (iph + 1 > data_end) return XDP_DROP; if (iph->ttl <= 1) return XDP_PASS; fib_params.family = AF_INET; fib_params.tos = iph->tos; fib_params.l4_protocol = iph->protocol; fib_params.sport = 0; fib_params.dport = 0; fib_params.tot_len = bpf_ntohs(iph->tot_len); fib_params.ipv4_src = iph->saddr; fib_params.ipv4_dst = iph->daddr; } else if (h_proto == bpf_htons(ETH_P_IPV6)) { struct in6_addr *src = (struct in6_addr *) fib_params.ipv6_src; struct in6_addr *dst = (struct in6_addr *) fib_params.ipv6_dst; ip6h = data + nh_off; if (ip6h + 1 > data_end) return XDP_DROP; if (ip6h->hop_limit <= 1) return XDP_PASS; fib_params.family = AF_INET6; fib_params.flowinfo = *(__be32 *)ip6h & IPV6_FLOWINFO_MASK; fib_params.l4_protocol = ip6h->nexthdr; fib_params.sport = 0; fib_params.dport = 0; fib_params.tot_len = bpf_ntohs(ip6h->payload_len); *src = ip6h->saddr; *dst = ip6h->daddr; } else { return XDP_PASS; } fib_params.ifindex = ctx->ingress_ifindex; rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); /* * Some rc (return codes) from bpf_fib_lookup() are important, * to understand how this XDP-prog interacts with network stack. * * BPF_FIB_LKUP_RET_NO_NEIGH: * Even if route lookup was a success, then the MAC-addresses are also * needed. This is obtained from arp/neighbour table, but if table is * (still) empty then BPF_FIB_LKUP_RET_NO_NEIGH is returned. To avoid * doing ARP lookup directly from XDP, then send packet to normal * network stack via XDP_PASS and expect it will do ARP resolution. * * BPF_FIB_LKUP_RET_FWD_DISABLED: * The bpf_fib_lookup respect sysctl net.ipv{4,6}.conf.all.forwarding * setting, and will return BPF_FIB_LKUP_RET_FWD_DISABLED if not * enabled this on ingress device. */ if (rc == BPF_FIB_LKUP_RET_SUCCESS) { /* Verify egress index has been configured as TX-port. * (Note: User can still have inserted an egress ifindex that * doesn't support XDP xmit, which will result in packet drops). * * Note: lookup in devmap supported since 0cdbb4b09a0. 
* If not supported will fail with: * cannot pass map_type 14 into func bpf_map_lookup_elem#1: */ if (!bpf_map_lookup_elem(&xdp_tx_ports, &fib_params.ifindex)) return XDP_PASS; if (h_proto == bpf_htons(ETH_P_IP)) ip_decrease_ttl(iph); else if (h_proto == bpf_htons(ETH_P_IPV6)) ip6h->hop_limit--; __builtin_memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); __builtin_memcpy(eth->h_source, fib_params.smac, ETH_ALEN); return bpf_redirect_map(&xdp_tx_ports, fib_params.ifindex, 0); } return XDP_PASS; } SEC("xdp") int xdp_fwd_fib_full(struct xdp_md *ctx) { return xdp_fwd_flags(ctx, 0); } SEC("xdp") int xdp_fwd_fib_direct(struct xdp_md *ctx) { return xdp_fwd_flags(ctx, BPF_FIB_LOOKUP_DIRECT); } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-forward/.gitignore0000644000175100001660000000001415003640462017235 0ustar runnerdockerxdp-forward xdp-tools-1.5.4/xdp-forward/Makefile0000644000175100001660000000051515003640462016713 0ustar runnerdocker# SPDX-License-Identifier: GPL-2.0 XDP_TARGETS := xdp_forward.bpf xdp_flowtable.bpf xdp_flowtable_sample.bpf BPF_SKEL_TARGETS := $(XDP_TARGETS) XDP_OBJ_INSTALL := TOOL_NAME := xdp-forward MAN_PAGE := xdp-forward.8 TEST_FILE := tests/test-xdp-forward.sh USER_TARGETS := xdp-forward LIB_DIR := ../lib include $(LIB_DIR)/common.mk xdp-tools-1.5.4/lib/0000755000175100001660000000000015003640462013563 5ustar runnerdockerxdp-tools-1.5.4/lib/libxdp/0000755000175100001660000000000015003640462015045 5ustar runnerdockerxdp-tools-1.5.4/lib/libxdp/xsk_def_xdp_prog.c0000644000175100001660000000207615003640462020543 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include "xsk_def_xdp_prog.h" #define DEFAULT_QUEUE_IDS 64 struct { __uint(type, BPF_MAP_TYPE_XSKMAP); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, DEFAULT_QUEUE_IDS); } xsks_map SEC(".maps"); struct { __uint(priority, 20); __uint(XDP_PASS, 1); } XDP_RUN_CONFIG(xsk_def_prog); /* Program refcount, in order to work properly, * must be declared before any other global variables * and initialized with '1'. */ volatile int refcnt = 1; /* This is the program for post 5.3 kernels. */ SEC("xdp") int xsk_def_prog(struct xdp_md *ctx) { /* Make sure refcount is referenced by the program */ if (!refcnt) return XDP_PASS; /* A set entry here means that the corresponding queue_id * has an active AF_XDP socket bound to it. */ return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS); } char _license[] SEC("license") = "GPL"; __uint(xsk_prog_version, XSK_PROG_VERSION) SEC(XDP_METADATA_SECTION); xdp-tools-1.5.4/lib/libxdp/libxdp_internal.h0000644000175100001660000000751015003640462020377 0ustar runnerdocker#ifndef __LIBXDP_LIBXDP_INTERNAL_H #define __LIBXDP_LIBXDP_INTERNAL_H #include #include #include #include #include #include #include #define LIBXDP_HIDE_SYMBOL __attribute__((visibility("hidden"))) #define __unused __attribute__((unused)) #define __printf(a, b) __attribute__((format(printf, a, b))) static inline int try_snprintf(char *buf, size_t buf_len, const char *format, ...) { va_list args; int len; va_start(args, format); len = vsnprintf(buf, buf_len, format, args); va_end(args); if (len < 0) return -EINVAL; else if ((size_t)len >= buf_len) return -ENAMETOOLONG; return 0; } LIBXDP_HIDE_SYMBOL __printf(2, 3) void libxdp_print(enum libxdp_print_level level, const char *format, ...); #define __pr(level, fmt, ...) \ do { \ libxdp_print(level, "libxdp: " fmt, ##__VA_ARGS__); \ } while (0) #define pr_warn(fmt, ...) 
__pr(LIBXDP_WARN, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) __pr(LIBXDP_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LIBXDP_DEBUG, fmt, ##__VA_ARGS__) LIBXDP_HIDE_SYMBOL int check_xdp_prog_version(const struct btf *btf, const char *name, __u32 *version); LIBXDP_HIDE_SYMBOL int libxdp_check_kern_compat(void); #define min(x, y) ((x) < (y) ? x : y) #define max(x, y) ((x) > (y) ? x : y) #ifndef offsetof #define offsetof(type, member) ((size_t) & ((type *)0)->member) #endif #ifndef offsetofend #define offsetofend(TYPE, FIELD) (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) #endif #ifndef container_of #define container_of(ptr, type, member) \ ({ \ const typeof(((type *)0)->member) *__mptr = (ptr); \ (type *)((char *)__mptr - offsetof(type, member)); \ }) #endif #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) /* OPTS macros, from libbpf_internal.h */ static inline bool libxdp_is_mem_zeroed(const char *obj, size_t off_start, size_t off_end) { const char *p; for (p = obj + off_start; p < obj + off_end; p++) { if (*p) return false; } return true; } static inline bool libxdp_validate_opts(const char *opts, size_t opts_sz, size_t user_sz, const char *type_name) { if (user_sz < sizeof(size_t)) { pr_warn("%s size (%zu) is too small\n", type_name, user_sz); return false; } if (!libxdp_is_mem_zeroed(opts, opts_sz, user_sz)) { pr_warn("%s has non-zero extra bytes\n", type_name); return false; } return true; } #define OPTS_VALID(opts, type) \ (!(opts) || libxdp_validate_opts((const char *)opts, \ offsetofend(struct type, \ type##__last_field), \ (opts)->sz, #type)) #define OPTS_HAS(opts, field) \ ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field)) #define OPTS_GET(opts, field, fallback_value) \ (OPTS_HAS(opts, field) ? (opts)->field : fallback_value) #define OPTS_SET(opts, field, value) \ do { \ if (OPTS_HAS(opts, field)) \ (opts)->field = value; \ } while (0) #define OPTS_ZEROED(opts, last_nonzero_field) \ (!(opts) || libxdp_is_mem_zeroed((const void *)opts, \ offsetofend(typeof(*(opts)), \ last_nonzero_field), \ (opts)->sz)) /* handle direct returned errors */ static inline int libxdp_err(int ret) { if (ret < 0) errno = -ret; return ret; } /* handle error for pointer-returning APIs, err is assumed to be < 0 always */ static inline void *libxdp_err_ptr(int err, bool ret_null) { /* set errno on error, this doesn't break anything */ errno = -err; if (ret_null) return NULL; /* legacy: encode err as ptr */ return ERR_PTR(err); } LIBXDP_HIDE_SYMBOL int xdp_lock_acquire(void); LIBXDP_HIDE_SYMBOL int xdp_lock_release(int lock_fd); LIBXDP_HIDE_SYMBOL int xdp_attach_fd(int prog_fd, int old_fd, int ifindex, enum xdp_attach_mode mode); #endif /* __LIBXDP_LIBXDP_INTERNAL_H */ xdp-tools-1.5.4/lib/libxdp/README.org0000644000175100001660000005443015003640462016521 0ustar runnerdocker#+EXPORT_FILE_NAME: libxdp #+TITLE: libxdp #+OPTIONS: ^:nil #+MAN_CLASS_OPTIONS: :section-id "3\" \"DATE\" \"VERSION\" \"libxdp - library for loading XDP programs" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. There's also a Makefile rule to export it. * libxdp - library for attaching XDP programs and using AF_XDP sockets This directory contains the files for the =libxdp= library for attaching XDP programs to network interfaces and using AF_XDP sockets. 
The library is fairly lightweight and relies on =libbpf= to do the heavy lifting for processing eBPF object files etc. =Libxdp= provides two primary features on top of =libbpf=. The first is the ability to load multiple XDP programs in sequence on a single network device (which is not natively supported by the kernel). This support relies on the =freplace= functionality in the kernel, which makes it possible to attach an eBPF program as a replacement for a global function in another (already loaded) eBPF program. The second main feature is helper functions for configuring AF_XDP sockets as well as reading and writing packets from these sockets. Some of the functionality provided by libxdp depends on particular kernel features; see the "Kernel feature compatibility" section below for details. ** Using libxdp from an application Basic usage of libxdp from an application is quite straight forward. The following example loads, then unloads, an XDP program from the 'lo' interface: #+begin_src C #define IFINDEX 1 struct xdp_program *prog; int err; prog = xdp_program__open_file("my-program.o", "section_name", NULL); err = xdp_program__attach(prog, IFINDEX, XDP_MODE_NATIVE, 0); if (!err) xdp_program__detach(prog, IFINDEX, XDP_MODE_NATIVE, 0); xdp_program__close(prog); #+end_src The =xdp_program= structure is an opaque structure that represents a single XDP program. =libxdp= contains functions to create such a struct either from a BPF object file on disk, from a =libbpf= BPF object, or from an identifier of a program that is already loaded into the kernel: #+begin_src C struct xdp_program *xdp_program__from_bpf_obj(struct bpf_object *obj, const char *section_name); struct xdp_program *xdp_program__find_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__open_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__from_fd(int fd); struct xdp_program *xdp_program__from_id(__u32 prog_id); struct xdp_program *xdp_program__from_pin(const char *pin_path); #+end_src The functions that open a BPF object or file need the function name of the XDP program as well as the file name or object, since an ELF file can contain multiple XDP programs. The =xdp_program__find_file()= function takes a filename without a path, and will look for the object in =LIBXDP_OBJECT_PATH= which defaults to =/usr/lib/bpf= (or =/usr/lib64/bpf= on systems using a split library path). This is convenient for applications shipping pre-compiled eBPF object files. The =xdp_program__attach()= function will attach the program to an interface, building a dispatcher program to execute it. Multiple programs can be attached at once with =xdp_program__attach_multi()=; they will be sorted in order of their run priority, and execution from one program to the next will proceed based on the chain call actions defined for each program (see the *Program metadata* section below). Because the loading process involves modifying the attach type of the program, the attach functions only work with =struct xdp_program= objects that have not yet been loaded into the kernel. When using the attach functions to attach to an interface that already has an XDP program loaded, libxdp will attempt to add the program to the list of loaded programs. However, this may fail, either due to missing kernel support, or because the already-attached program was not loaded using a dispatcher compatible with libxdp. 
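As a concrete illustration, attaching two programs in a single operation could look like the following sketch (the object file and function names are placeholders, and error handling is elided):

#+begin_src C
#define IFINDEX 1

struct xdp_program *progs[2];
int err;

/* Open two programs; on attach, libxdp sorts them by run priority */
progs[0] = xdp_program__open_file("prog-a.o", "xdp_prog_a", NULL);
progs[1] = xdp_program__open_file("prog-b.o", "xdp_prog_b", NULL);

err = xdp_program__attach_multi(progs, 2, IFINDEX, XDP_MODE_NATIVE, 0);
if (!err)
        xdp_program__detach_multi(progs, 2, IFINDEX, XDP_MODE_NATIVE, 0);

xdp_program__close(progs[0]);
xdp_program__close(progs[1]);
#+end_src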
If the kernel support for incremental attach (merged in kernel 5.10) is missing, the only way to actually run multiple programs on a single interface is to attach them all at the same time with =xdp_program__attach_multi()=. If the existing program is not an XDP dispatcher, that program will have to be detached from the interface before libxdp can attach a new one. This can be done by calling =xdp_program__detach()= with a reference to the loaded program; but note that this will of course break any application relying on that other XDP program to be present. * Program metadata To support multiple XDP programs on the same interface, libxdp uses two pieces of metadata for each XDP program: Run priority and chain call actions. *** Run priority This is the priority of the program and is a simple integer used to sort programs when loading multiple programs onto the same interface. Programs that wish to run early (such as a packet filter) should set low values for this, while programs that want to run later (such as a packet forwarder or counter) should set higher values. Note that later programs are only run if the previous programs end with a return code that is part of its chain call actions (see below). If not specified, the default priority value is 50. *** Chain call actions These are the program return codes that the program indicate for packets that should continue processing. If the program returns one of these actions, later programs in the call chain will be run, whereas if it returns any other action, processing will be interrupted, and the XDP dispatcher will return the verdict immediately. If not set, this defaults to just XDP_PASS, which is likely the value most programs should use. *** Specifying metadata The metadata outlined above is specified as BTF information embedded in the ELF file containing the XDP program. The =xdp_helpers.h= file shipped with libxdp contains helper macros to include this information, which can be used as follows: #+begin_src C #include #include struct { __uint(priority, 10); __uint(XDP_PASS, 1); __uint(XDP_DROP, 1); } XDP_RUN_CONFIG(my_xdp_func); #+end_src This example specifies that the XDP program in =my_xdp_func= should have priority 10 and that its chain call actions are =XDP_PASS= and =XDP_DROP=. In a source file with multiple XDP programs in the same file, a definition like the above can be included for each program (main XDP function). Any program that does not specify any config information will use the default values outlined above. *** Inspecting and modifying metadata =libxdp= exposes the following functions that an application can use to inspect and modify the metadata on an XDP program. Modification is only possible before a program is attached on an interface. These functions won't modify the BTF information itself, but the new values will be stored as part of the program attachment. 
#+begin_src C unsigned int xdp_program__run_prio(const struct xdp_program *xdp_prog); int xdp_program__set_run_prio(struct xdp_program *xdp_prog, unsigned int run_prio); bool xdp_program__chain_call_enabled(const struct xdp_program *xdp_prog, enum xdp_action action); int xdp_program__set_chain_call_enabled(struct xdp_program *prog, unsigned int action, bool enabled); int xdp_program__print_chain_call_actions(const struct xdp_program *prog, char *buf, size_t buf_len); #+end_src * The dispatcher program To support multiple non-offloaded programs on the same network interface, =libxdp= uses a *dispatcher program* which is a small wrapper program that will call each component program in turn, inspect the return code, and then chain call to the next program based on the chain call actions of the previous program (see the *Program metadata* section above). While applications using =libxdp= do not need to know the details of the dispatcher program to just load an XDP program onto an interface, =libxdp= does expose the dispatcher and its attached component programs, which can be used to list the programs currently attached to an interface. The structure used for this is =struct xdp_multiprog=, which can only be constructed from the programs loaded on an interface based on ifindex. The API for getting a multiprog reference and iterating through the attached programs looks like this: #+begin_src C struct xdp_multiprog *xdp_multiprog__get_from_ifindex(int ifindex); struct xdp_program *xdp_multiprog__next_prog(const struct xdp_program *prog, const struct xdp_multiprog *mp); void xdp_multiprog__close(struct xdp_multiprog *mp); int xdp_multiprog__detach(struct xdp_multiprog *mp, int ifindex); enum xdp_attach_mode xdp_multiprog__attach_mode(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__main_prog(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__hw_prog(const struct xdp_multiprog *mp); bool xdp_multiprog__is_legacy(const struct xdp_multiprog *mp); #+end_src If a non-offloaded program is attached to the interface which =libxdp= doesn't recognise as a dispatcher program, an =xdp_multiprog= structure will still be returned, and =xdp_multiprog__is_legacy()= will return true for that program (note that this also holds true if only an offloaded program is loaded). A reference to that (regular) XDP program can be obtained by =xdp_multiprog__main_prog()=. If the program attached to the interface *is* a dispatcher program, =xdp_multiprog__main_prog()= will return a reference to the dispatcher program itself, which is mainly useful for obtaining other data about that program (such as the program ID). A reference to an offloaded program can be acquired using =xdp_multiprog__hw_prog()=. The function =xdp_multiprog__attach_mode()= returns the attach mode of the non-offloaded program; whether an offloaded program is attached should be checked through =xdp_multiprog__hw_prog()=. ** Pinning in bpffs The kernel will automatically detach component programs from the dispatcher once the last reference to them disappears. To prevent this from happening, =libxdp= will pin the component program references in =bpffs= before attaching the dispatcher to the network interface.
The pathnames generated for pinning are as follows: - /sys/fs/bpf/xdp/dispatch-IFINDEX-DID - dispatcher program for IFINDEX with BPF program ID DID - /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-prog - component program 0, program reference - /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-link - component program 0, bpf_link reference - /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-prog - component program 1, program reference - /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-link - component program 1, bpf_link reference - etc, up to ten component programs If set, the =LIBXDP_BPFFS= environment variable will override the location of =bpffs=, but the =xdp= subdirectory is always used. If no =bpffs= is mounted, libxdp will consult the environment variable =LIBXDP_BPFFS_AUTOMOUNT=. If this is set to =1=, libxdp will attempt to automount a bpffs. If not, libxdp will fall back to loading a single program without a dispatcher, as if the kernel did not support the features needed for multiprog attachment. * Using AF_XDP sockets Libxdp implements helper functions for configuring AF_XDP sockets as well as reading and writing packets from these sockets. AF_XDP sockets can be used to redirect packets to user-space at high rates from an XDP program. Note that this functionality used to reside in libbpf, but has now been moved over to libxdp as it is a better fit for this library. As of the 1.0 release of libbpf, the AF_XDP socket support will be removed and all future development will be performed in libxdp instead. For an overview of AF_XDP sockets, please refer to this Linux Plumbers paper (http://vger.kernel.org/lpc_net2018_talks/lpc18_pres_af_xdp_perf-v3.pdf) and the documentation in the Linux kernel (Documentation/networking/af_xdp.rst or https://www.kernel.org/doc/html/latest/networking/af_xdp.html). For an example on how to use the interface, take a look at the AF_XDP-example and AF_XDP-forwarding programs in the bpf-examples repository: https://github.com/xdp-project/bpf-examples. ** Control path Libxdp provides helper functions for creating and destroying umems and sockets as shown below. The first thing that a user generally wants to do is to create a umem area. This is the area that will contain all packets received and the ones that are going to be sent. After that, AF_XDP sockets can be created tied to this umem. These can either be sockets that have exclusive ownership of that umem through xsk_socket__create(), or ones shared with other sockets using xsk_socket__create_shared(). There is one option called XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD that can be set in the libxdp_flags field (also called libbpf_flags for compatibility reasons). This will make libxdp not load any XDP program or set any BPF maps, which is a must if users want to add their own XDP program. If there is already a socket created with socket(AF_XDP, SOCK_RAW, 0) that is not bound and not tied to any umem, the file descriptor of this socket can be used in the opts parameter of xsk_umem__create_opts(), which is the recommended way of creating a umem.
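For example, creating a umem with the opts interface could look like the following sketch (the =UMEM_SIZE= constant and the page-aligned buffer allocation are up to the caller; error handling is abbreviated):

#+begin_src C
struct xsk_ring_prod fq;
struct xsk_ring_cons cq;
struct xsk_umem *umem;
void *buf; /* UMEM_SIZE bytes of page-aligned memory allocated by the caller */

DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts,
                    .size = UMEM_SIZE,
                    .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                    .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                    .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
);

umem = xsk_umem__create_opts(buf, &fq, &cq, &opts);
if (!umem)
        exit(EXIT_FAILURE); /* errno is set on failure */
#+end_src

The full set of creation and deletion functions is: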
#+begin_src C struct xsk_umem *xsk_umem__create_opts(void *umem_area, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, struct xsk_umem_opts *opts); int xsk_umem__create(struct xsk_umem **umem, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); int xsk_umem__create_with_fd(struct xsk_umem **umem, int fd, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); int xsk_socket__create(struct xsk_socket **xsk, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *config); int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_socket_config *config); int xsk_umem__delete(struct xsk_umem *umem); void xsk_socket__delete(struct xsk_socket *xsk); #+end_src There are also two helper functions to get the file descriptor of a umem or a socket. These are needed when using standard Linux syscalls such as poll(), recvmsg(), sendto(), etc. #+begin_src C int xsk_umem__fd(const struct xsk_umem *umem); int xsk_socket__fd(const struct xsk_socket *xsk); #+end_src The control path also provides two APIs for setting up AF_XDP sockets when the process that is going to use the AF_XDP socket is non-privileged. These two functions perform the operations that require privileges and can be executed from some form of control process that has the necessary privileges. The xsk_socket__create() call executed on the non-privileged process will then skip these two steps. For an example on how to use these, please take a look at the AF_XDP-example program in the bpf-examples repository: https://github.com/xdp-project/bpf-examples/tree/main/AF_XDP-example. #+begin_src C int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd); int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd); #+end_src To further reduce the required level of privileges, an AF_XDP socket can be created beforehand with socket(AF_XDP, SOCK_RAW, 0) and passed to a non-privileged process. This socket can be used in xsk_umem__create_opts() and later in xsk_socket__create() with the created umem. xsk_socket__create_shared() would still require privileges for AF_XDP socket creation. ** Data path For performance reasons, all the data path functions are static inline functions found in the xsk.h header file so they can be optimized into the target application binary for best possible performance. There are four FIFO rings of two main types: producer rings (fill and Tx) and consumer rings (Rx and completion). The producer rings use xsk_ring_prod functions and consumer rings use xsk_ring_cons functions. For producer rings, you start with =reserving= one or more slots in a producer ring and then when they have been filled out, you =submit= them so that the kernel will act on them. For a consumer ring, you =peek= if there are any new packets in the ring and if so you can read them from the ring. Once you are done reading them, you =release= them back to the kernel so it can use them for new packets. There is also a =cancel= operation for consumer rings if the application does not want to consume all packets received with the peek operation.
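As an illustration of this flow, a minimal receive loop could look like the following sketch (assuming a bound socket with its Rx ring in =rx= and an aligned-mode umem mapped at =umem_area=; error handling is elided):

#+begin_src C
__u32 idx_rx = 0, rcvd, i;

/* See how many packets the kernel has placed on the Rx ring */
rcvd = xsk_ring_cons__peek(&rx, 64, &idx_rx);
for (i = 0; i < rcvd; i++) {
        const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx_rx + i);
        void *pkt = xsk_umem__get_data(umem_area, desc->addr);

        /* ... process desc->len bytes at pkt ... */
}

/* Hand the consumed slots back to the kernel for reuse */
if (rcvd)
        xsk_ring_cons__release(&rx, rcvd);
#+end_src

The signatures of the ring operations are: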
#+begin_src C __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx); void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb); __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx); void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb); void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb); #+end_src The functions below are used for reading and writing the descriptors of the rings. xsk_ring_prod__fill_addr() and xsk_ring_prod__tx_desc() *write* entries in the fill and Tx rings respectively, while xsk_ring_cons__comp_addr() and xsk_ring_cons__rx_desc() *read* entries from the completion and Rx rings respectively. The =idx= is the parameter returned in the xsk_ring_prod__reserve() or xsk_ring_cons__peek() calls. To advance to the next entry, simply do =idx++=. #+begin_src C __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx); struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx); const __u64 *xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx); const struct xdp_desc *xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx); #+end_src The xsk_umem functions are used to get a pointer to the packet data itself, always located inside the umem. In the default aligned mode, you can get the addr variable straight from the Rx descriptor. But in unaligned mode, you need to use the last three functions below, as the offset used is carried in the upper 16 bits of the addr. Therefore, you cannot use the addr straight from the descriptor in the unaligned case. #+begin_src C void *xsk_umem__get_data(void *umem_area, __u64 addr); __u64 xsk_umem__extract_addr(__u64 addr); __u64 xsk_umem__extract_offset(__u64 addr); __u64 xsk_umem__add_offset_to_addr(__u64 addr); #+end_src There is one more function in the data path: the one that checks whether the need_wakeup flag is set. Use of this flag is highly encouraged and should be enabled by setting the =XDP_USE_NEED_WAKEUP= bit in the =xdp_bind_flags= field that is provided to the xsk_socket__create[_shared]() calls. If this function returns true, then you need to call =recvmsg()=, =sendto()=, or =poll()= depending on the situation. =recvmsg()= if you are *receiving*, or =sendto()= if you are *sending*. =poll()= can be used for both cases and provides the ability to sleep too, as with any other socket. But note that poll is a slower operation than the other two. #+begin_src C int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r); #+end_src For an example on how to use all these APIs, take a look at the AF_XDP-example and AF_XDP-forwarding programs in the bpf-examples repository: https://github.com/xdp-project/bpf-examples. * Kernel and BPF program feature compatibility The features exposed by libxdp rely on certain kernel versions and BPF features to work. To get the full benefit of all features, libxdp needs to be used with kernel 5.10 or newer, unless the commits mentioned below have been backported. However, libxdp will probe the kernel and transparently fall back to legacy loading procedures, so it is possible to use the library with older versions, although some features will be unavailable, as detailed below. The ability to attach multiple BPF programs to a single interface relies on the kernel "BPF program extension" feature which was introduced by commit be8704ff07d2 ("bpf: Introduce dynamic program extensions") in the upstream kernel and first appeared in kernel release 5.6.
To *incrementally* attach multiple programs, a further refinement added by commit 4a1e7c0c63e0 ("bpf: Support attaching freplace programs to multiple attach points") is needed; this first appeared in the upstream kernel version 5.10. The functionality relies on the "BPF trampolines" feature which is unfortunately only available on the x86_64 architecture. In other words, kernels before 5.6 can only attach a single XDP program to each interface, kernels 5.6+ can attach multiple programs if they are all attached at the same time, and kernels 5.10 have full support for XDP multiprog on x86_64. On other architectures, only a single program can be attached to each interface. To load AF_XDP programs, kernel support for AF_XDP sockets needs to be included and enabled in the kernel build. In addition, when using AF_XDP sockets, an XDP program is also loaded on the interface. The XDP program used for this by libxdp requires the ability to do map lookups into XSK maps, which was introduced with commit fada7fdc83c0 ("bpf: Allow bpf_map_lookup_elem() on an xskmap") in kernel 5.3. This means that the minimum required kernel version for using AF_XDP is kernel 5.3; however, for the AF_XDP XDP program to co-exist with other programs, the same constraints for multiprog applies as outlined above. Note that some Linux distributions backport features to earlier kernel versions, especially in enterprise kernels; for instance, Red Hat Enterprise Linux kernels include everything needed for libxdp to function since RHEL 8.5. Finally, XDP programs loaded using the multiprog facility must include type information (using the BPF Type Format, BTF). To get this, compile the programs with a recent version of Clang/LLVM (version 10+), and enable debug information when compiling (using the =-g= option). * BUGS Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues * AUTHORS libxdp and this man page were written by Toke Høiland-Jørgensen. AF_XDP support and documentation was contributed by Magnus Karlsson. xdp-tools-1.5.4/lib/libxdp/tests/0000755000175100001660000000000015003640462016207 5ustar runnerdockerxdp-tools-1.5.4/lib/libxdp/tests/check_kern_compat.c0000644000175100001660000000032615003640462022013 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include "test_utils.h" #include "../libxdp_internal.h" int main(__unused int argc, __unused char** argv) { silence_libbpf_logging(); return libxdp_check_kern_compat(); } xdp-tools-1.5.4/lib/libxdp/tests/test_xsk_non_privileged.c0000644000175100001660000001163115003640462023305 0ustar runnerdocker// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) #include #include #include #include #include #include #include #include #include "test_utils.h" #include #include #define NUM_DESCS ((XSK_RING_PROD__DEFAULT_NUM_DESCS \ + XSK_RING_CONS__DEFAULT_NUM_DESCS) * 2) #define UMEM_SIZE (NUM_DESCS * XSK_UMEM__DEFAULT_FRAME_SIZE) static void run_privileged_operations(int ifindex, int queue_id, int *sock_fd) { int xsks_map_fd = -1; if (xsk_setup_xdp_prog(ifindex, &xsks_map_fd) || xsks_map_fd < 0) { perror("xsk_setup_xdp_prog failed"); exit(EXIT_FAILURE); } *sock_fd = socket(AF_XDP, SOCK_RAW, 0); if (*sock_fd < 0) { perror("socket(AF_XDP, ...) failed"); exit(EXIT_FAILURE); } /* This call requires extra capabilities in older kernels, so keeping * it in a privileged section. And it's not supported on even older * kernels, so not failing if that's the case. 
*/ if (bpf_map_update_elem(xsks_map_fd, &queue_id, sock_fd, 0) && errno != EOPNOTSUPP) { perror("bpf_map_update_elem failed"); exit(EXIT_FAILURE); } close(xsks_map_fd); } static void update_rlimit_memlock(void) { struct rlimit rlim = { .rlim_cur = UMEM_SIZE, .rlim_max = UMEM_SIZE }; if (setrlimit(RLIMIT_MEMLOCK, &rlim)) { perror("setrlimit(RLIMIT_MEMLOCK) failed"); exit(EXIT_FAILURE); } } static void drop_capabilities(void) { if (capng_get_caps_process()) { perror("capng_get_caps_process failed"); exit(EXIT_FAILURE); } capng_clear(CAPNG_SELECT_BOTH); if (capng_apply(CAPNG_SELECT_BOTH)) { perror("capng_apply failed"); exit(EXIT_FAILURE); } } static void run_non_privileged_preconfig(const char *ifname, const char *ifname2, int sock_fd) { /* This call requires CAP_NET_RAW on kernels older than 5.7, * so not checking the result. It may fail or not, we do not * rely on that much. */ setsockopt(sock_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, strlen(ifname)); /* The second update should always fail because it always * requires CAP_NET_RAW. */ if (!setsockopt(sock_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname2, strlen(ifname2))) { perror("setsockopt(SO_BINDTODEVICE, ifname2) succeeded"); exit(EXIT_FAILURE); } } static struct xsk_umem *create_umem_non_privileged(int sock_fd) { struct xsk_umem *umem = NULL; struct xsk_ring_cons cq; struct xsk_ring_prod fq; void *b; if (posix_memalign(&b, getpagesize(), UMEM_SIZE)) { perror("posix_memalign failed"); exit(EXIT_FAILURE); } /* This variant requires CAP_NET_RAW, so should fail. */ DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts_cap, .size = UMEM_SIZE, .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, ); umem = xsk_umem__create_opts(b, &fq, &cq, &opts_cap); if (umem) { perror("xsk_umem__create_opts succeeded"); exit(EXIT_FAILURE); } /* This variant shouldn't need any capabilities, so should pass. 
*/ DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts, .fd = sock_fd, .size = UMEM_SIZE, .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, ); umem = xsk_umem__create_opts(b, &fq, &cq, &opts); if (!umem) { perror("xsk_umem__create_opts failed"); exit(EXIT_FAILURE); } return umem; } static struct xsk_socket *create_xsk_non_privileged(const char *ifname, struct xsk_umem *umem, int queue_id) { struct xsk_socket *xsk = NULL; struct xsk_ring_cons rx; struct xsk_ring_prod tx; DECLARE_LIBXDP_OPTS(xsk_socket_opts, opts, .rx = &rx, .tx = &tx, .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .libxdp_flags = XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD, .bind_flags = XDP_USE_NEED_WAKEUP, .xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST, ); xsk = xsk_socket__create_opts(ifname, queue_id, umem, &opts); if (!xsk) { perror("xsk_socket__create_opts failed"); exit(EXIT_FAILURE); } return xsk; } int main(int argc, const char *argv[]) { const char *ifname, *ifname2; struct xsk_socket *xsk; struct xsk_umem *umem; int ifindex, queue_id; int sock_fd; silence_libbpf_logging(); if (argc < 3) { printf("Usage: %s \n", argv[0]); exit(EXIT_FAILURE); } update_rlimit_memlock(); ifname = argv[1]; ifname2 = argv[2]; queue_id = 0; ifindex = if_nametoindex(ifname); if (!ifindex) { perror("if_nametoindex(ifname) failed"); exit(EXIT_FAILURE); } if (!if_nametoindex(ifname2)) { perror("if_nametoindex(ifname2) failed"); exit(EXIT_FAILURE); } run_privileged_operations(ifindex, queue_id, &sock_fd); drop_capabilities(); run_non_privileged_preconfig(ifname, ifname2, sock_fd); umem = create_umem_non_privileged(sock_fd); xsk = create_xsk_non_privileged(ifname, umem, queue_id); xsk_socket__delete(xsk); return EXIT_SUCCESS; } xdp-tools-1.5.4/lib/libxdp/tests/test_xsk_umem_flags.c0000644000175100001660000000650215003640462022421 0ustar runnerdocker// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) #include #include #include #include #include #include #include #include #include "test_utils.h" #include #include #define NUM_DESCS ((XSK_RING_PROD__DEFAULT_NUM_DESCS \ + XSK_RING_CONS__DEFAULT_NUM_DESCS) * 2) #define UMEM_SIZE (NUM_DESCS * XSK_UMEM__DEFAULT_FRAME_SIZE) static void update_rlimit_memlock(void) { struct rlimit rlim = { .rlim_cur = UMEM_SIZE, .rlim_max = UMEM_SIZE }; if (setrlimit(RLIMIT_MEMLOCK, &rlim)) { perror("setrlimit(RLIMIT_MEMLOCK) failed"); exit(EXIT_FAILURE); } } static struct xsk_umem *create_umem_with_flags() { struct xsk_umem *umem = NULL; struct xsk_ring_cons cq; struct xsk_ring_prod fq; void *b; if (posix_memalign(&b, getpagesize(), UMEM_SIZE)) { perror("posix_memalign failed"); exit(EXIT_FAILURE); } /* This variant uses a frame_size that is not a power of 2 without * flags, should fail. */ DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts_no_flags, .size = UMEM_SIZE - 1, .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE - 1, ); umem = xsk_umem__create_opts(b, &fq, &cq, &opts_no_flags); if (umem) { perror("xsk_umem__create_opts with odd frame_size " "unexpectedly succeeded"); exit(EXIT_FAILURE); } /* This variant uses a frame_size that is not a power of 2 with flags, * should succeed. * * A failure here may indicate a mismatch in struct xdp_umem_reg * between user space and kernel space, and that fall back processing * is happening in the kernel. (Ref: LP: #2098005 and PR #477). 
*/ DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts, .size = UMEM_SIZE - 1, .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE - 1, .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG, ); umem = xsk_umem__create_opts(b, &fq, &cq, &opts); if (!umem) { perror("xsk_umem__create_opts failed"); exit(EXIT_FAILURE); } return umem; } static struct xsk_socket *create_xsk(const char *ifname, struct xsk_umem *umem, int queue_id) { struct xsk_socket *xsk = NULL; struct xsk_ring_cons rx; struct xsk_ring_prod tx; DECLARE_LIBXDP_OPTS(xsk_socket_opts, opts, .rx = &rx, .tx = &tx, .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .libxdp_flags = XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD, .bind_flags = XDP_USE_NEED_WAKEUP, .xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST, ); xsk = xsk_socket__create_opts(ifname, queue_id, umem, &opts); if (!xsk) { perror("xsk_socket__create_opts failed"); exit(EXIT_FAILURE); } return xsk; } int main(int argc, const char *argv[]) { struct xsk_socket *xsk; struct xsk_umem *umem; int ifindex, queue_id; const char *ifname; silence_libbpf_logging(); if (argc < 2) { printf("Usage: %s \n", argv[0]); exit(EXIT_FAILURE); } update_rlimit_memlock(); ifname = argv[1]; queue_id = 0; ifindex = if_nametoindex(ifname); if (!ifindex) { perror("if_nametoindex(ifname) failed"); exit(EXIT_FAILURE); } umem = create_umem_with_flags(); xsk = create_xsk(ifname, umem, queue_id); xsk_socket__delete(xsk); return EXIT_SUCCESS; } xdp-tools-1.5.4/lib/libxdp/tests/xdp_pass.c0000644000175100001660000000031615003640462020174 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include SEC("xdp") int xdp_pass(struct xdp_md *ctx) { return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/libxdp/tests/test_dispatcher_versions.c0000644000175100001660000001633715003640462023502 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include "test_utils.h" #include "../libxdp_internal.h" #include "xdp_dispatcher_v1.h" #include #include #include #ifndef PATH_MAX #define PATH_MAX 4096 #endif #define BPFFS_DIR "/sys/fs/bpf/xdp" #define PROG_RUN_PRIO 42 #define PROG_CHAIN_CALL_ACTIONS (1 << XDP_DROP) int get_prog_id(int prog_fd) { struct bpf_prog_info info = {}; __u32 len = sizeof(info); int err; err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); if (err) return -errno; return info.id; } int load_dispatcher_v1(int ifindex) { struct xdp_dispatcher_config_v1 dispatcher_config = {}; struct bpf_object *obj_dispatcher, *obj_prog = NULL; DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); struct bpf_program *dispatcher_prog, *xdp_prog; int ret, btf_id, lfd = -1, dispatcher_id; char pin_path[PATH_MAX], buf[PATH_MAX]; const char *attach_func = "prog0"; struct bpf_map *map; if (!ifindex) return -ENOENT; obj_dispatcher = bpf_object__open("xdp_dispatcher_v1.o"); if (!obj_dispatcher) return -errno; btf_id = btf__find_by_name_kind(bpf_object__btf(obj_dispatcher), attach_func, BTF_KIND_FUNC); if (btf_id <= 0) { ret = -ENOENT; goto out; } opts.target_btf_id = btf_id; map = bpf_object__next_map(obj_dispatcher, NULL); if (!map) { ret = -ENOENT; goto out; } dispatcher_prog = bpf_object__find_program_by_name(obj_dispatcher, "xdp_dispatcher"); if (!dispatcher_prog) { ret = -errno; goto out; } dispatcher_config.num_progs_enabled = 1; dispatcher_config.chain_call_actions[0] = PROG_CHAIN_CALL_ACTIONS; 
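/* chain_call_actions is a bitmap of XDP return codes that allow execution to
 * continue to the next program in the dispatcher; run_prios mirrors the
 * per-program run priority that libxdp normally derives from XDP_RUN_CONFIG
 * metadata. The arbitrary test values set here are verified on read-back in
 * check_old_dispatcher() below. */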
dispatcher_config.run_prios[0] = PROG_RUN_PRIO; ret = bpf_map__set_initial_value(map, &dispatcher_config, sizeof(dispatcher_config)); if (ret) goto out; ret = bpf_object__load(obj_dispatcher); if (ret) goto out; dispatcher_id = get_prog_id(bpf_program__fd(dispatcher_prog)); if (dispatcher_id < 0) { ret = dispatcher_id; goto out; } obj_prog = bpf_object__open("xdp_pass.o"); if (!obj_prog) { ret = -errno; goto out; } xdp_prog = bpf_object__find_program_by_name(obj_prog, "xdp_pass"); if (!xdp_prog) { ret = -errno; goto out; } ret = bpf_program__set_attach_target(xdp_prog, bpf_program__fd(dispatcher_prog), attach_func); if (ret) goto out; bpf_program__set_type(xdp_prog, BPF_PROG_TYPE_EXT); bpf_program__set_expected_attach_type(xdp_prog, 0); ret = bpf_object__load(obj_prog); if (ret) goto out; lfd = bpf_link_create(bpf_program__fd(xdp_prog), bpf_program__fd(dispatcher_prog), 0, &opts); if (lfd < 0) { ret = -errno; goto out; } ret = try_snprintf(pin_path, sizeof(pin_path), "%s/dispatch-%d-%d", BPFFS_DIR, ifindex, dispatcher_id); if (ret) goto out; ret = mkdir(BPFFS_DIR, S_IRWXU); if (ret && errno != EEXIST) { ret = -errno; printf("mkdir err (%s): %s\n", BPFFS_DIR, strerror(-ret)); goto out; } ret = mkdir(pin_path, S_IRWXU); if (ret) { ret = -errno; printf("mkdir err (%s): %s\n", pin_path, strerror(-ret)); goto out; } ret = try_snprintf(buf, sizeof(buf), "%s/prog0-link", pin_path); if (ret) goto err_unpin; ret = bpf_obj_pin(lfd, buf); if (ret) goto err_unpin; ret = try_snprintf(buf, sizeof(buf), "%s/prog0-prog", pin_path); if (ret) goto err_unpin; ret = bpf_obj_pin(bpf_program__fd(xdp_prog), buf); if (ret) goto err_unpin; ret = xdp_attach_fd(bpf_program__fd(dispatcher_prog), -1, ifindex, XDP_MODE_NATIVE); if (ret) goto err_unpin; out: if (lfd >= 0) close(lfd); bpf_object__close(obj_dispatcher); bpf_object__close(obj_prog); return ret; err_unpin: if (!try_snprintf(buf, sizeof(buf), "%s/prog0-link", pin_path)) unlink(buf); if (!try_snprintf(buf, sizeof(buf), "%s/prog0-prog", pin_path)) unlink(buf); rmdir(pin_path); goto out; } int check_old_dispatcher(int ifindex) { struct xdp_multiprog *mp = NULL; struct xdp_program *xdp_prog; char buf[100]; int ret; ret = load_dispatcher_v1(ifindex); if (ret) goto out; mp = xdp_multiprog__get_from_ifindex(ifindex); ret = libxdp_get_error(mp); if (ret) goto out; if (xdp_multiprog__is_legacy(mp)) { printf("Got unexpected legacy multiprog\n"); ret = -EINVAL; goto out; } if (xdp_multiprog__program_count(mp) != 1) { printf("Expected 1 attached program, got %d\n", xdp_multiprog__program_count(mp)); ret = -EINVAL; goto out; } xdp_prog = xdp_multiprog__next_prog(NULL, mp); if (!xdp_prog) { ret = -errno; goto out; } if (strcmp(xdp_program__name(xdp_prog), "xdp_pass")) { printf("Expected xdp_pass program, got %s\n", xdp_program__name(xdp_prog)); ret = -EINVAL; goto out; } if (xdp_program__run_prio(xdp_prog) != PROG_RUN_PRIO) { printf("Expected run prio %d got %d\n", PROG_RUN_PRIO, xdp_program__run_prio(xdp_prog)); ret = -EINVAL; goto out; } ret = xdp_program__print_chain_call_actions(xdp_prog, buf, sizeof(buf)); if (ret) goto out; if (strcmp(buf, "XDP_DROP")) { printf("Expected actions XDP_DROP, got %s\n", buf); ret = -EINVAL; goto out; } xdp_prog = xdp_program__open_file("xdp_pass.o", "xdp", NULL); ret = libxdp_get_error(xdp_prog); if (ret) goto out; ret = xdp_program__attach(xdp_prog, ifindex, XDP_MODE_NATIVE, 0); xdp_program__close(xdp_prog); if (!ret) { printf("Shouldn't have been able to attach a new program to ifindex!\n"); ret = -EINVAL; goto out; } ret = 0; out: if
(mp) xdp_multiprog__detach(mp); xdp_multiprog__close(mp); return ret; } static void usage(char *progname) { fprintf(stderr, "Usage: %s <ifname>\n", progname); exit(EXIT_FAILURE); } int main(int argc, char **argv) { int ifindex, ret; char *envval; envval = secure_getenv("VERBOSE_TESTS"); silence_libbpf_logging(); if (envval && envval[0] == '1') verbose_libxdp_logging(); else silence_libxdp_logging(); if (argc != 2) usage(argv[0]); ifindex = if_nametoindex(argv[1]); ret = check_old_dispatcher(ifindex); return ret; } xdp-tools-1.5.4/lib/libxdp/tests/xdp_dispatcher_v1.c0000644000175100001660000000174415003640462021770 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include "xdp_dispatcher_v1.h" #define XDP_METADATA_SECTION "xdp_metadata" #define XDP_DISPATCHER_VERSION_V1 1 #define XDP_DISPATCHER_RETVAL 31 static volatile const struct xdp_dispatcher_config_v1 conf = {}; __attribute__ ((noinline)) int prog0(struct xdp_md *ctx) { volatile int ret = XDP_DISPATCHER_RETVAL; if (!ctx) return XDP_ABORTED; return ret; } __attribute__ ((noinline)) SEC("xdp") int xdp_dispatcher(struct xdp_md *ctx) { __u8 num_progs_enabled = conf.num_progs_enabled; int ret; if (num_progs_enabled < 1) goto out; ret = prog0(ctx); if (!((1U << ret) & conf.chain_call_actions[0])) return ret; out: return XDP_PASS; } char _license[] SEC("license") = "GPL"; __uint(dispatcher_version, XDP_DISPATCHER_VERSION_V1) SEC(XDP_METADATA_SECTION); xdp-tools-1.5.4/lib/libxdp/tests/test_xdp_frags.c0000644000175100001660000002064015003640462021371 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include "test_utils.h" #include #include # define ARRAY_SIZE(_x) (sizeof(_x) / sizeof((_x)[0])) static bool kern_compat; static struct xdp_program *load_prog(void) { DECLARE_LIBXDP_OPTS(xdp_program_opts, opts, .prog_name = "xdp_pass", .find_filename = "xdp-dispatcher.o", ); return xdp_program__create(&opts); } static int check_attached_progs(int ifindex, int count, bool frags) { struct xdp_multiprog *mp; int ret; /* If the kernel does not support frags, we always expect * frags support to be disabled on a returned dispatcher */ if (!kern_compat) frags = false; mp = xdp_multiprog__get_from_ifindex(ifindex); ret = libxdp_get_error(mp); if (ret) { fprintf(stderr, "Couldn't get multiprog on ifindex %d: %s\n", ifindex, strerror(-ret)); return ret; } ret = -EINVAL; if (xdp_multiprog__is_legacy(mp)) { fprintf(stderr, "Found legacy prog on ifindex %d\n", ifindex); goto out; } if (xdp_multiprog__program_count(mp) != count) { fprintf(stderr, "Expected %d programs loaded on ifindex %d, found %d\n", count, ifindex, xdp_multiprog__program_count(mp)); goto out; } if (xdp_multiprog__xdp_frags_support(mp) != frags) { fprintf(stderr, "Multiprog on ifindex %d %s frags, expected %s\n", ifindex, xdp_multiprog__xdp_frags_support(mp) ? "supports" : "does not support", frags ? "support" : "no support"); goto out; } ret = 0; out: xdp_multiprog__close(mp); return ret; } static void print_test_result(const char *func, int ret) { fflush(stderr); fprintf(stderr, "%s:\t%s\n", func, ret ?
"FAILED" : "PASSED"); fflush(stdout); } static int load_attach_prog(struct xdp_program **prog, int ifindex, bool frags) { int ret; *prog = load_prog(); if (!*prog) { ret = -errno; fprintf(stderr, "Couldn't load program: %s\n", strerror(-ret)); return ret; } ret = xdp_program__set_xdp_frags_support(*prog, frags); if (ret) return ret; return xdp_program__attach(*prog, ifindex, XDP_MODE_NATIVE, 0); } static int _check_load(int ifindex, bool frags, bool should_succeed) { struct xdp_program *prog = NULL; bool attached; int ret; ret = load_attach_prog(&prog, ifindex, frags); attached = !ret; if (attached != should_succeed) { ret = -EINVAL; goto out; } if (should_succeed) ret = check_attached_progs(ifindex, 1, frags); else ret = 0; out: if (attached) xdp_program__detach(prog, ifindex, XDP_MODE_NATIVE, 0); xdp_program__close(prog); return ret; } static int check_load_frags(int ifindex_bigmtu, int ifindex_smallmtu) { int ret = _check_load(ifindex_smallmtu, true, true); if (!ret && ifindex_bigmtu) _check_load(ifindex_bigmtu, true, true); print_test_result(__func__, ret); return ret; } static int check_load_nofrags_success(int ifindex) { int ret = _check_load(ifindex, false, true); print_test_result(__func__, ret); return ret; } static int check_load_nofrags_fail(int ifindex) { int ret = _check_load(ifindex, false, false); print_test_result(__func__, ret); return ret; } static int check_load_frags_multi(int ifindex) { struct xdp_program *prog1 = NULL, *prog2 = NULL; int ret; ret = load_attach_prog(&prog1, ifindex, true); if (ret) goto out; ret = load_attach_prog(&prog2, ifindex, true); if (ret) goto out_prog1; ret = check_attached_progs(ifindex, 2, true); xdp_program__detach(prog2, ifindex, XDP_MODE_NATIVE, 0); out_prog1: xdp_program__detach(prog1, ifindex, XDP_MODE_NATIVE, 0); out: xdp_program__close(prog2); xdp_program__close(prog1); print_test_result(__func__, ret); return ret; } static int check_load_mix_small(int ifindex) { struct xdp_program *prog1 = NULL, *prog2 = NULL; int ret; ret = load_attach_prog(&prog1, ifindex, true); if (ret) goto out; /* First program attached, dispatcher supports frags */ ret = check_attached_progs(ifindex, 1, true); if (ret) goto out; ret = load_attach_prog(&prog2, ifindex, false); if (ret) goto out_prog1; /* Mixed program attachment, dispatcher should not support frags */ ret = check_attached_progs(ifindex, 2, false); ret = xdp_program__detach(prog2, ifindex, XDP_MODE_NATIVE, 0) || ret; if (ret) goto out_prog1; /* Second program removed, back to frags-only */ ret = check_attached_progs(ifindex, 1, true) || ret; out_prog1: xdp_program__detach(prog1, ifindex, XDP_MODE_NATIVE, 0); out: xdp_program__close(prog2); xdp_program__close(prog1); print_test_result(__func__, ret); return ret; } static int check_load_mix_big(int ifindex) { struct xdp_program *prog1 = NULL, *prog2 = NULL; int ret; ret = load_attach_prog(&prog1, ifindex, true); if (ret) goto out; /* First program attached, dispatcher supports frags */ ret = check_attached_progs(ifindex, 1, true); if (ret) goto out; /* Second non-frags program should fail on big-MTU device */ ret = load_attach_prog(&prog2, ifindex, false); if (!ret) { xdp_program__detach(prog2, ifindex, XDP_MODE_NATIVE, 0); ret = -EINVAL; goto out_prog1; } /* Still only a single program loaded, with frags support */ ret = check_attached_progs(ifindex, 1, true); out_prog1: xdp_program__detach(prog1, ifindex, XDP_MODE_NATIVE, 0); out: xdp_program__close(prog2); xdp_program__close(prog1); print_test_result(__func__, ret); return ret; } static bool 
check_frags_compat(void) { struct xdp_program *test_prog; struct bpf_program *prog; struct bpf_object *obj; bool ret = false; int err; test_prog = load_prog(); if (!test_prog) return false; obj = xdp_program__bpf_obj(test_prog); if (!obj) goto out; prog = bpf_object__find_program_by_name(obj, "xdp_pass"); if (!prog) goto out; bpf_program__set_flags(prog, BPF_F_XDP_HAS_FRAGS); err = bpf_object__load(obj); if (!err) { printf("Kernel supports XDP programs with frags\n"); ret = true; } else { printf("Kernel DOES NOT support XDP programs with frags\n"); } fflush(stdout); out: xdp_program__close(test_prog); return ret; } static void usage(char *progname) { fprintf(stderr, "Usage: %s <ifname_bigmtu> <ifname_smallmtu>\n", progname); exit(EXIT_FAILURE); } int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; int ifindex_bigmtu, ifindex_smallmtu, ret; if (setrlimit(RLIMIT_MEMLOCK, &r)) { fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n", strerror(errno)); exit(EXIT_FAILURE); } char *envval; envval = secure_getenv("VERBOSE_TESTS"); silence_libbpf_logging(); if (envval && envval[0] == '1') verbose_libxdp_logging(); else silence_libxdp_logging(); if (argc != 3) usage(argv[0]); ifindex_bigmtu = if_nametoindex(argv[1]); ifindex_smallmtu = if_nametoindex(argv[2]); if (!ifindex_bigmtu || !ifindex_smallmtu) { fprintf(stderr, "Interface '%s' or '%s' not found.\n", argv[1], argv[2]); usage(argv[0]); } kern_compat = check_frags_compat(); ret = check_load_frags(kern_compat ? ifindex_bigmtu : 0, ifindex_smallmtu); ret = check_load_nofrags_success(ifindex_smallmtu) || ret; if (kern_compat) { ret = check_load_nofrags_fail(ifindex_bigmtu) || ret; ret = check_load_frags_multi(ifindex_bigmtu) || ret; ret = check_load_mix_big(ifindex_bigmtu) || ret; } ret = check_load_mix_small(ifindex_smallmtu) || ret; return ret; } xdp-tools-1.5.4/lib/libxdp/tests/test_xsk_refcnt.c0000644000175100001660000001574415003640462021573 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) #include #include #include #include #include #include #include #include #include #include "test_utils.h" #include #include typedef __u64 u64; typedef __u32 u32; typedef __u16 u16; typedef __u8 u8; #define MAX_EVENTS 10 #define MAX_NUM_QUEUES 4 #define TEST_NAME_LENGTH 128 struct xsk_umem_info { struct xsk_ring_prod fq; struct xsk_ring_cons cq; struct xsk_umem *umem; void *buffer; }; struct xsk_socket_info { struct xsk_ring_cons rx; struct xsk_umem_info *umem; struct xsk_socket *xsk; }; /* Event holds socket operations that are run concurrently * and in theory can produce a race condition */ struct xsk_test_event { u32 num_create; u32 num_delete; u32 create_qids[MAX_NUM_QUEUES]; /* QIDs for sockets being created in this event */ u32 delete_qids[MAX_NUM_QUEUES]; /* QIDs for sockets being deleted in this event */ }; struct xsk_test { char name[TEST_NAME_LENGTH]; u32 num_events; struct xsk_test_event events[MAX_EVENTS]; }; /* Tests that use fewer queues must come first, * so we can run all possible tests on VMs with * a small number of CPUs */ static struct xsk_test all_tests[] = { { "Single socket created and deleted", .num_events = 2, .events = {{ .num_create = 1, .create_qids = {0} }, { .num_delete = 1, .delete_qids = {0} } }}, { "2 sockets, created and deleted sequentially", .num_events = 4, .events = {{ .num_create = 1, .create_qids = {0} }, { .num_create = 1, .create_qids = {1} }, { .num_delete = 1, .delete_qids = {0} }, { .num_delete = 1, .delete_qids = {1} } }}, { "2 sockets, created sequentially and deleted asynchronously",
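/* the two deletions in the final event run concurrently, racing the refcount drop to zero */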
.num_events = 3, .events = {{ .num_create = 1, .create_qids = {0} }, { .num_create = 1, .create_qids = {1} }, { .num_delete = 2, .delete_qids = {0, 1} } }}, { "2 sockets, asynchronously delete and create", .num_events = 3, .events = {{ .num_create = 1, .create_qids = {0} }, { .num_create = 1, .create_qids = {1}, .num_delete = 1, .delete_qids = {0} }, { .num_delete = 1, .delete_qids = {1} } }}, { "3 sockets, created and deleted sequentially", .num_events = 6, .events = {{ .num_create = 1, .create_qids = {0} }, { .num_create = 1, .create_qids = {1} }, { .num_create = 1, .create_qids = {2} }, { .num_delete = 1, .delete_qids = {1} }, { .num_delete = 1, .delete_qids = {2} }, { .num_delete = 1, .delete_qids = {0} } }}, }; # define ARRAY_SIZE(_x) (sizeof(_x) / sizeof((_x)[0])) static const char *opt_if; static const u8 num_tests = ARRAY_SIZE(all_tests); static struct xsk_socket_info *xsks[MAX_NUM_QUEUES]; #define FRAME_SIZE 64 #define NUM_FRAMES (XSK_RING_CONS__DEFAULT_NUM_DESCS * 2) static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size) { struct xsk_umem_info *umem; umem = calloc(1, sizeof(*umem)); if (!umem) exit(EXIT_FAILURE); DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts, .size = size, ); umem->umem = xsk_umem__create_opts(buffer, &umem->fq, &umem->cq, &opts); if (!umem->umem) exit(errno); umem->buffer = buffer; return umem; } static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem, unsigned int qid) { struct xsk_socket_info *xsk; struct xsk_ring_cons *rxr; xsk = calloc(1, sizeof(*xsk)); if (!xsk) exit(EXIT_FAILURE); xsk->umem = umem; rxr = &xsk->rx; DECLARE_LIBXDP_OPTS(xsk_socket_opts, opts, .rx = rxr, .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, ); xsk->xsk = xsk_socket__create_opts(opt_if, qid, umem->umem, &opts); return xsk; } static void *create_socket(void *args) { struct xsk_umem_info *umem; u32 qid = *(u32 *)args; void *buffs; if (posix_memalign(&buffs, getpagesize(), /* PAGE_SIZE aligned */ NUM_FRAMES * FRAME_SIZE)) { fprintf(stderr, "ERROR: Can't allocate buffer memory \"%s\"\n", strerror(errno)); exit(EXIT_FAILURE); } umem = xsk_configure_umem(buffs, NUM_FRAMES * FRAME_SIZE); xsks[qid] = xsk_configure_socket(umem, qid); return NULL; } static void *delete_socket(void *args) { u32 qid = *(u32 *)args; struct xsk_umem *umem; void *buff; buff = xsks[qid]->umem->buffer; umem = xsks[qid]->umem->umem; xsk_socket__delete(xsks[qid]->xsk); free(buff); (void)xsk_umem__delete(umem); return NULL; } static bool xsk_prog_attached(void) { char xsk_prog_name[] = "xsk_def_prog"; int ifindex = if_nametoindex(opt_if); struct xdp_program *xsk_prog; struct xdp_multiprog *mp; bool answer = false; mp = xdp_multiprog__get_from_ifindex(ifindex); if (IS_ERR_OR_NULL(mp)) return false; xsk_prog = xdp_multiprog__is_legacy(mp) ? 
xdp_multiprog__main_prog(mp) : xdp_multiprog__next_prog(NULL, mp); if (IS_ERR_OR_NULL(xsk_prog)) goto free_mp; answer = !strncmp(xsk_prog_name, xdp_program__name(xsk_prog), sizeof(xsk_prog_name)); free_mp: xdp_multiprog__close(mp); return answer; } static void update_reference_refcnt(struct xsk_test_event *event, int *refcnt) { *refcnt += event->num_create; *refcnt -= event->num_delete; } static bool check_run_event(struct xsk_test_event *event, int *refcnt) { pthread_t threads[MAX_NUM_QUEUES]; bool prog_attached, prog_needed; u8 thread_num = 0, i; int ret; update_reference_refcnt(event, refcnt); for (i = 0; i < event->num_create; i++) { ret = pthread_create(&threads[thread_num++], NULL, &create_socket, &event->create_qids[i]); if (ret) exit(ret); } for (i = 0; i < event->num_delete; i++) { ret = pthread_create(&threads[thread_num++], NULL, &delete_socket, &event->delete_qids[i]); if (ret) exit(ret); } for (i = 0; i < thread_num; i++) pthread_join(threads[i], NULL); prog_attached = xsk_prog_attached(); prog_needed = *refcnt > 0; if (prog_needed != prog_attached) { printf("Program is referenced by %d sockets, but is %s attached\n", *refcnt, prog_attached ? "still" : "not"); return false; } return true; } static bool check_run_test(struct xsk_test *test) { bool test_ok = false; int refcnt = 0; u8 i = 0; for (i = 0; i < test->num_events; i++) { if (!check_run_event(&test->events[i], &refcnt)) { printf("Event %u failed\n", i); goto print_result; } } /* Do not let tests interfere with each other */ sleep(1); test_ok = true; print_result: printf("%s: %s\n", test->name, test_ok ? "PASSED" : "FAILED"); return test_ok; } static int read_args(int argc, char **argv) { if (argc != 2) return -1; opt_if = argv[1]; return 0; } int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; u8 i = 0; if (read_args(argc, argv)) return -1; if (setrlimit(RLIMIT_MEMLOCK, &r)) { fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n", strerror(errno)); exit(EXIT_FAILURE); } silence_libbpf_logging(); for (i = 0; i < num_tests; i++) { if (!check_run_test(&all_tests[i])) exit(EXIT_FAILURE); } return 0; } xdp-tools-1.5.4/lib/libxdp/tests/xdp_dispatcher_v1.h0000644000175100001660000000052115003640462021765 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __XDP_DISPATCHER_V1_H #define __XDP_DISPATCHER_V1_H #ifndef MAX_DISPATCHER_ACTIONS #define MAX_DISPATCHER_ACTIONS 10 #endif struct xdp_dispatcher_config_v1 { __u8 num_progs_enabled; __u32 chain_call_actions[MAX_DISPATCHER_ACTIONS]; __u32 run_prios[MAX_DISPATCHER_ACTIONS]; }; #endif xdp-tools-1.5.4/lib/libxdp/tests/test-libxdp.sh0000644000175100001660000000662315003640462021011 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) ALL_TESTS="test_link_so test_link_a test_old_dispatcher test_xdp_frags test_xsk_prog_refcnt_bpffs test_xsk_prog_refcnt_legacy test_xsk_non_privileged test_link_detach test_xsk_umem_flags" TESTS_DIR=$(dirname "${BASH_SOURCE[0]}") test_link_so() { TMPDIR=$(mktemp --tmpdir -d libxdp-test.XXXXXX) cat >$TMPDIR/libxdptest.c <<EOF #include <xdp/libxdp.h> int main(int argc, char **argv) { (void) argc; (void) argv; (void) xdp_program__open_file("filename", "section_name", NULL); return 0; } EOF $CC -o $TMPDIR/libxdptest $TMPDIR/libxdptest.c $CFLAGS $CPPFLAGS -lxdp $LDLIBS 2>&1 retval=$?
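# keep the compile/link exit status; the test passes iff the snippet links against the shared libxdp (-lxdp)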
rm -rf "$TMPDIR" return $retval } test_link_a() { TMPDIR=$(mktemp --tmpdir -d libxdp-test.XXXXXX) cat >$TMPDIR/libxdptest.c <<EOF #include <xdp/libxdp.h> int main(int argc, char **argv) { (void) argc; (void) argv; (void) xdp_program__open_file("filename", "section_name", NULL); return 0; } EOF $CC -o $TMPDIR/libxdptest $TMPDIR/libxdptest.c $CFLAGS $CPPFLAGS -l:libxdp.a $LDLIBS 2>&1 retval=$? rm -rf "$TMPDIR" return $retval } test_refcnt_once() { # We need multiple queues for this test NUM_QUEUES_REQUIRED=3 ip link add xsk_veth0 numrxqueues $NUM_QUEUES_REQUIRED type veth peer name xsk_veth1 check_run $TESTS_DIR/test_xsk_refcnt xsk_veth0 2>&1 ip link delete xsk_veth0 } check_mount_bpffs() { mount | grep -q /sys/fs/bpf || mount -t bpf bpf /sys/fs/bpf/ || echo "Unable to mount /sys/fs/bpf" mount | grep -q /sys/fs/bpf } check_unmount_bpffs() { mount | grep -q /sys/fs/bpf && umount /sys/fs/bpf/ || echo "Unable to unmount /sys/fs/bpf" ! mount | grep -q /sys/fs/bpf } test_xsk_prog_refcnt_bpffs() { check_mount_bpffs && test_refcnt_once "$@" } test_xsk_prog_refcnt_legacy() { check_unmount_bpffs && test_refcnt_once "$@" } test_xdp_frags() { skip_if_missing_libxdp_compat check_mount_bpffs || return 1 ip link add xdp_veth_big0 mtu 5000 type veth peer name xdp_veth_big1 mtu 5000 ip link add xdp_veth_small0 type veth peer name xdp_veth_small1 check_run $TESTS_DIR/test_xdp_frags xdp_veth_big0 xdp_veth_small0 2>&1 ip link delete xdp_veth_big0 ip link delete xdp_veth_small0 } test_old_dispatcher() { skip_if_missing_libxdp_compat check_mount_bpffs || return 1 ip link add xdp_veth0 type veth peer name xdp_veth1 check_run $TESTS_DIR/test_dispatcher_versions xdp_veth0 ip link delete xdp_veth0 } test_xsk_non_privileged() { if test ! -f $TEST_PROG_DIR/test_xsk_non_privileged; then exit "$SKIPPED_TEST" fi ip link add xdp_veth0 type veth peer name xdp_veth1 check_run $TESTS_DIR/test_xsk_non_privileged xdp_veth0 xdp_veth1 ip link delete xdp_veth0 } test_link_detach() { if test !
-f $TEST_PROG_DIR/test_link_detach; then exit "$SKIPPED_TEST" fi ip link add xdp_veth0 type veth peer name xdp_veth1 check_run $TESTS_DIR/test_link_detach xdp_veth0 ip link delete xdp_veth0 } test_xsk_umem_flags() { ip link add xdp_veth0 type veth peer name xdp_veth1 check_run $TESTS_DIR/test_xsk_umem_flags xdp_veth0 ip link delete xdp_veth0 } cleanup_tests() { ip link del dev xdp_veth_big0 >/dev/null 2>&1 ip link del dev xdp_veth_small0 >/dev/null 2>&1 ip link del dev xsk_veth0 >/dev/null 2>&1 ip link del dev xdp_veth0 >/dev/null 2>&1 } xdp-tools-1.5.4/lib/libxdp/tests/test_utils.h0000644000175100001660000000173615003640462020566 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __TEST_UTILS_H #define __TEST_UTILS_H #include #include #define __unused __attribute__((unused)) static int libbpf_silent_func(__unused enum libbpf_print_level level, __unused const char *format, __unused va_list args) { return 0; } static inline void silence_libbpf_logging(void) { libbpf_set_print(libbpf_silent_func); } static int libxdp_silent_func(__unused enum libxdp_print_level level, __unused const char *format, __unused va_list args) { return 0; } static int libxdp_verbose_func(__unused enum libxdp_print_level level, __unused const char *format, __unused va_list args) { fprintf(stderr, " "); vfprintf(stderr, format, args); return 0; } static inline void silence_libxdp_logging(void) { libxdp_set_print(libxdp_silent_func); } static inline void verbose_libxdp_logging(void) { libxdp_set_print(libxdp_verbose_func); } #endif xdp-tools-1.5.4/lib/libxdp/tests/.gitignore0000644000175100001660000000020715003640462020176 0ustar runnerdockertest_xsk_refcnt check_kern_compat test_xdp_frags test_dispatcher_versions test_xsk_non_privileged test_link_detach test_xsk_umem_flags xdp-tools-1.5.4/lib/libxdp/tests/test_link_detach.c0000644000175100001660000000562315003640462021665 0ustar runnerdocker /* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include "test_utils.h" #include #include #define SKIPPED_TEST 249 // needs to match SKIPPED_TEST value in test_runner.sh static void usage(char *progname) { fprintf(stderr, "Usage: %s <ifname>\n", progname); exit(EXIT_FAILURE); } static int check_link_detach(int ifindex, enum xdp_attach_mode mode) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); struct bpf_object *obj_prog = NULL; struct bpf_program *prog; struct xdp_multiprog *mp = NULL; int ret, prog_fd, link_fd = 0; if (!ifindex) return -EINVAL; obj_prog = bpf_object__open("xdp_pass.o"); if (!obj_prog) { ret = -errno; goto out; } prog = bpf_object__find_program_by_name(obj_prog, "xdp_pass"); if (!prog) { ret = -errno; goto out; } ret = bpf_object__load(obj_prog); if (ret) { ret = -errno; fprintf(stderr, "Couldn't load object: %s\n", strerror(-ret)); goto out; } prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { ret = -errno; fprintf(stderr, "Couldn't get prog fd: %s\n", strerror(-ret)); goto out; } if (mode == XDP_MODE_SKB) opts.flags = XDP_FLAGS_SKB_MODE; link_fd = bpf_link_create(prog_fd, ifindex, BPF_XDP, &opts); if (link_fd < 0) { ret = SKIPPED_TEST; fprintf(stderr, "Couldn't attach XDP prog to ifindex %d: %s\n", ifindex, strerror(errno)); goto out; } mp = xdp_multiprog__get_from_ifindex(ifindex); ret = libxdp_get_error(mp); if (ret) { fprintf(stderr, "Couldn't get multiprog on ifindex %d: %s\n", ifindex, strerror(-ret)); goto out; } ret = xdp_multiprog__detach(mp); out: if (link_fd > 0) close(link_fd);
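/* release the kernel's bpf_link reference (if one was created) before dropping the libxdp and libbpf handles below */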
xdp_multiprog__close(mp); bpf_object__close(obj_prog); return ret; } int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; int ifindex, ret; if (setrlimit(RLIMIT_MEMLOCK, &r)) { fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n", strerror(errno)); exit(EXIT_FAILURE); } char *envval; envval = secure_getenv("VERBOSE_TESTS"); silence_libbpf_logging(); if (envval && envval[0] == '1') verbose_libxdp_logging(); else silence_libxdp_logging(); if (argc != 2) usage(argv[0]); ifindex = if_nametoindex(argv[1]); if (!ifindex) { fprintf(stderr, "Interface '%s' not found.\n", argv[1]); usage(argv[0]); } ret = check_link_detach(ifindex, XDP_MODE_SKB); if (ret) { fprintf(stderr, "Failed to detach XDP prog from ifindex %d mode %s: %s\n", ifindex, "XDP_MODE_SKB", strerror(-ret)); return ret; } ret = check_link_detach(ifindex, XDP_MODE_NATIVE); if (ret) { fprintf(stderr, "Failed to detach XDP prog from ifindex %d mode %s: %s\n", ifindex, "XDP_MODE_NATIVE", strerror(-ret)); } return ret; } xdp-tools-1.5.4/lib/libxdp/tests/Makefile0000644000175100001660000000420315003640462017646 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) USER_TARGETS := test_xsk_refcnt check_kern_compat test_xdp_frags test_dispatcher_versions test_link_detach test_xsk_umem_flags BPF_TARGETS := xdp_dispatcher_v1 xdp_pass USER_LIBS := -lpthread EXTRA_DEPS += xdp_dispatcher_v1.h EXTRA_USER_DEPS += test_utils.h TEST_FILE := ./test-libxdp.sh TEST_RUNNER := ./test_runner.sh LIB_DIR := ../.. LDLIBS += $(USER_LIBS) include $(LIB_DIR)/defines.mk ifeq ($(HAVE_CAP_NG),y) USER_TARGETS += test_xsk_non_privileged CFLAGS += $(CAP_NG_CFLAGS) LDLIBS += $(CAP_NG_LDLIBS) endif USER_C := ${USER_TARGETS:=.c} USER_OBJ := ${USER_C:.c=.o} BPF_OBJS := $(BPF_TARGETS:=.o) LDFLAGS+=-L$(LIBXDP_DIR) ifeq ($(DYNAMIC_LIBXDP),1) LDLIBS:=-lxdp $(LDLIBS) OBJECT_LIBXDP:=$(LIBXDP_DIR)/libxdp.so.$(LIBXDP_VERSION) else LDLIBS:=-l:libxdp.a $(LDLIBS) OBJECT_LIBXDP:=$(LIBXDP_DIR)/libxdp.a endif # Detect submodule libbpf source file changes ifeq ($(SYSTEM_LIBBPF),n) LIBBPF_SOURCES := $(wildcard $(LIBBPF_DIR)/src/*.[ch]) endif LIBXDP_SOURCES := $(wildcard $(LIBXDP_DIR)/*.[ch] $(LIBXDP_DIR)/*.in) CFLAGS += -I$(HEADER_DIR) BPF_HEADERS := $(wildcard $(HEADER_DIR)/bpf/*.h) $(wildcard $(HEADER_DIR)/xdp/*.h) all: $(USER_TARGETS) $(BPF_OBJS) .PHONY: clean clean:: $(Q)rm -f $(USER_TARGETS) $(USER_OBJ) $(OBJECT_LIBBPF): $(LIBBPF_SOURCES) $(Q)$(MAKE) -C $(LIB_DIR) libbpf $(OBJECT_LIBXDP): $(LIBXDP_SOURCES) $(Q)$(MAKE) -C $(LIBXDP_DIR) # Create expansions for dependencies LIB_H := ${LIB_OBJS:.o=.h} # Detect if any of common obj changed and create dependency on .h-files $(LIB_OBJS): %.o: %.c %.h $(LIB_H) $(Q)$(MAKE) -C $(dir $@) $(notdir $@) ALL_EXEC_TARGETS=$(USER_TARGETS) $(ALL_EXEC_TARGETS): %: %.c $(OBJECT_LIBBPF) $(OBJECT_LIBXDP) $(LIBMK) $(LIB_OBJS) $(EXTRA_DEPS) $(EXTRA_USER_DEPS) $(QUIET_CC)$(CC) -Wall $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -o $@ $(LIB_OBJS) \ $< $(LDLIBS) $(BPF_OBJS): %.o: %.c $(BPF_HEADERS) $(LIBMK) $(EXTRA_DEPS) $(QUIET_CLANG)$(CLANG) -target $(BPF_TARGET) $(BPF_CFLAGS) -O2 -c -g -o $@ $< run: all $(Q)env CC="$(CC)" CFLAGS="$(CFLAGS) $(LDFLAGS)" CPPFLAGS="$(CPPFLAGS)" LDLIBS="$(LDLIBS)" V=$(V) $(TEST_RUNNER) $(TEST_FILE) $(RUN_TESTS) xdp-tools-1.5.4/lib/libxdp/tests/test_runner.sh0000755000175100001660000000442215003640462021120 0ustar runnerdocker#!/bin/bash # SPDX-License-Identifier: GPL-2.0-or-later # # Script to setup and manage tests for xdp-tools. # Based on the test-env script from xdp-tutorial. 
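# The runner takes a test definitions file (e.g. test-libxdp.sh) as its
# first argument, sources it, and runs the functions named in $ALL_TESTS
# inside fresh network and mount namespaces.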
# # Author: Toke Høiland-Jørgensen (toke@redhat.com) # Date: 26 May 2020 # Copyright (c) 2020 Red Hat set -o errexit set -o nounset umask 077 TEST_PROG_DIR="${TEST_PROG_DIR:-$(dirname "${BASH_SOURCE[0]}")}" ALL_TESTS="" VERBOSE_TESTS=${V:-0} export VERBOSE_TESTS # Odd return value for skipping, as only 0-255 is valid. SKIPPED_TEST=249 skip_if_missing_libxdp_compat() { if ! $TEST_PROG_DIR/check_kern_compat; then exit "$SKIPPED_TEST" fi } is_func() { type "$1" 2>/dev/null | grep -q 'is a function' } check_run() { local ret [ "$VERBOSE_TESTS" -eq "1" ] && echo "$@" "$@" ret=$? if [ "$ret" -ne "0" ]; then exit $ret fi } exec_test() { local testn="$1" local output local ret printf " %-30s" "[$testn]" if ! is_func "$testn"; then echo "INVALID" return 1 fi output=$($testn 2>&1) ret=$? if [ "$ret" -eq "0" ]; then echo "PASS" elif [ "$ret" -eq "$SKIPPED_TEST" ]; then echo "SKIPPED" ret=0 else echo "FAIL" fi if [ "$ret" -ne "0" ] || [ "$VERBOSE_TESTS" -eq "1" ]; then echo "$output" | sed 's/^/\t/' fi return $ret } run_tests() { local TESTS="$*" local ret=0 [ -z "$TESTS" ] && TESTS="$ALL_TESTS" echo " Running tests from $TEST_DEFINITIONS" for testn in $TESTS; do exec_test $testn || ret=1 if is_func cleanup_tests; then cleanup_tests || true fi done return $ret } usage() { echo "Usage: $0 <test-file> [test names]" >&2 exit 1 } if [ "$EUID" -ne "0" ]; then if command -v sudo >/dev/null 2>&1; then exec sudo env CC="$CC" CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS" LDLIBS="$LDLIBS" V=${VERBOSE_TESTS} "$0" "$@" else die "Tests must be run as root" fi else if [ "${DID_UNSHARE:-0}" -ne "1" ]; then echo "Executing tests in separate net- and mount namespaces" >&2 exec env DID_UNSHARE=1 unshare -n -m "$0" "$@" fi fi TEST_DEFINITIONS="${1:-}" [ -f "$TEST_DEFINITIONS" ] || usage source "$TEST_DEFINITIONS" shift run_tests "$@" xdp-tools-1.5.4/lib/libxdp/compat.h0000644000175100001660000000040415003640462016477 0ustar runnerdocker#ifndef __COMPAT_H #define __COMPAT_H #ifndef HAVE_SECURE_GETENV #include // Source: https://www.openwall.com/lists/musl/2019/05/28/3 static inline char *secure_getenv(const char *name) { return libc.secure ? NULL : getenv(name); } #endif #endif xdp-tools-1.5.4/lib/libxdp/libxdp.30000644000175100001660000005632515003640462016426 0ustar runnerdocker.TH "libxdp" "3" "January 14, 2025" "v1.5.4" "libxdp - library for loading XDP programs" .SH "NAME" libxdp \- library for attaching XDP programs and using AF_XDP sockets .SH "SYNOPSIS" .PP This directory contains the files for the \fIlibxdp\fP library for attaching XDP programs to network interfaces and using AF_XDP sockets. The library is fairly lightweight and relies on \fIlibbpf\fP to do the heavy lifting for processing eBPF object files etc. .PP \fILibxdp\fP provides two primary features on top of \fIlibbpf\fP. The first is the ability to load multiple XDP programs in sequence on a single network device (which is not natively supported by the kernel). This support relies on the \fIfreplace\fP functionality in the kernel, which makes it possible to attach an eBPF program as a replacement for a global function in another (already loaded) eBPF program. The second main feature is helper functions for configuring AF_XDP sockets as well as reading and writing packets from these sockets. .PP Some of the functionality provided by libxdp depends on particular kernel features; see the "Kernel and BPF program feature compatibility" section below for details. .SS "Using libxdp from an application" .PP Basic usage of libxdp from an application is quite straightforward.
The following example loads, then unloads, an XDP program from the 'lo' interface: .RS .nf \fC#define IFINDEX 1 struct xdp_program *prog; int err; prog = xdp_program__open_file("my-program.o", "section_name", NULL); err = xdp_program__attach(prog, IFINDEX, XDP_MODE_NATIVE, 0); if (!err) xdp_program__detach(prog, IFINDEX, XDP_MODE_NATIVE, 0); xdp_program__close(prog); \fP .fi .RE .PP The \fIxdp_program\fP structure is an opaque structure that represents a single XDP program. \fIlibxdp\fP contains functions to create such a struct either from a BPF object file on disk, from a \fIlibbpf\fP BPF object, or from an identifier of a program that is already loaded into the kernel: .RS .nf \fCstruct xdp_program *xdp_program__from_bpf_obj(struct bpf_object *obj, const char *section_name); struct xdp_program *xdp_program__find_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__open_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__from_fd(int fd); struct xdp_program *xdp_program__from_id(__u32 prog_id); struct xdp_program *xdp_program__from_pin(const char *pin_path); \fP .fi .RE .PP The functions that open a BPF object or file need the function name of the XDP program as well as the file name or object, since an ELF file can contain multiple XDP programs. The \fIxdp_program__find_file()\fP function takes a filename without a path, and will look for the object in \fILIBXDP_OBJECT_PATH\fP which defaults to \fI/usr/lib/bpf\fP (or \fI/usr/lib64/bpf\fP on systems using a split library path). This is convenient for applications shipping pre-compiled eBPF object files. .PP The \fIxdp_program__attach()\fP function will attach the program to an interface, building a dispatcher program to execute it. Multiple programs can be attached at once with \fIxdp_program__attach_multi()\fP; they will be sorted in order of their run priority, and execution from one program to the next will proceed based on the chain call actions defined for each program (see the \fBProgram metadata\fP section below). Because the loading process involves modifying the attach type of the program, the attach functions only work with \fIstruct xdp_program\fP objects that have not yet been loaded into the kernel. .PP When using the attach functions to attach to an interface that already has an XDP program loaded, libxdp will attempt to add the program to the list of loaded programs. However, this may fail, either due to missing kernel support, or because the already-attached program was not loaded using a dispatcher compatible with libxdp. If the kernel support for incremental attach (merged in kernel 5.10) is missing, the only way to actually run multiple programs on a single interface is to attach them all at the same time with \fIxdp_program__attach_multi()\fP. If the existing program is not an XDP dispatcher, that program will have to be detached from the interface before libxdp can attach a new one. This can be done by calling \fIxdp_program__detach()\fP with a reference to the loaded program; but note that this will of course break any application relying on that other XDP program to be present. .SH "Program metadata" .PP To support multiple XDP programs on the same interface, libxdp uses two pieces of metadata for each XDP program: Run priority and chain call actions. 
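.PP
Both values can also be overridden from the application at load time, before the program is attached, using the accessor functions listed in the \fBInspecting and modifying metadata\fP section below. A minimal sketch (error handling omitted; the object file and section name are placeholders, following the example above):
.RS
.nf
\fCstruct xdp_program *prog;

prog = xdp_program__open_file("my-program.o", "section_name", NULL);

/* run this program early... */
xdp_program__set_run_prio(prog, 10);

/* ...and keep chain calling on both XDP_PASS and XDP_DROP */
xdp_program__set_chain_call_enabled(prog, XDP_PASS, true);
xdp_program__set_chain_call_enabled(prog, XDP_DROP, true);
\fP
.fi
.RE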
.SS "Run priority" .PP This is the priority of the program and is a simple integer used to sort programs when loading multiple programs onto the same interface. Programs that wish to run early (such as a packet filter) should set low values for this, while programs that want to run later (such as a packet forwarder or counter) should set higher values. Note that later programs are only run if the previous programs end with a return code that is part of their chain call actions (see below). If not specified, the default priority value is 50. .SS "Chain call actions" .PP These are the program return codes that indicate a packet should continue processing. If the program returns one of these actions, later programs in the call chain will be run, whereas if it returns any other action, processing will be interrupted, and the XDP dispatcher will return the verdict immediately. If not set, this defaults to just XDP_PASS, which is likely the value most programs should use. .SS "Specifying metadata" .PP The metadata outlined above is specified as BTF information embedded in the ELF file containing the XDP program. The \fIxdp_helpers.h\fP file shipped with libxdp contains helper macros to include this information, which can be used as follows: .RS .nf \fC#include <bpf/bpf_helpers.h> #include <xdp/xdp_helpers.h> struct { __uint(priority, 10); __uint(XDP_PASS, 1); __uint(XDP_DROP, 1); } XDP_RUN_CONFIG(my_xdp_func); \fP .fi .RE .PP This example specifies that the XDP program in \fImy_xdp_func\fP should have priority 10 and that its chain call actions are \fIXDP_PASS\fP and \fIXDP_DROP\fP. In a source file with multiple XDP programs in the same file, a definition like the above can be included for each program (main XDP function). Any program that does not specify any config information will use the default values outlined above. .SS "Inspecting and modifying metadata" .PP \fIlibxdp\fP exposes the following functions that an application can use to inspect and modify the metadata on an XDP program. Modification is only possible before a program is attached on an interface. These functions won't modify the BTF information itself, but the new values will be stored as part of the program attachment. .RS .nf \fCunsigned int xdp_program__run_prio(const struct xdp_program *xdp_prog); int xdp_program__set_run_prio(struct xdp_program *xdp_prog, unsigned int run_prio); bool xdp_program__chain_call_enabled(const struct xdp_program *xdp_prog, enum xdp_action action); int xdp_program__set_chain_call_enabled(struct xdp_program *prog, unsigned int action, bool enabled); int xdp_program__print_chain_call_actions(const struct xdp_program *prog, char *buf, size_t buf_len); \fP .fi .RE .SH "The dispatcher program" .PP To support multiple non-offloaded programs on the same network interface, \fIlibxdp\fP uses a \fBdispatcher program\fP which is a small wrapper program that will call each component program in turn, inspect its return code, and then chain call to the next program based on the chain call actions of the previous program (see the \fBProgram metadata\fP section above). .PP While applications using \fIlibxdp\fP do not need to know the details of the dispatcher program to just load an XDP program onto an interface, \fIlibxdp\fP does expose the dispatcher and its attached component programs, which can be used to list the programs currently attached to an interface. .PP The structure used for this is \fIstruct xdp_multiprog\fP, which can only be constructed from the programs loaded on an interface based on ifindex.
The API for getting a multiprog reference and iterating through the attached programs looks like this: .RS .nf \fCstruct xdp_multiprog *xdp_multiprog__get_from_ifindex(int ifindex); struct xdp_program *xdp_multiprog__next_prog(const struct xdp_program *prog, const struct xdp_multiprog *mp); void xdp_multiprog__close(struct xdp_multiprog *mp); int xdp_multiprog__detach(struct xdp_multiprog *mp, int ifindex); enum xdp_attach_mode xdp_multiprog__attach_mode(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__main_prog(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__hw_prog(const struct xdp_multiprog *mp); bool xdp_multiprog__is_legacy(const struct xdp_multiprog *mp); \fP .fi .RE .PP If a non-offloaded program is attached to the interface which \fIlibxdp\fP doesn't recognise as a dispatcher program, an \fIxdp_multiprog\fP structure will still be returned, and \fIxdp_multiprog__is_legacy()\fP will return true for that program (note that this also holds true if only an offloaded program is loaded). A reference to that (regular) XDP program can be obtained by \fIxdp_multiprog__main_prog()\fP. If the program attached to the interface \fBis\fP a dispatcher program, \fIxdp_multiprog__main_prog()\fP will return a reference to the dispatcher program itself, which is mainly useful for obtaining other data about that program (such as the program ID). A reference to an offloaded program can be acquired using \fIxdp_multiprog__hw_prog()\fP. Function \fIxdp_multiprog__attach_mode()\fP returns the attach mode of the non-offloaded program; whether an offloaded program is attached should be checked through \fIxdp_multiprog__hw_prog()\fP. .SS "Pinning in bpffs" .PP The kernel will automatically detach component programs from the dispatcher once the last reference to them disappears. To prevent this from happening, \fIlibxdp\fP will pin the component program references in \fIbpffs\fP before attaching the dispatcher to the network interface. The pathnames generated for pinning are as follows: .IP \(em 4 /sys/fs/bpf/xdp/dispatch-IFINDEX-DID - dispatcher program for IFINDEX with BPF program ID DID .IP \(em 4 /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-prog - component program 0, program reference .IP \(em 4 /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-link - component program 0, bpf_link reference .IP \(em 4 /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-prog - component program 1, program reference .IP \(em 4 /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-link - component program 1, bpf_link reference .IP \(em 4 etc, up to ten component programs .PP If set, the \fILIBXDP_BPFFS\fP environment variable will override the location of \fIbpffs\fP, but the \fIxdp\fP subdirectory is always used. If no \fIbpffs\fP is mounted, libxdp will consult the environment variable \fILIBXDP_BPFFS_AUTOMOUNT\fP. If this is set to \fI1\fP, libxdp will attempt to automount a bpffs. If not, libxdp will fall back to loading a single program without a dispatcher, as if the kernel did not support the features needed for multiprog attachment. .SH "Using AF_XDP sockets" .PP Libxdp implements helper functions for configuring AF_XDP sockets as well as reading and writing packets from these sockets. AF_XDP sockets can be used to redirect packets to user-space at high rates from an XDP program. Note that this functionality used to reside in libbpf, but has now been moved over to libxdp as it is a better fit for this library.
As of the 1.0 release of libbpf, the AF_XDP socket support will be removed and all future development will be performed in libxdp instead. .PP For an overview of AF_XDP sockets, please refer to this Linux Plumbers paper (\fIhttp://vger.kernel.org/lpc_net2018_talks/lpc18_pres_af_xdp_perf-v3.pdf\fP) and the documentation in the Linux kernel (Documentation/networking/af_xdp.rst or \fIhttps://www.kernel.org/doc/html/latest/networking/af_xdp.html\fP). .PP For an example on how to use the interface, take a look at the AF_XDP-example and AF_XDP-forwarding programs in the bpf-examples repository: \fIhttps://github.com/xdp-project/bpf-examples\fP. .SS "Control path" .PP Libxdp provides helper functions for creating and destroying umems and sockets as shown below. The first thing that a user generally wants to do is to create a umem area. This is the area that will contain all packets received and the ones that are going to be sent. After that, AF_XDP sockets can be created tied to this umem. These can either be sockets that have exclusive ownership of that umem through xsk_socket__create() or shared with other sockets using xsk_socket__create_shared(). There is one option called XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD that can be set in the libxdp_flags field (also called libbpf_flags for compatibility reasons). This will make libxdp not load any XDP program or set up any BPF maps, which is a must if users want to add their own XDP program. .PP If there is already a socket created with socket(AF_XDP, SOCK_RAW, 0) that is not bound and not tied to any umem, the file descriptor of this socket can be used in the opts parameter of xsk_umem__create_opts(); this is the recommended way of creating a umem. .RS .nf \fCstruct xsk_umem *xsk_umem__create_opts(void *umem_area, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, struct xsk_umem_opts *opts); int xsk_umem__create(struct xsk_umem **umem, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); int xsk_umem__create_with_fd(struct xsk_umem **umem, int fd, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); int xsk_socket__create(struct xsk_socket **xsk, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *config); int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_socket_config *config); int xsk_umem__delete(struct xsk_umem *umem); void xsk_socket__delete(struct xsk_socket *xsk); \fP .fi .RE .PP There are also two helper functions to get the file descriptor of a umem or a socket. These are needed when using standard Linux syscalls such as poll(), recvmsg(), sendto(), etc. .RS .nf \fCint xsk_umem__fd(const struct xsk_umem *umem); int xsk_socket__fd(const struct xsk_socket *xsk); \fP .fi .RE .PP The control path also provides two APIs for setting up AF_XDP sockets when the process that is going to use the AF_XDP socket is non-privileged. These two functions perform the operations that require privileges and can be executed from some form of control process that has the necessary privileges. The xsk_socket__create() call executed on the non-privileged process will then skip these two steps.
For an example on how to use these, please take a look at the AF_XDP-example program in the bpf-examples repository: \fIhttps://github.com/xdp-project/bpf-examples/tree/main/AF_XDP-example\fP. .RS .nf \fCint xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd); int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd); \fP .fi .RE .PP To further reduce the required level of privileges, an AF_XDP socket can be created beforehand with socket(AF_XDP, SOCK_RAW, 0) and passed to a non-privileged process. This socket can be used in xsk_umem__create_opts() and later in xsk_socket__create() with the created umem. xsk_socket__create_shared() would still require privileges for AF_XDP socket creation. .SS "Data path" .PP For performance reasons, all the data path functions are static inline functions found in the xsk.h header file so they can be optimized into the target application binary for best possible performance. There are four FIFO rings of two main types: producer rings (fill and Tx) and consumer rings (Rx and completion). The producer rings use xsk_ring_prod functions and consumer rings use xsk_ring_cons functions. For producer rings, you start with \fIreserving\fP one or more slots in a producer ring and then when they have been filled out, you \fIsubmit\fP them so that the kernel will act on them. For a consumer ring, you \fIpeek\fP if there are any new packets in the ring and if so you can read them from the ring. Once you are done reading them, you \fIrelease\fP them back to the kernel so it can use them for new packets. There is also a \fIcancel\fP operation for consumer rings if the application does not want to consume all packets received with the peek operation. .RS .nf \fC__u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx); void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb); __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx); void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb); void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb); \fP .fi .RE .PP The functions below are used for reading and writing the descriptors of the rings. xsk_ring_prod__fill_addr() and xsk_ring_prod__tx_desc() \fBwrite\fP entries in the fill and Tx rings respectively, while xsk_ring_cons__comp_addr() and xsk_ring_cons__rx_desc() \fBread\fP entries from the completion and Rx rings respectively. The \fIidx\fP is the parameter returned in the xsk_ring_prod__reserve or xsk_ring_cons__peek calls. To advance to the next entry, simply do \fIidx++\fP. .RS .nf \fC__u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx); struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx); const __u64 *xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx); const struct xdp_desc *xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx); \fP .fi .RE .PP The xsk_umem functions are used to get a pointer to the packet data itself, always located inside the umem. In the default aligned mode, you can get the addr variable straight from the Rx descriptor. But in unaligned mode, you need to use the last three functions below as the offset used is carried in the upper 16 bits of the addr. Therefore, you cannot use the addr straight from the descriptor in the unaligned case.
.RS .nf \fCvoid *xsk_umem__get_data(void *umem_area, __u64 addr); __u64 xsk_umem__extract_addr(__u64 addr); __u64 xsk_umem__extract_offset(__u64 addr); __u64 xsk_umem__add_offset_to_addr(__u64 addr); \fP .fi .RE .PP There is one more function in the data path: one that checks if the need_wakeup flag is set. Use of this flag is highly encouraged and should be enabled by setting the \fIXDP_USE_NEED_WAKEUP\fP bit in the \fIxdp_bind_flags\fP field that is provided to the xsk_socket__create[_shared]() calls. If this function returns true, then you need to call \fIrecvmsg()\fP, \fIsendto()\fP, or \fIpoll()\fP depending on the situation. \fIrecvmsg()\fP if you are \fBreceiving\fP, or \fIsendto()\fP if you are \fBsending\fP. \fIpoll()\fP can be used for both cases and provides the ability to sleep too, as with any other socket. But note that poll() is a slower operation than the other two. .RS .nf \fCint xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r); \fP .fi .RE .PP For an example on how to use all these APIs, take a look at the AF_XDP-example and AF_XDP-forwarding programs in the bpf-examples repository: \fIhttps://github.com/xdp-project/bpf-examples\fP. .SH "Kernel and BPF program feature compatibility" .PP The features exposed by libxdp rely on certain kernel versions and BPF features to work. To get the full benefit of all features, libxdp needs to be used with kernel 5.10 or newer, unless the commits mentioned below have been backported. However, libxdp will probe the kernel and transparently fall back to legacy loading procedures, so it is possible to use the library with older versions, although some features will be unavailable, as detailed below. .PP The ability to attach multiple BPF programs to a single interface relies on the kernel "BPF program extension" feature which was introduced by commit be8704ff07d2 ("bpf: Introduce dynamic program extensions") in the upstream kernel and first appeared in kernel release 5.6. To \fBincrementally\fP attach multiple programs, a further refinement added by commit 4a1e7c0c63e0 ("bpf: Support attaching freplace programs to multiple attach points") is needed; this first appeared in the upstream kernel version 5.10. The functionality relies on the "BPF trampolines" feature which is unfortunately only available on the x86_64 architecture. In other words, kernels before 5.6 can only attach a single XDP program to each interface, kernels 5.6+ can attach multiple programs if they are all attached at the same time, and kernels 5.10+ have full support for XDP multiprog on x86_64. On other architectures, only a single program can be attached to each interface. .PP To load AF_XDP programs, kernel support for AF_XDP sockets needs to be included and enabled in the kernel build. In addition, when using AF_XDP sockets, an XDP program is also loaded on the interface. The XDP program used for this by libxdp requires the ability to do map lookups into XSK maps, which was introduced with commit fada7fdc83c0 ("bpf: Allow bpf_map_lookup_elem() on an xskmap") in kernel 5.3. This means that the minimum required kernel version for using AF_XDP is kernel 5.3; however, for the AF_XDP XDP program to co-exist with other programs, the same constraints for multiprog apply as outlined above. .PP Note that some Linux distributions backport features to earlier kernel versions, especially in enterprise kernels; for instance, Red Hat Enterprise Linux kernels include everything needed for libxdp to function since RHEL 8.5.
.PP Finally, XDP programs loaded using the multiprog facility must include type information (using the BPF Type Format, BTF). To get this, compile the programs with a recent version of Clang/LLVM (version 10+), and enable debug information when compiling (using the \fI\-g\fP option). .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHORS" .PP libxdp and this man page were written by Toke Høiland-Jørgensen. AF_XDP support and documentation was contributed by Magnus Karlsson. xdp-tools-1.5.4/lib/libxdp/xsk.c0000644000175100001660000010164315003640462016023 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * AF_XDP user-space access library. * * Copyright(c) 2018 - 2021 Intel Corporation. * * Author(s): Magnus Karlsson */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libxdp_internal.h" #include "xsk_def_xdp_prog.h" #include "bpf_instr.h" #ifndef SOL_XDP #define SOL_XDP 283 #endif #ifndef AF_XDP #define AF_XDP 44 #endif #ifndef PF_XDP #define PF_XDP AF_XDP #endif #ifndef SO_NETNS_COOKIE #define SO_NETNS_COOKIE 71 #endif #define INIT_NS 1 struct xsk_umem { struct xsk_ring_prod *fill_save; struct xsk_ring_cons *comp_save; char *umem_area; struct xsk_umem_config config; int fd; int refcount; struct list_head ctx_list; bool rx_ring_setup_done; bool tx_ring_setup_done; }; struct xsk_ctx { struct xsk_ring_prod *fill; struct xsk_ring_cons *comp; struct xsk_umem *umem; __u32 queue_id; int refcount; int ifindex; __u64 netns_cookie; int xsks_map_fd; struct list_head list; struct xdp_program *xdp_prog; int refcnt_map_fd; char ifname[IFNAMSIZ]; }; struct xsk_socket { struct xsk_ring_cons *rx; struct xsk_ring_prod *tx; struct xsk_ctx *ctx; struct xsk_socket_config config; int fd; }; struct xsk_nl_info { int ifindex; int fd; bool xdp_prog_attached; }; /* Up until and including Linux 5.3 */ struct xdp_ring_offset_v1 { __u64 producer; __u64 consumer; __u64 desc; }; /* Up until and including Linux 5.3 */ struct xdp_mmap_offsets_v1 { struct xdp_ring_offset_v1 rx; struct xdp_ring_offset_v1 tx; struct xdp_ring_offset_v1 fr; struct xdp_ring_offset_v1 cr; }; /* Export all inline helpers as symbols for use by language bindings. 
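 * Under C99 inline semantics, an extern declaration of a function that
 * is defined inline in the header forces the compiler to also emit an
 * out-of-line definition, giving the library real symbols to link against.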
*/ extern inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx); extern inline const __u64 * xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx); extern inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx); extern inline const struct xdp_desc * xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx); extern inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r); extern inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb); extern inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb); extern inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx); extern inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb); extern inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx); extern inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb); extern inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb); extern inline void *xsk_umem__get_data(void *umem_area, __u64 addr); extern inline __u64 xsk_umem__extract_addr(__u64 addr); extern inline __u64 xsk_umem__extract_offset(__u64 addr); extern inline __u64 xsk_umem__add_offset_to_addr(__u64 addr); int xsk_umem__fd(const struct xsk_umem *umem) { return umem ? umem->fd : -EINVAL; } int xsk_socket__fd(const struct xsk_socket *xsk) { return xsk ? xsk->fd : -EINVAL; } static bool xsk_page_aligned(void *buffer) { unsigned long addr = (unsigned long)buffer; return !(addr & (getpagesize() - 1)); } static void xsk_set_umem_config(struct xsk_umem_config *cfg, const struct xsk_umem_opts *opts) { cfg->fill_size = OPTS_GET(opts, fill_size, 0) ?: XSK_RING_PROD__DEFAULT_NUM_DESCS; cfg->comp_size = OPTS_GET(opts, comp_size, 0) ?: XSK_RING_CONS__DEFAULT_NUM_DESCS; cfg->frame_size = OPTS_GET(opts, frame_size, 0) ?: XSK_UMEM__DEFAULT_FRAME_SIZE; cfg->frame_headroom = OPTS_GET(opts, frame_headroom, 0) ?: XSK_UMEM__DEFAULT_FRAME_HEADROOM; cfg->flags = OPTS_GET(opts, flags, 0) ?: XSK_UMEM__DEFAULT_FLAGS; } static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, const struct xsk_socket_opts *opts) { __u32 libxdp_flags; libxdp_flags = OPTS_GET(opts, libxdp_flags, 0); if (libxdp_flags & ~XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD) return -EINVAL; cfg->rx_size = OPTS_GET(opts, rx_size, 0) ?: XSK_RING_CONS__DEFAULT_NUM_DESCS; cfg->tx_size = OPTS_GET(opts, tx_size, 0) ?: XSK_RING_PROD__DEFAULT_NUM_DESCS; cfg->libxdp_flags = libxdp_flags; cfg->xdp_flags = OPTS_GET(opts, xdp_flags, 0); cfg->bind_flags = OPTS_GET(opts, bind_flags, 0); return 0; } static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) { struct xdp_mmap_offsets_v1 off_v1; /* getsockopt on a kernel <= 5.3 has no flags fields. * Copy over the offsets to the correct places in the >=5.4 format * and put the flags where they would have been on that kernel. 
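 * In the >=5.4 layout each ring's flags word sits directly after the
 * consumer index, which is why the offsets below are synthesized as
 * consumer + sizeof(__u32).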
*/ memcpy(&off_v1, off, sizeof(off_v1)); off->rx.producer = off_v1.rx.producer; off->rx.consumer = off_v1.rx.consumer; off->rx.desc = off_v1.rx.desc; off->rx.flags = off_v1.rx.consumer + sizeof(__u32); off->tx.producer = off_v1.tx.producer; off->tx.consumer = off_v1.tx.consumer; off->tx.desc = off_v1.tx.desc; off->tx.flags = off_v1.tx.consumer + sizeof(__u32); off->fr.producer = off_v1.fr.producer; off->fr.consumer = off_v1.fr.consumer; off->fr.desc = off_v1.fr.desc; off->fr.flags = off_v1.fr.consumer + sizeof(__u32); off->cr.producer = off_v1.cr.producer; off->cr.consumer = off_v1.cr.consumer; off->cr.desc = off_v1.cr.desc; off->cr.flags = off_v1.cr.consumer + sizeof(__u32); } static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) { socklen_t optlen; int err; optlen = sizeof(*off); err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen); if (err) return err; if (optlen == sizeof(*off)) return 0; if (optlen == sizeof(struct xdp_mmap_offsets_v1)) { xsk_mmap_offsets_v1(off); return 0; } return -EINVAL; } static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp) { struct xdp_mmap_offsets off; void *map; int err; err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &umem->config.fill_size, sizeof(umem->config.fill_size)); if (err) return -errno; err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &umem->config.comp_size, sizeof(umem->config.comp_size)); if (err) return -errno; err = xsk_get_mmap_offsets(fd, &off); if (err) return -errno; map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, XDP_UMEM_PGOFF_FILL_RING); if (map == MAP_FAILED) return -errno; fill->mask = umem->config.fill_size - 1; fill->size = umem->config.fill_size; fill->producer = map + off.fr.producer; fill->consumer = map + off.fr.consumer; fill->flags = map + off.fr.flags; fill->ring = map + off.fr.desc; fill->cached_cons = umem->config.fill_size; map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, XDP_UMEM_PGOFF_COMPLETION_RING); if (map == MAP_FAILED) { err = -errno; goto out_mmap; } comp->mask = umem->config.comp_size - 1; comp->size = umem->config.comp_size; comp->producer = map + off.cr.producer; comp->consumer = map + off.cr.consumer; comp->flags = map + off.cr.flags; comp->ring = map + off.cr.desc; return 0; out_mmap: munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64)); return err; } struct xsk_umem *xsk_umem__create_opts(void *umem_area, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, struct xsk_umem_opts *opts) { struct xdp_umem_reg mr; struct xsk_umem *umem; int err, fd; __u64 size; if (!umem_area || !fill || !comp) { err = -EFAULT; goto err; } if (!OPTS_VALID(opts, xsk_umem_opts)) { err = -EINVAL; goto err; } fd = OPTS_GET(opts, fd, 0); size = OPTS_GET(opts, size, 0); if (!size && !xsk_page_aligned(umem_area)) { err = -EINVAL; goto err; } umem = calloc(1, sizeof(*umem)); if (!umem) { err = -ENOMEM; goto err; } umem->fd = fd > 0 ? 
fd : socket(AF_XDP, SOCK_RAW, 0); if (umem->fd < 0) { err = -errno; goto out_umem_alloc; } umem->umem_area = umem_area; INIT_LIST_HEAD(&umem->ctx_list); xsk_set_umem_config(&umem->config, opts); memset(&mr, 0, sizeof(mr)); mr.addr = (uintptr_t)umem_area; mr.len = size; mr.chunk_size = umem->config.frame_size; mr.headroom = umem->config.frame_headroom; mr.flags = umem->config.flags; mr.tx_metadata_len = OPTS_GET(opts, tx_metadata_len, XSK_UMEM__DEFAULT_TX_METADATA_LEN); err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)); if (err) { err = -errno; goto out_socket; } err = xsk_create_umem_rings(umem, umem->fd, fill, comp); if (err) goto out_socket; umem->fill_save = fill; umem->comp_save = comp; return umem; out_socket: close(umem->fd); out_umem_alloc: free(umem); err: return libxdp_err_ptr(err, true); } int xsk_umem__create_with_fd(struct xsk_umem **umem_ptr, int fd, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *usr_config) { struct xsk_umem *umem; if (!umem_ptr) return -EFAULT; DECLARE_LIBXDP_OPTS(xsk_umem_opts, opts, .fd = fd, .size = size, ); if (usr_config) { opts.fill_size = usr_config->fill_size; opts.comp_size = usr_config->comp_size; opts.frame_size = usr_config->frame_size; opts.frame_headroom = usr_config->frame_headroom; opts.flags = usr_config->flags; } umem = xsk_umem__create_opts(umem_area, fill, comp, &opts); if (!umem) return -errno; *umem_ptr = umem; return 0; } int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *usr_config) { return xsk_umem__create_with_fd(umem_ptr, 0, umem_area, size, fill, comp, usr_config); } static int xsk_init_xsk_struct(struct xsk_socket *xsk, int ifindex) { char ifname[IFNAMSIZ]; struct xsk_ctx *ctx; char *interface; ctx = calloc(1, sizeof(*ctx)); if (!ctx) return -ENOMEM; interface = if_indextoname(ifindex, &ifname[0]); if (!interface) { free(ctx); return -errno; } ctx->ifindex = ifindex; memcpy(ctx->ifname, ifname, IFNAMSIZ -1); ctx->ifname[IFNAMSIZ - 1] = 0; xsk->ctx = ctx; return 0; } static enum xdp_attach_mode xsk_convert_xdp_flags(__u32 xdp_flags) { if (xdp_flags & ~XDP_FLAGS_MASK) pr_warn("XDP flag: 0x%x contains flags not supported by libxdp.\n", xdp_flags); if (xdp_flags & XDP_FLAGS_SKB_MODE) return XDP_MODE_SKB; if (xdp_flags & XDP_FLAGS_DRV_MODE) return XDP_MODE_NATIVE; if (xdp_flags & XDP_FLAGS_HW_MODE) return XDP_MODE_HW; return XDP_MODE_NATIVE; } #define MAX_DEV_QUEUE_PATH_LEN 64 static void xsk_get_queues_from_sysfs(const char* ifname, __u32 *rx, __u32 *tx) { char buf[MAX_DEV_QUEUE_PATH_LEN]; struct dirent *entry; DIR *dir; int err; *rx = *tx = 0; err = try_snprintf(buf, MAX_DEV_QUEUE_PATH_LEN, "/sys/class/net/%s/queues/", ifname); if (err) return; dir = opendir(buf); if(dir == NULL) return; while((entry = readdir(dir))) { if (0 == strncmp(entry->d_name, "rx", 2)) ++*rx; if (0 == strncmp(entry->d_name, "tx", 2)) ++*tx; } closedir(dir); } static int xsk_get_max_queues(char *ifname) { struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; struct ifreq ifr = {}; int fd, err, ret; fd = socket(AF_LOCAL, SOCK_DGRAM, 0); if (fd < 0) return -errno; ifr.ifr_data = (void *)&channels; memcpy(ifr.ifr_name, ifname, IFNAMSIZ - 1); ifr.ifr_name[IFNAMSIZ - 1] = '\0'; err = ioctl(fd, SIOCETHTOOL, &ifr); if (err && errno != EOPNOTSUPP) { ret = -errno; goto out; } if (err) { /* If the device says it has no channels, * try to get rx tx from sysfs, 
otherwise all traffic * is sent to a single stream, so max queues = 1. */ __u32 rx, tx; xsk_get_queues_from_sysfs(ifr.ifr_name, &rx, &tx); ret = max(max(rx, tx), 1); } else { /* Take the max of rx, tx, combined. Drivers return * the number of channels in different ways. */ ret = max(channels.max_rx, channels.max_tx); ret = max(ret, (int)channels.max_combined); } out: close(fd); return ret; } static int xsk_size_map(struct xdp_program *xdp_prog, char *ifname) { struct bpf_object *bpf_obj = xdp_program__bpf_obj(xdp_prog); struct bpf_map *map; int max_queues; int err; max_queues = xsk_get_max_queues(ifname); if (max_queues < 0) return max_queues; map = bpf_object__find_map_by_name(bpf_obj, "xsks_map"); if (!map) return -ENOENT; err = bpf_map__set_max_entries(map, max_queues); if (err) return err; return 0; } static void xsk_delete_map_entry(int xsks_map_fd, __u32 queue_id) { bpf_map_delete_elem(xsks_map_fd, &queue_id); close(xsks_map_fd); } static int xsk_lookup_map_by_filter(int prog_fd, bool (*map_info_filter)(struct bpf_map_info *map_info)) { __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info); __u32 map_len = sizeof(struct bpf_map_info); struct bpf_prog_info prog_info = {}; int fd, err, xsks_map_fd = -ENOENT; struct bpf_map_info map_info; err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_len); if (err) return err; num_maps = prog_info.nr_map_ids; map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids)); if (!map_ids) return -ENOMEM; memset(&prog_info, 0, prog_len); prog_info.nr_map_ids = num_maps; prog_info.map_ids = (__u64)(unsigned long)map_ids; err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_len); if (err) { free(map_ids); return err; } for (i = 0; i < prog_info.nr_map_ids; i++) { fd = bpf_map_get_fd_by_id(map_ids[i]); if (fd < 0) continue; memset(&map_info, 0, map_len); err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len); if (err) { close(fd); continue; } if (map_info_filter(&map_info)) { xsks_map_fd = fd; break; } close(fd); } free(map_ids); return xsks_map_fd; } static bool xsk_map_is_socket_map(struct bpf_map_info *map_info) { return !strncmp(map_info->name, "xsks_map", sizeof(map_info->name)) && map_info->key_size == 4 && map_info->value_size == 4; } static bool xsk_map_is_refcnt_map(struct bpf_map_info *map_info) { /* In order to avoid confusing users with multiple identically named * maps, libbpf names non-custom internal maps (.data, .bss, etc.) * in an unexpected way, namely the first 8 characters of a bpf object * name + a suffix signifying the internal map type, * ex. "xdp_def_" + ".data". 
*/ return !strncmp(map_info->name, "xsk_def_.data", sizeof(map_info->name)) && map_info->value_size >= sizeof(int); } static int xsk_lookup_bpf_map(int prog_fd) { return xsk_lookup_map_by_filter(prog_fd, &xsk_map_is_socket_map); } static int xsk_lookup_refcnt_map(int prog_fd, const char *xdp_filename) { int map_fd = xsk_lookup_map_by_filter(prog_fd, &xsk_map_is_refcnt_map); if (map_fd >= 0) goto out; if (map_fd != -ENOENT) { pr_debug("Error getting refcount map: %s\n", strerror(-map_fd)); goto out; } if (xdp_filename) pr_warn("Refcount was not found in %s or kernel does not support required features, so automatic program removal on unload is disabled\n", xdp_filename); else pr_warn("Another XSK socket was created by a version of libxdp that doesn't support program refcnt, so automatic program removal on unload is disabled.\n"); out: return map_fd; } #ifdef HAVE_LIBBPF_BPF_MAP_CREATE /* bpf_map_create() and the new bpf_prog_create() were added at the same time - * however there's a naming conflict with another bpf_prog_load() function in * older versions of libbpf; to avoid hitting that we create our own wrapper * function for this one even with new libbpf versions. */ static int xsk_check_create_prog(struct bpf_insn *insns, size_t insns_cnt) { return bpf_prog_load(BPF_PROG_TYPE_XDP, "testprog", "GPL", insns, insns_cnt, NULL); } #else static int bpf_map_create(enum bpf_map_type map_type, __unused const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries, __unused void *opts) { struct bpf_create_map_attr map_attr; memset(&map_attr, 0, sizeof(map_attr)); map_attr.map_type = map_type; map_attr.key_size = key_size; map_attr.value_size = value_size; map_attr.max_entries = max_entries; return bpf_create_map_xattr(&map_attr); } static int xsk_check_create_prog(struct bpf_insn *insns, size_t insns_cnt) { struct bpf_load_program_attr prog_attr; memset(&prog_attr, 0, sizeof(prog_attr)); prog_attr.prog_type = BPF_PROG_TYPE_XDP; prog_attr.insns = insns; prog_attr.insns_cnt = insns_cnt; prog_attr.license = "GPL"; return bpf_load_program_xattr(&prog_attr, NULL, 0); } #endif static bool xsk_check_redirect_flags(void) { char data_in = 0, data_out; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data_in, .data_out = &data_out, .data_size_in = 1); struct bpf_insn insns[] = { BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_3, XDP_PASS), BPF_EMIT_CALL(BPF_FUNC_redirect_map), BPF_EXIT_INSN(), }; int prog_fd, map_fd, ret; bool detected = false; map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xskmap", sizeof(int), sizeof(int), 1, NULL); if (map_fd < 0) return detected; insns[0].imm = map_fd; prog_fd = xsk_check_create_prog(insns, ARRAY_SIZE(insns)); if (prog_fd < 0) { close(map_fd); return detected; } ret = bpf_prog_test_run_opts(prog_fd, &opts); if (!ret && opts.retval == XDP_PASS) detected = true; close(prog_fd); close(map_fd); return detected; } static struct xdp_program *xsk_lookup_program(int ifindex) { const char *version_name = "xsk_prog_version"; const char *prog_name = "xsk_def_prog"; struct xdp_multiprog *multi_prog; struct xdp_program *prog = NULL; __u32 version; int err; multi_prog = xdp_multiprog__get_from_ifindex(ifindex); if (IS_ERR(multi_prog)) return NULL; if (xdp_multiprog__is_legacy(multi_prog)) { prog = xdp_multiprog__main_prog(multi_prog); prog = strcmp(xdp_program__name(prog), prog_name) ? 
NULL : prog; goto check; } while ((prog = xdp_multiprog__next_prog(prog, multi_prog))) if (!strcmp(xdp_program__name(prog), prog_name)) break; check: if (!prog) goto out; err = check_xdp_prog_version(xdp_program__btf(prog), version_name, &version); if (err) { prog = ERR_PTR(err); goto out; } if (version > XSK_PROG_VERSION) { pr_warn("XSK default program version %d higher than supported %d\n", version, XSK_PROG_VERSION); prog = ERR_PTR(-EOPNOTSUPP); } out: if (!IS_ERR_OR_NULL(prog)) prog = xdp_program__clone(prog, 0); xdp_multiprog__close(multi_prog); return prog; } static int xsk_update_prog_refcnt(int refcnt_map_fd, int delta) { struct bpf_map_info map_info = {}; __u32 info_len = sizeof(map_info); int *value_data = NULL; int lock_fd, ret; __u32 key = 0; ret = bpf_obj_get_info_by_fd(refcnt_map_fd, &map_info, &info_len); if (ret) return ret; value_data = calloc(1, map_info.value_size); if (!value_data) return -ENOMEM; lock_fd = xdp_lock_acquire(); if (lock_fd < 0) { ret = lock_fd; goto out; } /* Note, if other global variables are added before the refcnt, * this changes map's value type, not number of elements, * so additional offset must be applied to value_data, * when reading refcount, but map key always stays zero */ ret = bpf_map_lookup_elem(refcnt_map_fd, &key, value_data); if (ret) goto unlock; /* If refcount is 0, program is awaiting detach and can't be used */ if (*value_data) { *value_data += delta; ret = bpf_map_update_elem(refcnt_map_fd, &key, value_data, 0); if (ret) goto unlock; } ret = *value_data; unlock: xdp_lock_release(lock_fd); out: free(value_data); return ret; } static int xsk_incr_prog_refcnt(int refcnt_map_fd) { return xsk_update_prog_refcnt(refcnt_map_fd, 1); } static int xsk_decr_prog_refcnt(int refcnt_map_fd) { return xsk_update_prog_refcnt(refcnt_map_fd, -1); } static int __xsk_setup_xdp_prog(struct xsk_socket *xsk, int *xsks_map_fd) { const char *fallback_prog = "xsk_def_xdp_prog_5.3.o"; const char *default_prog = "xsk_def_xdp_prog.o"; struct xsk_ctx *ctx = xsk->ctx; const char *file_name = NULL; bool attached = false; int err; ctx->xdp_prog = xsk_lookup_program(ctx->ifindex); if (IS_ERR(ctx->xdp_prog)) return PTR_ERR(ctx->xdp_prog); ctx->refcnt_map_fd = -ENOENT; if (ctx->xdp_prog) { int refcnt; ctx->refcnt_map_fd = xsk_lookup_refcnt_map(xdp_program__fd(ctx->xdp_prog), NULL); if (ctx->refcnt_map_fd == -ENOENT) goto map_lookup; if (ctx->refcnt_map_fd < 0) { err = ctx->refcnt_map_fd; goto err_prog_load; } refcnt = xsk_incr_prog_refcnt(ctx->refcnt_map_fd); if (refcnt < 0) { err = refcnt; pr_debug("Error occurred when incrementing xsk XDP prog refcount: %s\n", strerror(-err)); goto err_prog_load; } if (!refcnt) { pr_warn("Current program is being detached, falling back on creating a new program\n"); close(ctx->refcnt_map_fd); ctx->refcnt_map_fd = -ENOENT; xdp_program__close(ctx->xdp_prog); ctx->xdp_prog = NULL; } } if (!ctx->xdp_prog) { file_name = xsk_check_redirect_flags() ? 
default_prog : fallback_prog; ctx->xdp_prog = xdp_program__find_file(file_name, NULL, NULL); if (IS_ERR(ctx->xdp_prog)) return PTR_ERR(ctx->xdp_prog); err = xsk_size_map(ctx->xdp_prog, ctx->ifname); if (err) goto err_prog_load; err = xdp_program__attach(ctx->xdp_prog, ctx->ifindex, xsk_convert_xdp_flags(xsk->config.xdp_flags), 0); if (err) goto err_prog_load; attached = true; } if (ctx->refcnt_map_fd < 0) { ctx->refcnt_map_fd = xsk_lookup_refcnt_map(xdp_program__fd(ctx->xdp_prog), file_name); if (ctx->refcnt_map_fd < 0 && ctx->refcnt_map_fd != -ENOENT) { err = ctx->refcnt_map_fd; goto err_prog_load; } } map_lookup: ctx->xsks_map_fd = xsk_lookup_bpf_map(xdp_program__fd(ctx->xdp_prog)); if (ctx->xsks_map_fd < 0) { err = ctx->xsks_map_fd; goto err_lookup; } if (xsk->rx) { err = bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id, &xsk->fd, 0); if (err) goto err_lookup; } if (xsks_map_fd) *xsks_map_fd = ctx->xsks_map_fd; return 0; err_lookup: if (attached) xdp_program__detach(ctx->xdp_prog, ctx->ifindex, xsk_convert_xdp_flags(xsk->config.xdp_flags), 0); err_prog_load: if (ctx->refcnt_map_fd >= 0) close(ctx->refcnt_map_fd); ctx->refcnt_map_fd = -ENOENT; xdp_program__close(ctx->xdp_prog); ctx->xdp_prog = NULL; return err; } static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, __u64 netns_cookie, int ifindex, __u32 queue_id) { struct xsk_ctx *ctx; if (list_empty(&umem->ctx_list)) return NULL; list_for_each_entry(ctx, &umem->ctx_list, list) { if (ctx->netns_cookie == netns_cookie && ctx->ifindex == ifindex && ctx->queue_id == queue_id) { ctx->refcount++; return ctx; } } return NULL; } static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap) { struct xsk_umem *umem = ctx->umem; struct xdp_mmap_offsets off; int err; if (--ctx->refcount) return; if (!unmap) goto out_free; err = xsk_get_mmap_offsets(umem->fd, &off); if (err) goto out_free; munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * sizeof(__u64)); munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size * sizeof(__u64)); out_free: list_del(&ctx->list); free(ctx); } static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, struct xsk_umem *umem, __u64 netns_cookie, int ifindex, const char *ifname, __u32 queue_id, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp) { struct xsk_ctx *ctx; int err; ctx = calloc(1, sizeof(*ctx)); if (!ctx) return NULL; if (!umem->fill_save) { err = xsk_create_umem_rings(umem, xsk->fd, fill, comp); if (err) { free(ctx); return NULL; } } else if (umem->fill_save != fill || umem->comp_save != comp) { /* Copy over rings to new structs. 
*/ memcpy(fill, umem->fill_save, sizeof(*fill)); memcpy(comp, umem->comp_save, sizeof(*comp)); } ctx->netns_cookie = netns_cookie; ctx->ifindex = ifindex; ctx->refcount = 1; ctx->umem = umem; ctx->queue_id = queue_id; memcpy(ctx->ifname, ifname, IFNAMSIZ - 1); ctx->ifname[IFNAMSIZ - 1] = '\0'; ctx->fill = fill; ctx->comp = comp; list_add(&ctx->list, &umem->ctx_list); return ctx; } static void xsk_destroy_xsk_struct(struct xsk_socket *xsk) { xdp_program__close(xsk->ctx->xdp_prog); free(xsk->ctx); free(xsk); } int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd) { struct xsk_ctx *ctx = xsk->ctx; ctx->xsks_map_fd = fd; return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id, &xsk->fd, 0); } int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd) { struct xsk_socket *xsk; int res; xsk = calloc(1, sizeof(*xsk)); if (!xsk) return -ENOMEM; res = xsk_init_xsk_struct(xsk, ifindex); if (res) { free(xsk); return -EINVAL; } res = __xsk_setup_xdp_prog(xsk, xsks_map_fd); xsk_destroy_xsk_struct(xsk); return res; } struct xsk_socket *xsk_socket__create_opts(const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_socket_opts *opts) { bool rx_setup_done = false, tx_setup_done = false; void *rx_map = NULL, *tx_map = NULL; struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_ring_prod *fill; struct xsk_ring_cons *comp; struct xsk_ring_cons *rx; struct xsk_ring_prod *tx; struct xsk_socket *xsk; struct xsk_ctx *ctx; int err, ifindex; __u64 netns_cookie; socklen_t optlen; bool unmap; if (!OPTS_VALID(opts, xsk_socket_opts)) { err = -EINVAL; goto err; } rx = OPTS_GET(opts, rx, NULL); tx = OPTS_GET(opts, tx, NULL); fill = OPTS_GET(opts, fill, NULL); comp = OPTS_GET(opts, comp, NULL); if (!umem || !(rx || tx) || (fill == NULL) ^ (comp == NULL)) { err = -EFAULT; goto err; } if (!fill && !comp) { fill = umem->fill_save; comp = umem->comp_save; } xsk = calloc(1, sizeof(*xsk)); if (!xsk) { err = -ENOMEM; goto err; } err = xsk_set_xdp_socket_config(&xsk->config, opts); if (err) goto out_xsk_alloc; ifindex = if_nametoindex(ifname); if (!ifindex) { err = -errno; goto out_xsk_alloc; } if (umem->refcount++ > 0) { xsk->fd = socket(AF_XDP, SOCK_RAW, 0); if (xsk->fd < 0) { err = -errno; goto out_xsk_alloc; } } else { xsk->fd = umem->fd; rx_setup_done = umem->rx_ring_setup_done; tx_setup_done = umem->tx_ring_setup_done; } optlen = sizeof(netns_cookie); err = getsockopt(xsk->fd, SOL_SOCKET, SO_NETNS_COOKIE, &netns_cookie, &optlen); if (err) { if (errno != ENOPROTOOPT) { err = -errno; goto out_socket; } netns_cookie = INIT_NS; } ctx = xsk_get_ctx(umem, netns_cookie, ifindex, queue_id); if (!ctx) { if (!fill || !comp) { err = -EFAULT; goto out_socket; } ctx = xsk_create_ctx(xsk, umem, netns_cookie, ifindex, ifname, queue_id, fill, comp); if (!ctx) { err = -ENOMEM; goto out_socket; } } xsk->ctx = ctx; if (rx && !rx_setup_done) { err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, &xsk->config.rx_size, sizeof(xsk->config.rx_size)); if (err) { err = -errno; goto out_put_ctx; } if (xsk->fd == umem->fd) umem->rx_ring_setup_done = true; } if (tx && !tx_setup_done) { err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING, &xsk->config.tx_size, sizeof(xsk->config.tx_size)); if (err) { err = -errno; goto out_put_ctx; } if (xsk->fd == umem->fd) umem->tx_ring_setup_done = true; } err = xsk_get_mmap_offsets(xsk->fd, &off); if (err) { err = -errno; goto out_put_ctx; } if (rx) { rx_map = mmap(NULL, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 
xsk->fd, XDP_PGOFF_RX_RING); if (rx_map == MAP_FAILED) { err = -errno; goto out_put_ctx; } rx->mask = xsk->config.rx_size - 1; rx->size = xsk->config.rx_size; rx->producer = rx_map + off.rx.producer; rx->consumer = rx_map + off.rx.consumer; rx->flags = rx_map + off.rx.flags; rx->ring = rx_map + off.rx.desc; rx->cached_prod = *rx->producer; rx->cached_cons = *rx->consumer; } xsk->rx = rx; if (tx) { tx_map = mmap(NULL, off.tx.desc + xsk->config.tx_size * sizeof(struct xdp_desc), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, xsk->fd, XDP_PGOFF_TX_RING); if (tx_map == MAP_FAILED) { err = -errno; goto out_mmap_rx; } tx->mask = xsk->config.tx_size - 1; tx->size = xsk->config.tx_size; tx->producer = tx_map + off.tx.producer; tx->consumer = tx_map + off.tx.consumer; tx->flags = tx_map + off.tx.flags; tx->ring = tx_map + off.tx.desc; tx->cached_prod = *tx->producer; /* cached_cons is r->size bigger than the real consumer pointer * See xsk_prod_nb_free */ tx->cached_cons = *tx->consumer + xsk->config.tx_size; } xsk->tx = tx; sxdp.sxdp_family = PF_XDP; sxdp.sxdp_ifindex = ctx->ifindex; sxdp.sxdp_queue_id = ctx->queue_id; if (umem->refcount > 1) { sxdp.sxdp_flags |= XDP_SHARED_UMEM; sxdp.sxdp_shared_umem_fd = umem->fd; } else { sxdp.sxdp_flags = xsk->config.bind_flags; } err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); if (err) { err = -errno; goto out_mmap_tx; } if (!(xsk->config.libxdp_flags & XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD)) { err = __xsk_setup_xdp_prog(xsk, NULL); if (err) goto out_mmap_tx; } umem->fill_save = NULL; umem->comp_save = NULL; return xsk; out_mmap_tx: if (tx) munmap(tx_map, off.tx.desc + xsk->config.tx_size * sizeof(struct xdp_desc)); out_mmap_rx: if (rx) munmap(rx_map, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc)); out_put_ctx: unmap = umem->fill_save != fill; xsk_put_ctx(ctx, unmap); out_socket: if (--umem->refcount) close(xsk->fd); out_xsk_alloc: free(xsk); err: return libxdp_err_ptr(err, true); } int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_socket_config *usr_config) { struct xsk_socket *xsk; if (!xsk_ptr) return -EFAULT; DECLARE_LIBXDP_OPTS(xsk_socket_opts, opts, .rx = rx, .tx = tx, .fill = fill, .comp = comp, ); if (usr_config) { opts.rx_size = usr_config->rx_size; opts.tx_size= usr_config->tx_size; opts.libxdp_flags = usr_config->libxdp_flags; opts.xdp_flags = usr_config->xdp_flags; opts.bind_flags = usr_config->bind_flags; } xsk = xsk_socket__create_opts(ifname, queue_id, umem, &opts); if (!xsk) return -errno; *xsk_ptr = xsk; return 0; } int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *usr_config) { if (!umem) return -EFAULT; return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, rx, tx, umem->fill_save, umem->comp_save, usr_config); } int xsk_umem__delete(struct xsk_umem *umem) { struct xdp_mmap_offsets off; int err; if (!umem) return 0; if (umem->refcount) return -EBUSY; err = xsk_get_mmap_offsets(umem->fd, &off); if (!err && umem->fill_save && umem->comp_save) { munmap(umem->fill_save->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * sizeof(__u64)); munmap(umem->comp_save->ring - off.cr.desc, off.cr.desc + umem->config.comp_size * sizeof(__u64)); } close(umem->fd); free(umem); 
return 0; } static void xsk_release_xdp_prog(struct xsk_socket *xsk) { struct xsk_ctx *ctx = xsk->ctx; int value; if (xsk->ctx->refcnt_map_fd < 0) goto out; value = xsk_decr_prog_refcnt(ctx->refcnt_map_fd); if (value < 0) pr_warn("Error occurred when decrementing xsk XDP prog refcount: %s, please detach program yourself\n", strerror(-value)); if (value) goto out; xdp_program__detach(ctx->xdp_prog, ctx->ifindex, xsk_convert_xdp_flags(xsk->config.xdp_flags), 0); out: xdp_program__close(ctx->xdp_prog); } void xsk_socket__delete(struct xsk_socket *xsk) { size_t desc_sz = sizeof(struct xdp_desc); struct xdp_mmap_offsets off; struct xsk_umem *umem; struct xsk_ctx *ctx; int err; if (!xsk) return; ctx = xsk->ctx; umem = ctx->umem; if (ctx->xdp_prog) { xsk_delete_map_entry(ctx->xsks_map_fd, ctx->queue_id); xsk_release_xdp_prog(xsk); } err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { munmap(xsk->rx->ring - off.rx.desc, off.rx.desc + xsk->config.rx_size * desc_sz); } if (xsk->tx) { munmap(xsk->tx->ring - off.tx.desc, off.tx.desc + xsk->config.tx_size * desc_sz); } } xsk_put_ctx(ctx, true); umem->refcount--; /* Do not close an fd that also has an associated umem connected * to it. */ if (xsk->fd != umem->fd) close(xsk->fd); free(xsk); } xdp-tools-1.5.4/lib/libxdp/xsk_def_xdp_prog.h0000644000175100001660000000036115003640462020543 0ustar runnerdocker// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) #ifndef __LIBXDP_XSK_DEF_XDP_PROG_H #define __LIBXDP_XSK_DEF_XDP_PROG_H #define XDP_METADATA_SECTION "xdp_metadata" #define XSK_PROG_VERSION 1 #endif /* __LIBXDP_XSK_DEF_XDP_PROG_H */ xdp-tools-1.5.4/lib/libxdp/xdp-dispatcher.c.in0000644000175100001660000000460415003640462020541 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ divert(-1) #forloop definition taken from example in the M4 manual define(`forloop', `pushdef(`$1', `$2')_forloop($@)popdef(`$1')') define(`_forloop',`$4`'ifelse($1, decr(`$3'), `', `define(`$1', incr($1))$0($@)')') define(`NUM_PROGS',ifdef(`MAX_DISPATCHER_ACTIONS', MAX_DISPATCHER_ACTIONS, `10')) divert(0)dnl #include #include #include #include /* While 'const volatile' sounds a little like an oxymoron, there's reason * behind the madness: * * - const places the data in rodata, where libbpf will mark it as read-only and * frozen on program load, letting the kernel do dead code elimination based * on the values. * * - volatile prevents the compiler from optimising away the checks based on the * compile-time value of the variables, which is important since we will be * changing the values before loading the program into the kernel. */ static volatile const struct xdp_dispatcher_config conf = {}; /* The volatile return value prevents the compiler from assuming it knows the * return value and optimising based on that. 
*/ forloop(`i', `0', NUM_PROGS, `__attribute__ ((noinline)) int format(`prog%d', i)(struct xdp_md *ctx) { volatile int ret = XDP_DISPATCHER_RETVAL; if (!ctx) return XDP_ABORTED; return ret; } ') __attribute__ ((noinline)) int compat_test(struct xdp_md *ctx) { volatile int ret = XDP_DISPATCHER_RETVAL; if (!ctx) return XDP_ABORTED; return ret; } SEC("xdp") int xdp_dispatcher(struct xdp_md *ctx) { __u8 num_progs_enabled = conf.num_progs_enabled; int ret; forloop(`i', `0', NUM_PROGS, ` if (num_progs_enabled < incr(i)) goto out; ret = format(`prog%d', i)(ctx); if (!((1U << ret) & conf.chain_call_actions[i])) return ret; ') /* keep a reference to the compat_test() function so we can use it * as an freplace target in xdp_multiprog__check_compat() in libxdp */ if (num_progs_enabled < incr(NUM_PROGS)) goto out; ret = compat_test(ctx); out: return XDP_PASS; } SEC("xdp") int xdp_pass(struct xdp_md *ctx) { return XDP_PASS; } char _license[] SEC("license") = "GPL"; __uint(dispatcher_version, XDP_DISPATCHER_VERSION) SEC(XDP_METADATA_SECTION); xdp-tools-1.5.4/lib/libxdp/libxdp.map0000644000175100001660000000371115003640462017030 0ustar runnerdockerLIBXDP_1.0.0 { global: libxdp_get_error; libxdp_set_print; libxdp_strerror; xdp_multiprog__attach_mode; xdp_multiprog__close; xdp_multiprog__detach; xdp_multiprog__get_from_ifindex; xdp_multiprog__is_legacy; xdp_multiprog__next_prog; xdp_multiprog__main_prog; xdp_multiprog__hw_prog; xdp_program__attach; xdp_program__attach_multi; xdp_program__bpf_obj; xdp_program__btf; xdp_program__chain_call_enabled; xdp_program__close; xdp_program__detach; xdp_program__detach_multi; xdp_program__find_file; xdp_program__from_bpf_obj; xdp_program__from_fd; xdp_program__from_id; xdp_program__from_pin; xdp_program__fd; xdp_program__id; xdp_program__is_attached; xdp_program__name; xdp_program__open_file; xdp_program__pin; xdp_program__print_chain_call_actions; xdp_program__run_prio; xdp_program__set_chain_call_enabled; xdp_program__set_run_prio; xdp_program__tag; }; LIBXDP_1.2.0 { libxdp_clean_references; xdp_multiprog__program_count; xsk_setup_xdp_prog; xsk_socket__create; xsk_socket__create_shared; xsk_socket__delete; xsk_socket__fd; xsk_socket__update_xskmap; xsk_umem__create; xsk_umem__delete; xsk_umem__fd; xsk_cons_nb_avail; xsk_prod_nb_free; xsk_ring_cons__cancel; xsk_ring_cons__comp_addr; xsk_ring_cons__peek; xsk_ring_cons__release; xsk_ring_cons__rx_desc; xsk_ring_prod__fill_addr; xsk_ring_prod__needs_wakeup; xsk_ring_prod__reserve; xsk_ring_prod__submit; xsk_ring_prod__tx_desc; xsk_umem__add_offset_to_addr; xsk_umem__extract_addr; xsk_umem__extract_offset; xsk_umem__get_data; } LIBXDP_1.0.0; LIBXDP_1.3.0 { xdp_multiprog__xdp_frags_support; xdp_program__clone; xdp_program__create; xdp_program__set_xdp_frags_support; xdp_program__test_run; xdp_program__xdp_frags_support; } LIBXDP_1.2.0; LIBXDP_1.4.0 { xsk_umem__create_with_fd; } LIBXDP_1.3.0; LIBXDP_1.5.0 { xsk_umem__create_opts; xsk_socket__create_opts; } LIBXDP_1.4.0; xdp-tools-1.5.4/lib/libxdp/xsk_def_xdp_prog_5.3.c0000644000175100001660000000222215003640462021121 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include "xsk_def_xdp_prog.h" #define DEFAULT_QUEUE_IDS 64 struct { __uint(type, BPF_MAP_TYPE_XSKMAP); __uint(key_size, sizeof(int)); __uint(value_size, sizeof(int)); __uint(max_entries, DEFAULT_QUEUE_IDS); } xsks_map SEC(".maps"); struct { __uint(priority, 20); __uint(XDP_PASS, 1); } XDP_RUN_CONFIG(xsk_def_prog); /* Program refcount, in order to work properly, * must be 
declared before any other global variables
 * and initialized with '1'. */
volatile int refcnt = 1;

/* This is the program for 5.3 kernels and older. */
SEC("xdp")
int xsk_def_prog(struct xdp_md *ctx)
{
	int index = ctx->rx_queue_index;

	/* Make sure refcount is referenced by the program */
	if (!refcnt)
		return XDP_PASS;

	/* A set entry here means that the corresponding queue_id
	 * has an active AF_XDP socket bound to it.
	 */
	if (bpf_map_lookup_elem(&xsks_map, &index))
		return bpf_redirect_map(&xsks_map, index, 0);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
__uint(xsk_prog_version, XSK_PROG_VERSION) SEC(XDP_METADATA_SECTION);
xdp-tools-1.5.4/lib/libxdp/libxdp.pc.template0000644000175100001660000000036715003640462020473 0ustar runnerdocker# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
prefix=@PREFIX@
libdir=@LIBDIR@
includedir=${prefix}/include

Name: libxdp
Description: XDP library
Version: @VERSION@
Libs: -L${libdir} -lxdp
Requires.private: libbpf
Cflags: -I${includedir}
xdp-tools-1.5.4/lib/libxdp/.gitignore0000644000175100001660000000005015003640462017030 0ustar runnerdocker*.so.*
*.a
*.pc
sharedobjs/
staticobjs/
xdp-tools-1.5.4/lib/libxdp/protocol.org0000644000175100001660000006050115003640462017421 0ustar runnerdocker#+OPTIONS: ^:nil
* Protocol for atomic loading of multi-prog dispatchers

With the support for the =freplace= program type, it is possible to load
multiple XDP programs on a single interface by building a /dispatcher/ program
which will run on the interface, and which will call the component XDP
programs as functions using the =freplace= type. For this to work in an
interoperable way, applications need to agree on how to attach their XDP
programs using this mechanism. This document outlines the protocol implemented
by =libxdp=, serving as both documentation and a blueprint for anyone else who
wants to implement the same protocol and interoperate.

** Generating a dispatcher
The dispatcher is simply an XDP program that will call each of a number of
stub functions in turn, and depending on their return code either continue on
to the next function or return immediately. These stub functions are then
replaced at load time with the user XDP programs, using the =freplace=
functionality.

*** Dispatcher format
The dispatcher XDP program contains the main function implementing the
dispatcher logic, 10 stub functions that can be replaced by component BPF
programs, and a configuration structure that is used by the dispatcher logic.
In =libxdp=, this dispatcher is generated by [[https://github.com/xdp-project/xdp-tools/blob/main/lib/libxdp/xdp-dispatcher.c.in][an M4 macro file]] which expands
to the following:

#+begin_src C
#define XDP_METADATA_SECTION "xdp_metadata"
#define XDP_DISPATCHER_VERSION 2
#define XDP_DISPATCHER_MAGIC 236
#define XDP_DISPATCHER_RETVAL 31
#define MAX_DISPATCHER_ACTIONS 10

struct xdp_dispatcher_config {
	__u8 magic;              /* Set to XDP_DISPATCHER_MAGIC */
	__u8 dispatcher_version; /* Set to XDP_DISPATCHER_VERSION */
	__u8 num_progs_enabled;  /* Number of active program slots */
	__u8 is_xdp_frags;       /* Whether this dispatcher is loaded with XDP frags support */
	__u32 chain_call_actions[MAX_DISPATCHER_ACTIONS];
	__u32 run_prios[MAX_DISPATCHER_ACTIONS];
	__u32 program_flags[MAX_DISPATCHER_ACTIONS];
};

/* While 'const volatile' sounds a little like an oxymoron, there's reason
 * behind the madness:
 *
 * - const places the data in rodata, where libbpf will mark it as read-only and
 *   frozen on program load, letting the kernel do dead code elimination based
 *   on the values.
 *
 * - volatile prevents the compiler from optimising away the checks based on the
 *   compile-time value of the variables, which is important since we will be
 *   changing the values before loading the program into the kernel.
 */
static volatile const struct xdp_dispatcher_config conf = {};

/* The volatile return value prevents the compiler from assuming it knows the
 * return value and optimising based on that.
 */
__attribute__ ((noinline))
int prog0(struct xdp_md *ctx)
{
	volatile int ret = XDP_DISPATCHER_RETVAL;

	if (!ctx)
		return XDP_ABORTED;
	return ret;
}
/* the above is repeated as prog1...prog9 */

SEC("xdp")
int xdp_dispatcher(struct xdp_md *ctx)
{
	__u8 num_progs_enabled = conf.num_progs_enabled;
	int ret;

	if (num_progs_enabled < 1)
		goto out;
	ret = prog0(ctx);
	if (!((1U << ret) & conf.chain_call_actions[0]))
		return ret;
	/* the above is repeated for prog1...prog9 */
out:
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
__uint(dispatcher_version, XDP_DISPATCHER_VERSION) SEC(XDP_METADATA_SECTION);
#+end_src

The dispatcher program is pre-compiled and distributed with =libxdp=. Because
the configuration struct is marked as =const= in the source file, it will be
put into the =rodata=, which libbpf will turn into a read-only (frozen) map on
load. This allows the kernel verifier to perform dead code elimination based
on the values in the map. This is also the reason for the =num_progs_enabled=
member of the config struct: together with the checks in the main dispatcher
function the verifier will effectively remove all the stub function calls not
being used, without having to rely on dynamic compilation.

When generating a dispatcher, this BPF object file is opened and the
configuration struct is populated before the object is loaded. As a forward
compatibility measure, =libxdp= will also check for the presence of the
=dispatcher_version= field in the =xdp_metadata= section (encoded like the
program metadata described in "Processing program metadata" below), and if it
doesn't match the expected version (currently version 2), will abort any
action.

*** Populating the dispatcher configuration map
On loading, the dispatcher configuration map is populated as follows:

- The =magic= field is set to the =XDP_DISPATCHER_MAGIC= value (236). This
  field is here to make it possible to check if a program is a dispatcher
  without looking at the program BTF in the future.

- The =dispatcher_version= field is set to the current dispatcher version (2).
  This is redundant with the BTF-encoded version in the metadata field, but
  must be checked so that the BTF metadata version can be removed in the
  future. See the section on old dispatcher versions below.

- The =num_progs_enabled= member is simply set to the number of active
  programs that will be attached to this dispatcher.

- The =is_xdp_frags= variable is set to 1 if the dispatcher is loaded with XDP
  frags support (see the section below), or 0 otherwise.

The two other fields contain per-component program metadata, which is read
from the component programs as explained in the "Processing program metadata"
section below.

- The =chain_call_actions= array is populated with a bitmap signifying which
  XDP actions (return codes) of each component program should be interpreted
  as a signal to continue execution of the next XDP program. For instance, a
  packet filtering program might designate that an =XDP_PASS= action should
  make execution continue, while other return codes should immediately end the
  call chain and return. The special =XDP_DISPATCHER_RETVAL= (which is set to
  31, corresponding to the topmost bit in the bitmap) is always included in
  each program's =chain_call_actions=; this value is returned by the stub
  functions, which ensures that should a component program become detached,
  processing will always continue past the stub function.

- The =run_prios= array contains the effective run priority of each component
  program when it was installed. This is also read as program metadata, but
  because it can be overridden at load time, the effective value is stored in
  the configuration array so it can be carried forward when the dispatcher is
  replaced. Component programs are expected to be sorted in order of their run
  priority (as explained below in "Loading and attaching component programs").

- The =program_flags= array is used to store the flags that an XDP program was
  loaded with. This is populated with the value of the =BPF_F_XDP_HAS_FRAGS=
  flag if the component program in this slot had that flag set (see the
  section on XDP frags support below), and is 0 otherwise.

**** Processing program metadata
As explained above, each component program must specify one or more chain call
actions and a run priority on attach. When loading a user program, =libxdp=
will attempt to read this metadata from the object file as explained in the
following; if no values are found in the object file, a default run priority
of 50 will be applied, and =XDP_PASS= will be the only chain call action.

The metadata is read from the object file by looking for BTF-encoded metadata
in the =.xdp_run_config= object section, encoded similarly to the BTF-defined
maps used by libbpf (in the =.maps= section). Here, =libxdp= will look for a
struct definition with the XDP program function name prefixed by an underscore
(e.g., if the main XDP function is called =xdp_main=, libxdp will look for a
struct definition called =_xdp_main=). In this struct, a member =priority=
encodes the run priority, and each XDP action can be set as a chain call
action by setting a struct member with the action name.
The =xdp_helpers.h= header file included with XDP exposes helper macros that
can be used with the existing helpers in =bpf_helpers.h= (from libbpf), so a
full run configuration metadata section can be defined as follows:

#+begin_src C
#include <bpf/bpf_helpers.h>
#include <xdp/xdp_helpers.h>

struct {
	__uint(priority, 10);
	__uint(XDP_PASS, 1);
	__uint(XDP_DROP, 1);
} XDP_RUN_CONFIG(my_xdp_func);
#+end_src

This example sets priority 10 with chain call actions =XDP_PASS= and
=XDP_DROP= for the XDP program starting at =my_xdp_func()=. This turns into
the following BTF information (as shown by =bpftool btf dump=):

#+begin_src
[12] STRUCT '(anon)' size=24 vlen=3
	'priority' type_id=13 bits_offset=0
	'XDP_PASS' type_id=15 bits_offset=64
	'XDP_DROP' type_id=15 bits_offset=128
[13] PTR '(anon)' type_id=14
[14] ARRAY '(anon)' type_id=6 index_type_id=10 nr_elems=10
[15] PTR '(anon)' type_id=16
[16] ARRAY '(anon)' type_id=6 index_type_id=10 nr_elems=1
[17] VAR '_my_xdp_func' type_id=12, linkage=global-alloc
[18] DATASEC '.xdp_run_config' size=0 vlen=1
	type_id=17 offset=0 size=24
#+end_src

The parser will look for the =.xdp_run_config= DATASEC, then follow the types
recursively, extracting the field values from the =nr_elems= in the anonymous
arrays in type IDs 14 and 16.

While =libxdp= will automatically load any metadata specified as above in the
program BTF, the application using =libxdp= can override these values at
runtime. These overridden values will be the ones used when determining
program order, and will be preserved in the dispatcher configuration map for
subsequent operations.

*** Old versions of the XDP dispatcher
This document currently describes version 2 of the dispatcher and protocol.
This differs from version 1 in the following respects:

- The dispatcher configuration map has gained the =magic= and
  =dispatcher_version= fields for identifying the dispatcher and its version.

- The protocol now supports propagating the value of the =BPF_F_XDP_HAS_FRAGS=
  flag for supporting XDP frags programs for higher MTUs. The dispatcher
  configuration map has gained the =is_xdp_frags= and =program_flags= fields
  for use with this feature. The protocol for propagating the frags flag is
  described below, and an implementation of this protocol that recognises
  version 2 of the dispatcher MUST implement this protocol.

Older versions of libxdp will check the dispatcher version field of any
dispatcher loaded in the kernel, and refuse to operate on a dispatcher with a
higher version than the library version implements. This means that if a newer
dispatcher is loaded, old versions of the library will be locked out of
modifying that dispatcher. This is by design: old library versions don't
recognise the semantics of new features added in subsequent versions, and so
would introduce bugs if they attempted to operate on newer versions.

Newer versions of libxdp will, however, recognise older dispatcher versions.
If a newer version of libxdp loads a new program and finds an old dispatcher
version already loaded on an interface, it will display the programs attached
to it, but will refuse to replace it with a newer version so as not to lock
out the program that loaded the program(s) already attached. Manually
unloading the loaded programs will be required to load a new dispatcher
version on the interface.
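
To make the version handling above concrete, here is a minimal sketch (not
libxdp's actual code; the function name is invented) of the gate an
implementation of this protocol might apply after reading the configuration
struct from the kernel. Note that a version 1 dispatcher predates the =magic=
field, so on a mismatch a real implementation still has to fall back to
checking the BTF-encoded version:

#+begin_src C
#include <errno.h>

/* Sketch only: decide whether we may operate on an in-kernel dispatcher,
 * based on the config struct read back from its frozen rodata map. */
static int dispatcher_version_gate(const struct xdp_dispatcher_config *conf)
{
	if (conf->magic != XDP_DISPATCHER_MAGIC)
		return -EINVAL;     /* not a v2+ dispatcher; check BTF metadata instead */
	if (conf->dispatcher_version > XDP_DISPATCHER_VERSION)
		return -EOPNOTSUPP; /* newer than we understand; leave it alone */
	return 0;
}
#+end_src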
*** Loading and attaching component programs
When loading one or more XDP programs onto an interface (assuming no existing
program is found on the interface; for adding programs, see below), =libxdp=
first prepares a dispatcher program with the right number of slots, by
populating the configuration struct as described above. Then, this dispatcher
program is loaded into the kernel, with the =BPF_F_XDP_HAS_FRAGS= flag set if
all component programs have that flag set (see the section on supporting XDP
frags below).

Having loaded the dispatcher program, =libxdp= then loads each of the
component programs. To do this, first the list of component programs is sorted
by their run priority, forming the final run sequence. Should several programs
have the same run priority, ties are broken in the following arbitrary, but
deterministic, order (see =cmp_xdp_programs()= [[https://github.com/xdp-project/xdp-tools/blob/main/lib/libxdp/libxdp.c][in libxdp.c]]):

- By XDP function name (=bpf_program__name()= from libbpf)
- By sorting already-loaded programs before not-yet-loaded ones
- For two not-yet-loaded programs, by program size
- For two already-loaded programs, by BPF tag value (using =memcmp()=)
- By load time

Before loading, each component program type is reset to =BPF_PROG_TYPE_EXT=
with an expected attach type of 0, and the =BPF_F_XDP_HAS_FRAGS= flag is unset
(see the section on supporting frags below). Then, the attachment target is
set to the dispatcher file descriptor and the BTF ID of the stub function to
replace (i.e., the first component program has =prog0()= as its target, and so
on). Then the program is loaded, at which point the kernel will verify the
component program's compatibility with the attach point.

Having loaded the component program, it is attached to the dispatcher by way
of =bpf_link_create()=, specifying the same target file descriptor and BTF ID
used when loading the program. This will return a link fd, which will be
pinned to prevent the attachment from unravelling when the fd is closed (see
"Locking and pinning" below).

*** Locking and pinning
To prevent the kernel from detaching any =freplace= program when its last file
descriptor is closed, the programs must be pinned in =bpffs=. This is done in
the =xdp= subdirectory of =bpffs=, which by default means =/sys/fs/bpf/xdp=.
If the =LIBXDP_BPFFS= environment variable is set, this will override the
location of the top-level =bpffs=, and the =xdp= subdirectory will be created
beneath this path. The pathnames generated for pinning are the following:

- /sys/fs/bpf/xdp/dispatch-IFINDEX-DID - dispatcher program for IFINDEX with BPF program ID DID
- /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-prog - component program 0, program reference
- /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog0-link - component program 0, bpf_link reference
- /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-prog - component program 1, program reference
- /sys/fs/bpf/xdp/dispatch-IFINDEX-DID/prog1-link - component program 1, bpf_link reference
- etc, up to ten component programs

This means that several pin operations have to be performed for each
dispatcher program. Semantically, these all form part of a single atomic
operation, so to make sure every consumer of the hierarchy of pinned files
gets a consistent view, locking is needed. This is implemented by opening the
parent directory =/sys/fs/bpf/xdp= with the =O_DIRECTORY= flag, and obtaining
a lock on the resulting file descriptor using =flock(lock_fd, LOCK_EX)=.

When creating a new dispatcher program, it will first be fully populated, with
all component programs attached.
Then, the programs will be linked in =bpffs= as specified above, and once this
succeeds, the program will be attached to the interface. If attaching the
program fails, the programs will be unpinned again, and the error returned to
the caller. This order ensures atomic attachment to the interface, without any
risk that component programs will be automatically detached due to a badly
timed application crash.

When loading the initial dispatcher program, the =XDP_FLAGS_UPDATE_IF_NOEXIST=
flag is set to prevent accidentally overriding any concurrent modifications.
If this fails, the whole operation starts over, turning the load into a
modification as described below.

*** Supporting XDP programs with frags support (BPF_F_XDP_HAS_FRAGS flag)
Linux kernel 5.18 added support for a new API that allows XDP programs to
access packet data that spans more than a single page, allowing XDP programs
to be loaded on interfaces with bigger MTUs. Such packets will not have all
their packet data accessible by the traditional "direct packet access";
instead, only the first fragment will be available this way, and the rest of
the packet data has to be accessed via the new =bpf_xdp_load_bytes()= helper.

Existing XDP programs are written with the assumption that they can see the
whole packet data using direct packet access, which means they can subtly
malfunction if some of the packet data is suddenly invisible (for instance,
counting packet lengths is no longer accurate). Whether a given XDP program
supports the frags API or not is a semantic issue, and it's not possible for
the kernel to auto-detect this. For this reason, programs have to opt in to
XDP frags support at load time, by setting the =BPF_F_XDP_HAS_FRAGS= flag as
they are loaded into the kernel. Programs that are not loaded with this flag
will be rejected from attaching to network devices that use packet fragments
(i.e., those with a large MTU).

This has implications for the XDP dispatcher, as its purpose is for multiple
programs to be loaded at the same time. Since the =BPF_F_XDP_HAS_FRAGS= flag
cannot be set for individual component programs, it has to be set for the
dispatcher as a whole. However, as described above, programs can subtly
malfunction if they are exposed to packets with fragments without being ready
to do so. This means that it's only safe to set the =BPF_F_XDP_HAS_FRAGS= flag
on the dispatcher itself if *all* component programs have the flag set.

To properly propagate the flags even when adding new programs to an existing
dispatcher, the dispatcher itself needs to keep track of which of its
component programs had the =BPF_F_XDP_HAS_FRAGS= flag set when they were
added. The dispatcher configuration map uses the =program_flags= array for
this: for each component program, this field is set to the value of the
=BPF_F_XDP_HAS_FRAGS= flag if that component program has the flag set, and to
0 otherwise. An additional field, =is_xdp_frags=, is set if the dispatcher
itself is loaded with the frags flag set (which may not be the case if the
kernel doesn't support the flag).

When generating a dispatcher for a set of programs, libxdp simply tracks
whether all component programs support the =BPF_F_XDP_HAS_FRAGS= flag, and if
they do, the dispatcher is loaded with this flag set. If any program attached
to the dispatcher does not support the flag, the dispatcher is loaded without
this flag set (and the =is_xdp_frags= field in the dispatcher configuration is
set accordingly).
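
As a minimal illustration of this rule (a sketch, not the actual libxdp code;
the helper name is invented), the flag computation when regenerating a
dispatcher from its configuration might look like this:

#+begin_src C
#include <stdbool.h>
#include <linux/bpf.h>

/* Sketch: the dispatcher may only be loaded with BPF_F_XDP_HAS_FRAGS if
 * every active component slot recorded that flag in program_flags. */
static bool dispatcher_wants_frags(const struct xdp_dispatcher_config *conf)
{
	__u8 i;

	for (i = 0; i < conf->num_progs_enabled; i++)
		if (!(conf->program_flags[i] & BPF_F_XDP_HAS_FRAGS))
			return false;
	return true;
}
#+end_src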
If libxdp determines that the running kernel does not support the
=BPF_F_XDP_HAS_FRAGS= flag, the dispatcher is loaded without the flag
regardless of the flags on the component programs. When adding a program to an
existing dispatcher, this may result in a "downgrade", i.e., loading a new
dispatcher without the frags flag to replace an existing dispatcher that does
have the flag set. This will result in the replacement dispatcher being
rejected by the kernel at attach time, but only if the interface being
attached to actually requires the frags flag (i.e., if it has a large MTU). If
the attachment is rejected, the old dispatcher will stay in place, leading to
no loss of functionality.

** Adding or removing programs from an existing dispatcher
The sections above explain how to generate a dispatcher and attach it to an
interface, assuming no existing program is attached. When one or more programs
are already attached, a couple of extra steps are required to ensure that the
switch is made atomically. Briefly, changing the programs attached to an
interface entails the following steps:

- Reading the existing dispatcher program and obtaining references to the
  component programs.
- Generating a new dispatcher containing the new set of programs (adding or
  removing the programs needed).
- Atomically swapping out the XDP program attachment on the interface so the
  new dispatcher takes over from the old one.
- Unpinning and dismantling the old dispatcher.

These operations are each described in turn in the following sections.

*** Reading list of existing programs from the kernel
The first step is to obtain the ID of the currently loaded XDP program using
=bpf_get_link_xdp_info()=. A file descriptor to the dispatcher is obtained
using =bpf_prog_get_fd_by_id()=, and the BTF information attached to the
program is obtained from the kernel. This is checked for the presence of the
dispatcher version field (as explained above), and the operation is aborted if
this is not present, or doesn't match what the library expects.

Having thus established that the program loaded on the interface is indeed a
compatible dispatcher, the map ID of the map containing the configuration
struct is obtained from the kernel, and the configuration data is loaded from
the map (after checking that the map value size matches the expected
configuration struct). Then, the file lock on the directory in =bpffs= is
obtained as explained in the "Locking and pinning" section above, and, while
holding this lock, file descriptors to each of the component programs and
=bpf_link= objects are obtained. The end result is a reference to the full
dispatcher structure (and its component programs), corresponding to that
generated on load.

When populating the component program structure in memory, the chain call
actions and run priorities from the dispatcher configuration map are used
instead of parsing the BTF metadata of each program: this ensures that any
modified values specified at load time will be retained instead of being
reverted to the values compiled into the BTF metadata. Similarly, the
=program_flags= array of the in-kernel dispatcher is used to determine which
of the existing component programs support the =BPF_F_XDP_HAS_FRAGS= flag (see
the section on frags support above).

*** Generating a new dispatcher
Having obtained a reference to the existing dispatcher, =libxdp= takes that
and the list of programs to add to or remove from the interface, and simply
generates a new dispatcher with the new set of programs.
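
Before the new dispatcher is generated, the merged program list has to be
re-sorted. As a rough sketch (invented names and simplified logic, not the
actual =cmp_xdp_programs()=; it assumes access to libxdp's internal =struct
xdp_program=, whose =run_prio= and =prog_name= fields appear in libxdp.c),
the comparison underlying this sort might look like:

#+begin_src C
#include <string.h>

/* Sketch: order two component programs by run priority, falling back to the
 * first of the deterministic tie-breakers (the XDP function name); the real
 * comparator continues with the further tie-breakers listed in "Loading and
 * attaching component programs". */
static int cmp_progs(const struct xdp_program *a, const struct xdp_program *b)
{
	if (a->run_prio != b->run_prio)
		return a->run_prio < b->run_prio ? -1 : 1;
	return strcmp(a->prog_name, b->prog_name);
}
#+end_src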
When adding programs, the whole list of programs is sorted according to their
run priorities (as explained above), resulting in new programs being inserted
in the right place in the existing sequence according to their priority.

Generating this secondary dispatcher relies on the support for multiple
attachments for =freplace= programs, which was added in kernel 5.10. This
allows the =bpf_link_create()= operation to specify an attachment target in
the new dispatcher. In other words, the component programs will briefly be
attached to both the old and new dispatcher, but only one of those will be
attached to the interface. After completion of the new dispatcher, its
component programs are pinned in =bpffs= as described above.

*** Atomic replace and retry
At this point, =libxdp= has references to both the old dispatcher, already
attached to the interface, and the new one with the modified set of component
programs. The new dispatcher is then atomically swapped in for the old one,
using the =XDP_FLAGS_REPLACE= flag to the netlink operation (and the
accompanying =IFLA_XDP_EXPECTED_FD= attribute). Once the atomic replace
operation succeeds, the old dispatcher is unpinned from =bpffs= and the
in-memory references to both the old and new dispatchers are released (since
the new dispatcher was already pinned, preventing it from being detached from
the interface).

Should this atomic replace instead *fail* because the program attached to the
interface changed while the new dispatcher was being built, the whole
operation is simply started over from the beginning. That is, the new
dispatcher is unpinned from =bpffs=, and the in-memory references to both
dispatchers are released (but no unpinning of the old dispatcher is
performed!). Then, the program ID attached to the interface is again read from
the kernel, and the operation proceeds from "Reading list of existing programs
from the kernel".

** Compatibility with older kernels
The full functionality described above can only be attained with kernel
version 5.10 or newer, because this is the version that introduced support for
re-attaching an freplace program to a secondary attachment point. However, the
freplace functionality itself was introduced in kernel 5.7, so for kernel
versions 5.7 to 5.9, multiple programs can be attached as long as they are all
attached to the dispatcher immediately as they are loaded.

This is achieved by using =bpf_raw_tracepoint_open()= in place of
=bpf_link_create()= when attaching the component programs to the dispatcher.
The =bpf_raw_tracepoint_open()= function doesn't take an attach target as a
parameter; instead, it simply attaches the freplace program to the target that
was specified at load time (which is why it only works when all component
programs are loaded together with the dispatcher).
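
To illustrate the two attachment paths (a sketch under the assumptions above,
not libxdp's actual code; the function name is invented, and real code would
have to distinguish failure modes more carefully), attaching a component
program to its dispatcher might look like this, with the pre-5.10 fallback
re-using the attach target fixed at load time:

#+begin_src C
#include <errno.h>
#include <bpf/bpf.h>

/* Sketch: attach an already-loaded freplace program to the dispatcher stub
 * identified by stub_btf_id. On kernels >= 5.10, bpf_link_create() can
 * target a stub in any dispatcher; on 5.7-5.9 we fall back to
 * bpf_raw_tracepoint_open(), which re-uses the target given at load time. */
static int attach_component(int prog_fd, int dispatcher_fd, __u32 stub_btf_id)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
			    .target_btf_id = stub_btf_id);
	int link_fd;

	link_fd = bpf_link_create(prog_fd, dispatcher_fd, 0, &opts);
	if (link_fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP))
		link_fd = bpf_raw_tracepoint_open(NULL, prog_fd);
	return link_fd;
}
#+end_src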
xdp-tools-1.5.4/lib/libxdp/bpf_instr.h0000644000175100001660000000606515003640462017213 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_INSTR_H #define __BPF_INSTR_H #include #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct bpf_insn) { \ .code = CODE, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) #define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_MOV64_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_EXIT_INSN() \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - BPF_FUNC_unspec) }) #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define BPF_MOV64_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_MOV32_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_DW | BPF_IMM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF1, \ .imm = IMM1 }), \ ((struct bpf_insn) { \ .code = 0, \ .dst_reg = 0, \ .src_reg = 0, \ .off = OFF2, \ .imm = IMM2 }) #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0, \ MAP_FD, 0) #define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF) \ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0, \ MAP_FD, VALUE_OFF) #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #endif xdp-tools-1.5.4/lib/libxdp/libxdp.c0000644000175100001660000024372015003640462016503 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * XDP management utility functions * * Copyright (C) 2020 Toke Høiland-Jørgensen */ #include #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ERR_PTR */ #include #include #include #include #include #include #include #include "compat.h" #include "libxdp_internal.h" #define XDP_RUN_CONFIG_SEC ".xdp_run_config" #define XDP_SKIP_ENVVAR "LIBXDP_SKIP_DISPATCHER" /* When cloning BPF fds, we want to make sure they don't end up as any of the * standard stdin, stderr, stdout descriptors: fd 0 can confuse the kernel, and * there are orchestration systems that will force-close the others if they * don't 
point to the "right" things. So just to be safe, use 3 as the minimum * fd number. */ #define MIN_FD 3 /* Max number of times we retry attachment */ #define MAX_RETRY 10 #define IFINDEX_LO 1 static const char *dispatcher_feature_err = "This means that the kernel does not support the features needed\n" "by the multiprog dispatcher, either because it is too old entirely,\n" "or because it is not yet supported on the current architecture.\n"; struct xdp_program { /* one of prog or prog_fd should be set */ struct bpf_program *bpf_prog; struct bpf_object *bpf_obj; struct btf *btf; enum bpf_prog_type prog_type; int prog_fd; int link_fd; char *prog_name; char *attach_name; __u8 prog_tag[BPF_TAG_SIZE]; __u32 prog_id; __u64 load_time; bool from_external_obj; bool is_frags; unsigned int run_prio; unsigned int chain_call_actions; /* bitmap */ /* for building list of attached programs to multiprog */ struct xdp_program *next; }; struct xdp_multiprog { struct xdp_dispatcher_config config; struct xdp_program *main_prog; /* dispatcher or legacy prog pointer */ struct xdp_program *first_prog; /* uses xdp_program->next to build a list */ struct xdp_program *hw_prog; __u32 version; size_t num_links; bool is_loaded; bool is_legacy; bool kernel_frags_support; bool checked_compat; enum xdp_attach_mode attach_mode; int ifindex; }; #define XDP_DISPATCHER_VERSION_V1 1 struct xdp_dispatcher_config_v1 { __u8 num_progs_enabled; /* Number of active program slots */ __u32 chain_call_actions[MAX_DISPATCHER_ACTIONS]; __u32 run_prios[MAX_DISPATCHER_ACTIONS]; }; static const char *xdp_action_names[] = { [XDP_ABORTED] = "XDP_ABORTED", [XDP_DROP] = "XDP_DROP", [XDP_PASS] = "XDP_PASS", [XDP_TX] = "XDP_TX", [XDP_REDIRECT] = "XDP_REDIRECT", }; static struct xdp_program *xdp_program__create_from_obj(struct bpf_object *obj, const char *section_name, const char *prog_name, bool external); #ifdef LIBXDP_STATIC struct xdp_embedded_obj { const char *filename; const void *data_start; const void *data_end; }; extern const char _binary_xdp_dispatcher_o_start; extern const char _binary_xdp_dispatcher_o_end; extern const char _binary_xsk_def_xdp_prog_o_start; extern const char _binary_xsk_def_xdp_prog_o_end; extern const char _binary_xsk_def_xdp_prog_5_3_o_start; extern const char _binary_xsk_def_xdp_prog_5_3_o_end; static struct xdp_embedded_obj embedded_objs[] = { {"xdp-dispatcher.o", &_binary_xdp_dispatcher_o_start, &_binary_xdp_dispatcher_o_end}, {"xsk_def_xdp_prog.o", &_binary_xsk_def_xdp_prog_o_start, &_binary_xsk_def_xdp_prog_o_end}, {"xsk_def_xdp_prog_5.3.o", &_binary_xsk_def_xdp_prog_5_3_o_start, &_binary_xsk_def_xdp_prog_5_3_o_end}, {}, }; static struct xdp_program *xdp_program__find_embedded(const char *filename, const char *section_name, const char *prog_name, struct bpf_object_open_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_object_open_opts, default_opts, .object_name = filename, ); struct xdp_embedded_obj *eobj; struct bpf_object *obj; size_t size; int err; for (eobj = &embedded_objs[0]; eobj->filename; eobj++) { if (strcmp(filename, eobj->filename)) continue; size = eobj->data_end - eobj->data_start; /* set the object name to the same as if we opened the file from * the filesystem */ if (!opts) opts = &default_opts; else if (!opts->object_name) opts->object_name = filename; pr_debug("Loading XDP program '%s' from embedded object file\n", filename); obj = bpf_object__open_mem(eobj->data_start, size, opts); err = libbpf_get_error(obj); if (err) return ERR_PTR(err); return xdp_program__create_from_obj(obj, section_name, prog_name, 
false); } return NULL; } #else static inline struct xdp_program *xdp_program__find_embedded(__unused const char *filename, __unused const char *section_name, __unused const char *prog_name, __unused struct bpf_object_open_opts *opts) { return NULL; } #endif static int __base_pr(enum libxdp_print_level level, const char *format, va_list args) { if (level == LIBXDP_DEBUG) return 0; return vfprintf(stderr, format, args); } static libxdp_print_fn_t __libxdp_pr = __base_pr; libxdp_print_fn_t libxdp_set_print(libxdp_print_fn_t fn) { libxdp_print_fn_t old_print_fn = __libxdp_pr; __libxdp_pr = fn; return old_print_fn; } __printf(2, 3) void libxdp_print(enum libxdp_print_level level, const char *format, ...) { va_list args; if (!__libxdp_pr) return; va_start(args, format); __libxdp_pr(level, format, args); va_end(args); } static enum { COMPAT_UNKNOWN, COMPAT_SUPPORTED, COMPAT_UNSUPPORTED } kernel_compat = COMPAT_UNKNOWN; static int xdp_multiprog__attach(struct xdp_multiprog *old_mp, struct xdp_multiprog *mp, enum xdp_attach_mode mode); static struct xdp_multiprog *xdp_multiprog__generate(struct xdp_program **progs, size_t num_progs, int ifindex, struct xdp_multiprog *old_mp, bool remove_progs); static int xdp_multiprog__pin(struct xdp_multiprog *mp); static int xdp_multiprog__unpin(struct xdp_multiprog *mp); /* On NULL, libxdp always sets errno to 0 for old APIs, so that their * compatibility is maintained wrt old libxdp_get_error that called the older * version of libbpf_get_error which did PTR_ERR_OR_ZERO, but newer versions * unconditionally return -errno on seeing NULL, as the libbpf practice changed * to returning NULL or errors. * * The new APIs (like xdp_program__create) which indicate error using NULL set * their errno when returning NULL. */ long libxdp_get_error(const void *ptr) { if (!IS_ERR_OR_NULL(ptr)) return 0; if (IS_ERR(ptr)) errno = -PTR_ERR(ptr); return -errno; } int libxdp_strerror(int err, char *buf, size_t size) { return libxdp_err(libbpf_strerror(err, buf, size)); } static char *libxdp_strerror_r(int err, char *dst, size_t size) { int ret = libxdp_strerror(err, dst, size); if (ret) snprintf(dst, size, "ERROR: strerror_r(%d)=%d", err, ret); return dst; } #ifndef HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID static struct btf *btf__load_from_kernel_by_id(__u32 id) { struct btf *btf; int err; err = btf__get_from_id(id, &btf); if (err) return NULL; return btf; } #endif #ifndef HAVE_LIBBPF_BTF__TYPE_CNT static __u32 btf__type_cnt(const struct btf *btf) { /* old function didn't include 'void' type in count */ return btf__get_nr_types(btf) + 1; } #endif #ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_MAP static struct bpf_map *bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map) { return bpf_map__next(map, obj); } #endif #ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM static struct bpf_program *bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog) { return bpf_program__next(prog, obj); } #endif #ifndef HAVE_LIBBPF_BPF_PROGRAM__INSN_CNT #define BPF_INSN_SZ (sizeof(struct bpf_insn)) static size_t bpf_program__insn_cnt(const struct bpf_program *prog) { size_t sz; sz = bpf_program__size(prog); return sz / BPF_INSN_SZ; } #endif #ifndef HAVE_LIBBPF_BPF_PROGRAM__TYPE static inline enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) { return bpf_program__get_type((struct bpf_program *)prog); } #endif #ifndef HAVE_LIBBPF_BPF_PROGRAM__FLAGS static __u32 bpf_program__flags(__unused const struct bpf_program *prog) { /* When libbpf doesn't support 
this we can't get the real value. * Returning 0 works because the callers check for the presence of a * specific flag (BPF_F_XDP_HAS_FRAGS), and having it always-off * disables the frags functionality which is what we want. */ return 0; } #endif /* This function has been deprecated in libbpf, but we expose an API that uses * section names, so we reimplement it to keep compatibility */ static struct bpf_program * bpf_program_by_section_name(const struct bpf_object *obj, const char *section_name) { struct bpf_program *pos; const char *sname; bpf_object__for_each_program(pos, obj) { sname = bpf_program__section_name(pos); if (sname && !strcmp(sname, section_name)) return pos; } return NULL; } static bool bpf_is_valid_mntpt(const char *mnt) { struct statfs st_fs; if (statfs(mnt, &st_fs) < 0) return false; if ((unsigned long)st_fs.f_type != BPF_FS_MAGIC) return false; return true; } static int bpf_mnt_fs(const char *target) { bool bind_done = false; int err; retry: err = mount("", target, "none", MS_PRIVATE | MS_REC, NULL); if (err) { if (errno != EINVAL || bind_done) { err = -errno; pr_warn("mount --make-private %s failed: %s\n", target, strerror(-err)); return err; } err = mount(target, target, "none", MS_BIND, NULL); if (err) { err = -errno; pr_warn("mount --bind %s %s failed: %s\n", target, target, strerror(-err)); return err; } bind_done = true; goto retry; } err = mount("bpf", target, "bpf", 0, "mode=0700"); if (err) { err = -errno; pr_warn("mount -t bpf bpf %s failed: %s\n", target, strerror(-err)); return err; } return 0; } static const char *bpf_find_mntpt_single(char *mnt, int len, const char *mntpt, bool mount) { int err; if (!bpf_is_valid_mntpt(mntpt)) { if (!mount) return NULL; pr_debug("No bpffs found at %s, mounting a new one\n", mntpt); err = bpf_mnt_fs(mntpt); if (err) return NULL; } strncpy(mnt, mntpt, len - 1); mnt[len - 1] = '\0'; return mnt; } static const char *find_bpffs() { static bool bpf_mnt_cached = false; static char bpf_wrk_dir[PATH_MAX]; static const char *mnt = NULL; char *envdir, *envval; bool mount = false; if (bpf_mnt_cached) return mnt; envdir = secure_getenv(XDP_BPFFS_ENVVAR); envval = secure_getenv(XDP_BPFFS_MOUNT_ENVVAR); if (envval && envval[0] == '1' && envval[1] == '\0') mount = true; mnt = bpf_find_mntpt_single(bpf_wrk_dir, sizeof(bpf_wrk_dir), envdir ?: BPF_DIR_MNT, mount); if (!mnt) pr_warn("No bpffs found at %s\n", envdir ?: BPF_DIR_MNT); else bpf_mnt_cached = 1; return mnt; } static int mk_state_subdir(char *dir, size_t dir_sz, const char *parent) { int err; err = try_snprintf(dir, dir_sz, "%s/xdp", parent); if (err) return err; err = mkdir(dir, S_IRWXU); if (err && errno != EEXIST) return -errno; return 0; } static const char *get_bpffs_dir(void) { static char bpffs_dir[PATH_MAX]; static const char *dir = NULL; const char *parent; int err; if (dir) return dir; parent = find_bpffs(); if (!parent) { err = -ENOENT; goto err; } err = mk_state_subdir(bpffs_dir, sizeof(bpffs_dir), parent); if (err) goto err; dir = bpffs_dir; return dir; err: return ERR_PTR(err); } static const char *get_lock_dir(void) { static const char *dir = NULL; static char rundir[PATH_MAX]; int err; if (dir) return dir; dir = get_bpffs_dir(); if (!IS_ERR(dir)) return dir; err = mk_state_subdir(rundir, sizeof(rundir), RUNDIR); if (err) return ERR_PTR(err); dir = rundir; return dir; } int xdp_lock_acquire(void) { int lock_fd, err; const char *dir; dir = get_lock_dir(); if (IS_ERR(dir)) return PTR_ERR(dir); lock_fd = open(dir, O_DIRECTORY); if (lock_fd < 0) { err = -errno; 
pr_warn("Couldn't open lock directory at %s: %s\n", dir, strerror(-err)); return err; } err = flock(lock_fd, LOCK_EX); if (err) { err = -errno; pr_warn("Couldn't flock fd %d: %s\n", lock_fd, strerror(-err)); close(lock_fd); return err; } pr_debug("Acquired lock from %s with fd %d\n", dir, lock_fd); return lock_fd; } int xdp_lock_release(int lock_fd) { int err; err = flock(lock_fd, LOCK_UN); if (err) { err = -errno; pr_warn("Couldn't unlock fd %d: %s\n", lock_fd, strerror(-err)); } else { pr_debug("Released lock fd %d\n", lock_fd); } close(lock_fd); return err; } static int do_xdp_attach(int ifindex, int prog_fd, int old_fd, __u32 xdp_flags) { #ifdef HAVE_LIBBPF_BPF_XDP_ATTACH LIBBPF_OPTS(bpf_xdp_attach_opts, opts, .old_prog_fd = old_fd); return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, &opts); #else DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = old_fd); return bpf_set_link_xdp_fd_opts(ifindex, prog_fd, xdp_flags, old_fd ? &opts : NULL); #endif } int xdp_attach_fd(int prog_fd, int old_fd, int ifindex, enum xdp_attach_mode mode) { int err = 0, xdp_flags = 0; pr_debug("Replacing XDP fd %d with %d on ifindex %d\n", old_fd, prog_fd, ifindex); if (old_fd == -1) { xdp_flags |= XDP_FLAGS_UPDATE_IF_NOEXIST; old_fd = 0; } switch (mode) { case XDP_MODE_SKB: xdp_flags |= XDP_FLAGS_SKB_MODE; break; case XDP_MODE_NATIVE: xdp_flags |= XDP_FLAGS_DRV_MODE; break; case XDP_MODE_HW: xdp_flags |= XDP_FLAGS_HW_MODE; break; case XDP_MODE_UNSPEC: break; } again: err = do_xdp_attach(ifindex, prog_fd, old_fd, xdp_flags); if (err < 0) { if (err == -EINVAL && old_fd) { pr_debug("Got 'invalid argument', trying again without old_fd\n"); old_fd = 0; goto again; } pr_info("Error attaching XDP program to ifindex %d: %s\n", ifindex, strerror(-err)); if (err == -EEXIST && old_fd) /* We raced with another attach/detach, have to retry */ return -EAGAIN; switch (-err) { case EBUSY: case EEXIST: pr_info("XDP already loaded on device\n"); break; case EOPNOTSUPP: pr_info("XDP mode not supported; try using SKB mode\n"); break; default: break; } } return err; } const struct btf *xdp_program__btf(struct xdp_program *xdp_prog) { if (!xdp_prog) return libxdp_err_ptr(0, true); return xdp_prog->btf; } enum xdp_attach_mode xdp_program__is_attached(const struct xdp_program *xdp_prog, int ifindex) { struct xdp_program *prog = NULL; struct xdp_multiprog *mp; enum xdp_attach_mode ret = XDP_MODE_UNSPEC; if (!xdp_prog || !xdp_prog->prog_id) return ret; mp = xdp_multiprog__get_from_ifindex(ifindex); if (IS_ERR_OR_NULL(mp)) return ret; prog = xdp_multiprog__hw_prog(mp); if (xdp_program__id(prog) == xdp_program__id(xdp_prog)) { ret = XDP_MODE_HW; goto out; } if (xdp_multiprog__is_legacy(mp)) { prog = xdp_multiprog__main_prog(mp); if (xdp_program__id(prog) == xdp_program__id(xdp_prog)) ret = xdp_multiprog__attach_mode(mp); goto out; } while ((prog = xdp_multiprog__next_prog(prog, mp))) { if (xdp_program__id(prog) == xdp_program__id(xdp_prog)) { ret = xdp_multiprog__attach_mode(mp); break; } } out: xdp_multiprog__close(mp); return ret; } int xdp_program__set_chain_call_enabled(struct xdp_program *prog, unsigned int action, bool enabled) { if (IS_ERR_OR_NULL(prog) || prog->prog_fd >= 0 || action >= XDP_DISPATCHER_RETVAL) return libxdp_err(-EINVAL); if (enabled) prog->chain_call_actions |= (1U << action); else prog->chain_call_actions &= ~(1U << action); return 0; } bool xdp_program__chain_call_enabled(const struct xdp_program *prog, enum xdp_action action) { if (IS_ERR_OR_NULL(prog) || action >= XDP_DISPATCHER_RETVAL) return false; 
return !!(prog->chain_call_actions & (1U << action)); } unsigned int xdp_program__run_prio(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return XDP_DEFAULT_RUN_PRIO; return prog->run_prio; } int xdp_program__set_run_prio(struct xdp_program *prog, unsigned int run_prio) { if (IS_ERR_OR_NULL(prog) || prog->prog_fd >= 0) return libxdp_err(-EINVAL); prog->run_prio = run_prio; return 0; } bool xdp_program__xdp_frags_support(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return false; /* Until we load the program we just check the bpf_program__flags() to * ensure any changes made to those are honoured on the libxdp side. For * loaded programs we keep our own state variable which is populated * either by copying over the program flags in xdp_program__load(), or * by loading the state from the dispatcher state variables if * instantiating the object from the kernel. */ if (!prog->bpf_prog || prog->prog_fd >= 0) return prog->is_frags; return !!(bpf_program__flags(prog->bpf_prog) & BPF_F_XDP_HAS_FRAGS); } #ifndef HAVE_LIBBPF_BPF_PROGRAM__FLAGS int xdp_program__set_xdp_frags_support(__unused struct xdp_program *prog, __unused bool frags) { return libxdp_err(-EOPNOTSUPP); } #else int xdp_program__set_xdp_frags_support(struct xdp_program *prog, bool frags) { __u32 prog_flags; int ret; if (IS_ERR_OR_NULL(prog) || !prog->bpf_prog || prog->prog_fd >= 0) return libxdp_err(-EINVAL); prog_flags = bpf_program__flags(prog->bpf_prog); if (frags) prog_flags |= BPF_F_XDP_HAS_FRAGS; else prog_flags &= ~BPF_F_XDP_HAS_FRAGS; ret = bpf_program__set_flags(prog->bpf_prog, prog_flags); if (!ret) prog->is_frags = frags; return ret; } #endif // HAVE_LIBBPF_BPF_PROGRAM__FLAGS const char *xdp_program__name(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return libxdp_err_ptr(0, true); return prog->prog_name; } struct bpf_object *xdp_program__bpf_obj(struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return libxdp_err_ptr(0, true); return prog->bpf_obj; } const unsigned char *xdp_program__tag(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return libxdp_err_ptr(0, true); return prog->prog_tag; } uint32_t xdp_program__id(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return 0; return prog->prog_id; } int xdp_program__fd(const struct xdp_program *prog) { if (IS_ERR_OR_NULL(prog)) return errno = ENOENT, -1; return prog->prog_fd; } int xdp_program__print_chain_call_actions(const struct xdp_program *prog, char *buf, size_t buf_len) { bool first = true; char *pos = buf; int i, len = 0; if (IS_ERR_OR_NULL(prog) || !buf || !buf_len) return libxdp_err(-EINVAL); for (i = 0; i <= XDP_REDIRECT; i++) { if (xdp_program__chain_call_enabled(prog, i)) { if (!first) { if (!buf_len) goto err_len; *pos++ = ','; buf_len--; } else { first = false; } len = snprintf(pos, buf_len, "%s", xdp_action_names[i]); if (len < 0 || (size_t)len >= buf_len) goto err_len; pos += len; buf_len -= len; } } return 0; err_len: *pos = '\0'; return libxdp_err(-ENOSPC); } static const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) { const struct btf_type *t = btf__type_by_id(btf, id); if (res_id) *res_id = id; while (btf_is_mod(t) || btf_is_typedef(t)) { if (res_id) *res_id = t->type; t = btf__type_by_id(btf, t->type); } return t; } static bool get_field_int(const struct btf *btf, const char *t_name, const struct btf_type *t, __u32 *res) { const struct btf_array *arr_info; const struct btf_type *arr_t; if (!btf_is_ptr(t)) { pr_warn("attr '%s': expected PTR, got 
%u.\n", t_name, btf_kind(t)); return false; } arr_t = btf__type_by_id(btf, t->type); if (!arr_t) { pr_warn("attr '%s': type [%u] not found.\n", t_name, t->type); return false; } if (!btf_is_array(arr_t)) { pr_warn("attr '%s': expected ARRAY, got %u.\n", t_name, btf_kind(arr_t)); return false; } arr_info = btf_array(arr_t); *res = arr_info->nelems; return true; } static bool get_xdp_action(const char *act_name, unsigned int *act) { const char **name = xdp_action_names; unsigned int i; for (i = 0; i < ARRAY_SIZE(xdp_action_names); i++, name++) { if (!strcmp(act_name, *name)) { *act = i; return true; } } return false; } /* * Find BTF func definition for func_name, which may be a truncated prefix of * the real function name. * Return NULL on no, or ambiguous, match. */ static const struct btf_type *btf_get_function(const struct btf *btf, const char *func_name) { const struct btf_type *t, *match; size_t len, matches = 0; const char *name; int nr_types, i; if (!btf) { pr_debug("No BTF found for program\n"); return NULL; } len = strlen(func_name); nr_types = btf__type_cnt(btf); for (i = 1; i < nr_types; i++) { t = btf__type_by_id(btf, i); if (!btf_is_func(t)) continue; name = btf__name_by_offset(btf, t->name_off); if (!strncmp(name, func_name, len)) { pr_debug("Found func %s matching %s\n", name, func_name); if (strlen(name) == len) return t; /* exact match */ /* prefix, may not be unique */ matches++; match = t; } } if (matches == 1) /* unique match */ return match; pr_debug("Function '%s' not found or ambiguous (%zu matches).\n", func_name, matches); return NULL; } static const struct btf_type *btf_get_datasec(const struct btf *btf, const char *sec_name) { const struct btf_type *t; int nr_types, i; const char *name; if (!btf) { pr_debug("No BTF found for program\n"); return NULL; } nr_types = btf__type_cnt(btf); for (i = 1; i < nr_types; i++) { t = btf__type_by_id(btf, i); if (!btf_is_datasec(t)) continue; name = btf__name_by_offset(btf, t->name_off); if (strcmp(name, sec_name) == 0) return t; } pr_debug("DATASEC '%s' not found.\n", sec_name); return NULL; } static const struct btf_type *btf_get_section_var(const struct btf *btf, const struct btf_type *sec, const char *var_name, __u16 kind) { const struct btf_var_secinfo *vi; const struct btf_var *var_extra; const struct btf_type *var, *def; const char *name; int vlen, i; vlen = btf_vlen(sec); vi = btf_var_secinfos(sec); for (i = 0; i < vlen; i++, vi++) { var = btf__type_by_id(btf, vi->type); var_extra = btf_var(var); name = btf__name_by_offset(btf, var->name_off); if (strcmp(name, var_name)) continue; if (!btf_is_var(var)) { pr_warn("struct '%s': unexpected var kind %u.\n", name, btf_kind(var)); return ERR_PTR(-EINVAL); } if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && var_extra->linkage != BTF_VAR_STATIC) { pr_warn("struct '%s': unsupported var linkage %u.\n", name, var_extra->linkage); return ERR_PTR(-EOPNOTSUPP); } def = skip_mods_and_typedefs(btf, var->type, NULL); if (btf_kind(def) != kind) { pr_warn("var '%s': unexpected def kind %u.\n", name, btf_kind(def)); return ERR_PTR(-EINVAL); } return def; } return ERR_PTR(-ENOENT); } /** * This function parses the run config information attached to an XDP program. * * This information is specified using BTF, in a format similar to how * BTF-defined maps are done. 
The definition looks like this: * * struct { * __uint(priority, 10); * __uint(XDP_PASS, 1); * } XDP_RUN_CONFIG(FUNCNAME); * * The priority is simply an integer that will be used to sort programs as they * are attached on the interface (see cmp_xdp_programs() for full sort order). * In addition to the priority, the run config can define an integer value for * each XDP action. A non-zero value means that execution will continue to the * next loaded program if the current program returns that action. I.e., in the * above example, any return value other than XDP_PASS will cause the dispatcher * to exit with that return code, whereas XDP_PASS means execution will * continue. * * Since this information becomes part of the object file BTF info, it will * survive loading into the kernel, and so it can be retrieved for * already-loaded programs as well. */ static int xdp_program__parse_btf(struct xdp_program *xdp_prog, const struct btf *btf) { const struct btf_type *def, *sec; const struct btf_member *m; char struct_name[100]; int err, i, mlen; if (!btf) btf = xdp_program__btf(xdp_prog); /* If the program name is the maximum allowed object name in the kernel, * it may have been truncated, in which case we try to expand it by * looking for a match in the BTF data. */ if (strlen(xdp_prog->prog_name) >= BPF_OBJ_NAME_LEN - 1) { const struct btf_type *func; char *name; func = btf_get_function(btf, xdp_prog->prog_name); if (func) { name = strdup(btf__name_by_offset(btf, func->name_off)); if (!name) return -ENOMEM; free(xdp_prog->prog_name); xdp_prog->prog_name = name; } } err = try_snprintf(struct_name, sizeof(struct_name), "_%s", xdp_program__name(xdp_prog)); if (err) return err; sec = btf_get_datasec(btf, XDP_RUN_CONFIG_SEC); if (!sec) return -ENOENT; def = btf_get_section_var(btf, sec, struct_name, BTF_KIND_STRUCT); if (IS_ERR(def)) { pr_debug("Couldn't find run order struct %s\n", struct_name); return PTR_ERR(def); } mlen = btf_vlen(def); m = btf_members(def); for (i = 0; i < mlen; i++, m++) { const char *mname = btf__name_by_offset(btf, m->name_off); const struct btf_type *m_t; unsigned int val, act; if (!mname) { pr_warn("struct '%s': invalid field #%d.\n", struct_name, i); return -EINVAL; } m_t = skip_mods_and_typedefs(btf, m->type, NULL); if (!strcmp(mname, "priority")) { if (!get_field_int(btf, mname, m_t, &xdp_prog->run_prio)) return -EINVAL; continue; } else if (get_xdp_action(mname, &act)) { if (!get_field_int(btf, mname, m_t, &val)) return -EINVAL; xdp_program__set_chain_call_enabled(xdp_prog, act, val); } else { pr_warn("Invalid mname: %s\n", mname); return -ENOTSUP; } } return 0; } static struct xdp_program *xdp_program__new(void) { struct xdp_program *xdp_prog; xdp_prog = malloc(sizeof(*xdp_prog)); if (!xdp_prog) return ERR_PTR(-ENOMEM); memset(xdp_prog, 0, sizeof(*xdp_prog)); xdp_prog->prog_fd = -1; xdp_prog->link_fd = -1; xdp_prog->run_prio = XDP_DEFAULT_RUN_PRIO; xdp_prog->chain_call_actions = XDP_DEFAULT_CHAIN_CALL_ACTIONS; return xdp_prog; } void xdp_program__close(struct xdp_program *xdp_prog) { if (!xdp_prog) return; if (xdp_prog->link_fd >= 0) close(xdp_prog->link_fd); if (xdp_prog->prog_fd >= 0) close(xdp_prog->prog_fd); free(xdp_prog->prog_name); free(xdp_prog->attach_name); if (!xdp_prog->from_external_obj) { if (xdp_prog->bpf_obj) bpf_object__close(xdp_prog->bpf_obj); else if (xdp_prog->btf) btf__free(xdp_prog->btf); } free(xdp_prog); } static struct xdp_program *xdp_program__create_from_obj(struct bpf_object *obj, const char *section_name, const char *prog_name, bool 
external) { struct xdp_program *xdp_prog; struct bpf_program *bpf_prog; int err; if (!obj || (section_name && prog_name)) return ERR_PTR(-EINVAL); if (section_name) bpf_prog = bpf_program_by_section_name(obj, section_name); else if (prog_name) bpf_prog = bpf_object__find_program_by_name(obj, prog_name); else bpf_prog = bpf_object__next_program(obj, NULL); if (!bpf_prog) { pr_warn("Couldn't find xdp program in bpf object%s%s\n", section_name ? " section " : "", section_name ?: ""); return ERR_PTR(-ENOENT); } xdp_prog = xdp_program__new(); if (IS_ERR(xdp_prog)) return xdp_prog; xdp_prog->prog_name = strdup(bpf_program__name(bpf_prog)); if (!xdp_prog->prog_name) { err = -ENOMEM; goto err; } err = xdp_program__parse_btf(xdp_prog, bpf_object__btf(obj)); if (err && err != -ENOENT) goto err; xdp_prog->bpf_prog = bpf_prog; xdp_prog->bpf_obj = obj; xdp_prog->btf = bpf_object__btf(obj); xdp_prog->from_external_obj = external; return xdp_prog; err: xdp_program__close(xdp_prog); return ERR_PTR(err); } struct xdp_program *xdp_program__from_bpf_obj(struct bpf_object *obj, const char *section_name) { struct xdp_program *prog; prog = xdp_program__create_from_obj(obj, section_name, NULL, true); /* xdp_program__create_from_obj does not return NULL */ if (!IS_ERR(prog)) return prog; return libxdp_err_ptr(PTR_ERR(prog), false); } static struct bpf_object *open_bpf_obj(const char *filename, struct bpf_object_open_opts *opts) { struct bpf_object *obj; int err; obj = bpf_object__open_file(filename, opts); err = libbpf_get_error(obj); if (err) { if (err == -ENOENT) pr_debug( "Couldn't load the eBPF program (libbpf said 'no such file').\n" "Maybe the program was compiled with a too old " "version of LLVM (need v9.0+)?\n"); return ERR_PTR(err); } return obj; } static struct xdp_program *__xdp_program__open_file(const char *filename, const char *section_name, const char *prog_name, struct bpf_object_open_opts *opts) { struct xdp_program *xdp_prog; struct bpf_object *obj; int err; if (!filename) return ERR_PTR(-EINVAL); obj = open_bpf_obj(filename, opts); if (IS_ERR(obj)) { err = PTR_ERR(obj); return ERR_PTR(err); } xdp_prog = xdp_program__create_from_obj(obj, section_name, prog_name, false); if (IS_ERR(xdp_prog)) bpf_object__close(obj); return xdp_prog; } struct xdp_program *xdp_program__open_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts) { struct xdp_program *prog; prog = __xdp_program__open_file(filename, section_name, NULL, opts); /* __xdp_program__open_file does not return NULL */ if (!IS_ERR(prog)) return prog; return libxdp_err_ptr(PTR_ERR(prog), false); } static bool try_bpf_file(char *buf, size_t buf_size, char *path, const char *progname) { struct stat sb = {}; if (try_snprintf(buf, buf_size, "%s/%s", path, progname)) return false; pr_debug("Looking for '%s'\n", buf); if (stat(buf, &sb)) return false; return true; } static int find_bpf_file(char *buf, size_t buf_size, const char *progname) { static char *bpf_obj_paths[] = { #ifdef DEBUG ".", #endif BPF_OBJECT_PATH, NULL }; char *path, **p; path = secure_getenv(XDP_OBJECT_ENVVAR); if (path && try_bpf_file(buf, buf_size, path, progname)) { return 0; } else if (!path) { for (p = bpf_obj_paths; *p; p++) if (try_bpf_file(buf, buf_size, *p, progname)) return 0; } pr_warn("Couldn't find a BPF file with name %s\n", progname); return -ENOENT; } static struct xdp_program *__xdp_program__find_file(const char *filename, const char *section_name, const char *prog_name, struct bpf_object_open_opts *opts) { struct xdp_program 
*prog; char buf[PATH_MAX]; int err; prog = xdp_program__find_embedded(filename, section_name, prog_name, opts); if (prog) return prog; err = find_bpf_file(buf, sizeof(buf), filename); if (err) return ERR_PTR(err); pr_debug("Loading XDP program from '%s' section '%s'\n", buf, section_name ?: (prog_name ?: "(unknown)")); return __xdp_program__open_file(buf, section_name, prog_name, opts); } struct xdp_program *xdp_program__find_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts) { struct xdp_program *prog; prog = __xdp_program__find_file(filename, section_name, NULL, opts); /* __xdp_program__find_file does not return NULL */ if (!IS_ERR(prog)) return prog; return libxdp_err_ptr(PTR_ERR(prog), false); } static int xdp_program__fill_from_fd(struct xdp_program *xdp_prog, int fd) { struct bpf_prog_info info = {}; __u32 len = sizeof(info); struct btf *btf = NULL; int err = 0, prog_fd; if (!xdp_prog) return -EINVAL; /* Duplicate the descriptor, as we take ownership of the fd below */ prog_fd = fcntl(fd, F_DUPFD_CLOEXEC, MIN_FD); if (prog_fd < 0) { err = -errno; pr_debug("Error on fcntl: %s", strerror(-err)); return err; } err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); if (err) { err = -errno; pr_warn("couldn't get program info: %s", strerror(-err)); goto err; } if (!xdp_prog->prog_name) { xdp_prog->prog_name = strdup(info.name); if (!xdp_prog->prog_name) { err = -ENOMEM; pr_warn("failed to strdup program title"); goto err; } } if (info.btf_id && !xdp_prog->btf) { btf = btf__load_from_kernel_by_id(info.btf_id); if (!btf) { pr_warn("Couldn't get BTF for ID %ul\n", info.btf_id); goto err; } xdp_prog->btf = btf; } pr_debug("Duplicated fd %d to %d for prog %s\n", fd, prog_fd, xdp_prog->prog_name); memcpy(xdp_prog->prog_tag, info.tag, BPF_TAG_SIZE); xdp_prog->load_time = info.load_time; xdp_prog->prog_fd = prog_fd; xdp_prog->prog_id = info.id; xdp_prog->prog_type = info.type; return 0; err: close(prog_fd); btf__free(btf); return err; } struct xdp_program *xdp_program__from_fd(int fd) { struct xdp_program *xdp_prog = NULL; int err; xdp_prog = xdp_program__new(); if (IS_ERR(xdp_prog)) return libxdp_err_ptr(PTR_ERR(xdp_prog), false); err = xdp_program__fill_from_fd(xdp_prog, fd); if (err) goto err; err = xdp_program__parse_btf(xdp_prog, NULL); if (err && err != -ENOENT) goto err; return xdp_prog; err: xdp_program__close(xdp_prog); return libxdp_err_ptr(err, false); } struct xdp_program *xdp_program__from_id(__u32 id) { struct xdp_program *prog; int fd, err; fd = bpf_prog_get_fd_by_id(id); if (fd < 0) { err = -errno; pr_warn("couldn't get program fd: %s", strerror(-err)); return libxdp_err_ptr(err, false); } prog = xdp_program__from_fd(fd); // duplicated fd already in prog, close original close(fd); if (IS_ERR(prog)) { err = errno; errno = err; } return prog; } struct xdp_program *xdp_program__from_pin(const char *pin_path) { struct xdp_program *prog; int fd, err; fd = bpf_obj_get(pin_path); if (fd < 0) { err = -errno; pr_warn("couldn't get program fd from %s: %s", pin_path, strerror(-err)); return libxdp_err_ptr(err, false); } prog = xdp_program__from_fd(fd); // duplicated fd already in prog, close original close(fd); if (IS_ERR(prog)) { err = errno; errno = err; } return prog; } struct xdp_program *xdp_program__create(struct xdp_program_opts *opts) { const char *pin_path, *prog_name, *find_filename, *open_filename; struct bpf_object_open_opts *obj_opts; struct xdp_program *prog; struct bpf_object *obj; __u32 id; int fd; if (!opts || !OPTS_VALID(opts, 
xdp_program_opts)) goto err; obj = OPTS_GET(opts, obj, NULL); obj_opts = OPTS_GET(opts, opts, NULL); prog_name = OPTS_GET(opts, prog_name, NULL); find_filename = OPTS_GET(opts, find_filename, NULL); open_filename = OPTS_GET(opts, open_filename, NULL); pin_path = OPTS_GET(opts, pin_path, NULL); id = OPTS_GET(opts, id, 0); fd = OPTS_GET(opts, fd, 0); if (obj) { /* prog_name is optional */ if (obj_opts || find_filename || open_filename || pin_path || id || fd) goto err; prog = xdp_program__create_from_obj(obj, NULL, prog_name, true); } else if (find_filename) { /* prog_name, obj_opts is optional */ if (obj || open_filename || pin_path || id || fd) goto err; prog = __xdp_program__find_file(find_filename, NULL, prog_name, obj_opts); } else if (open_filename) { /* prog_name, obj_opts is optional */ if (obj || find_filename || pin_path || id || fd) goto err; prog = __xdp_program__open_file(open_filename, NULL, prog_name, obj_opts); } else if (pin_path) { if (obj || obj_opts || prog_name || find_filename || open_filename || id || fd) goto err; prog = xdp_program__from_pin(pin_path); } else if (id) { if (obj || obj_opts || prog_name || find_filename || open_filename || pin_path || fd) goto err; prog = xdp_program__from_id(id); } else if (fd) { if (obj || obj_opts || prog_name || find_filename || open_filename || pin_path || id) goto err; prog = xdp_program__from_fd(fd); } else { goto err; } if (IS_ERR(prog)) return libxdp_err_ptr(PTR_ERR(prog), true); return prog; err: return libxdp_err_ptr(-EINVAL, true); } static int cmp_xdp_programs(const void *_a, const void *_b) { const struct xdp_program *a = *(struct xdp_program * const *)_a; const struct xdp_program *b = *(struct xdp_program * const *)_b; int cmp; if (a->run_prio != b->run_prio) return a->run_prio < b->run_prio ? -1 : 1; cmp = strcmp(a->prog_name, b->prog_name); if (cmp) return cmp; /* Hopefully the two checks above will resolve most comparisons; in * cases where they don't, hopefully the checks below will keep the * order stable. */ /* loaded before non-loaded */ if (a->prog_fd >= 0 && b->prog_fd < 0) return -1; else if (a->prog_fd < 0 && b->prog_fd >= 0) return 1; /* two unloaded programs - compare by size */ if (a->bpf_prog && b->bpf_prog) { size_t size_a, size_b; size_a = bpf_program__insn_cnt(a->bpf_prog); size_b = bpf_program__insn_cnt(b->bpf_prog); if (size_a != size_b) return size_a < size_b ? -1 : 1; } cmp = memcmp(a->prog_tag, b->prog_tag, BPF_TAG_SIZE); if (cmp) return cmp; /* at this point we are really grasping for straws */ if (a->load_time != b->load_time) return a->load_time < b->load_time ? -1 : 1; return 0; } int xdp_program__pin(struct xdp_program *prog, const char *pin_path) { if (IS_ERR_OR_NULL(prog) || prog->prog_fd < 0) return libxdp_err(-EINVAL); return libxdp_err(bpf_program__pin(prog->bpf_prog, pin_path)); } static int xdp_program__load(struct xdp_program *prog) { bool is_loaded, autoload; int err; if (IS_ERR_OR_NULL(prog)) return -EINVAL; if (prog->prog_fd >= 0) return -EEXIST; if (!prog->bpf_obj || !prog->bpf_prog) return -EINVAL; /* bpf_program__set_autoload fails if the object is loaded, use this to * detect if it is (since libbpf doesn't expose an API to discover * this). This is necessary because of objects containing multiple * programs: if a user creates xdp_program references to programs in * such an object before loading it, they will get out of sync. 
*/ autoload = bpf_program__autoload(prog->bpf_prog); is_loaded = !!bpf_program__set_autoload(prog->bpf_prog, autoload); if (is_loaded) { pr_debug("XDP program %s is already loaded with fd %d\n", xdp_program__name(prog), bpf_program__fd(prog->bpf_prog)); prog->is_frags = !!(bpf_program__flags(prog->bpf_prog) & BPF_F_XDP_HAS_FRAGS); } else { /* We got an explicit load request, make sure we actually load */ if (!autoload) bpf_program__set_autoload(prog->bpf_prog, true); /* Make sure we sync is_frags to internal state variable (in case it was * changed on bpf_prog since creation), and unset flag if we're loading * an EXT program (the dispatcher will have the flag set instead in this * case) */ prog->is_frags = xdp_program__xdp_frags_support(prog); #ifdef HAVE_LIBBPF_BPF_PROGRAM__FLAGS if (bpf_program__type(prog->bpf_prog) == BPF_PROG_TYPE_EXT) bpf_program__set_flags(prog->bpf_prog, bpf_program__flags(prog->bpf_prog) & ~BPF_F_XDP_HAS_FRAGS); #endif err = bpf_object__load(prog->bpf_obj); if (err) return err; pr_debug("Loaded XDP program %s, got fd %d\n", xdp_program__name(prog), bpf_program__fd(prog->bpf_prog)); } /* xdp_program__fill_from_fd() clones the fd and takes ownership of the clone */ return xdp_program__fill_from_fd(prog, bpf_program__fd(prog->bpf_prog)); } struct xdp_program *xdp_program__clone(struct xdp_program *prog, unsigned int flags) { if (IS_ERR_OR_NULL(prog) || flags || (prog->prog_fd < 0 && !prog->bpf_obj)) return libxdp_err_ptr(-EINVAL, false); if (prog->prog_fd >= 0) /* Clone a loaded program struct by creating a new object from the program fd; xdp_program__fill_from_fd() already duplicates the fd before filling in the object, so this creates a completely independent xdp_program object. */ return xdp_program__from_fd(prog->prog_fd); return xdp_program__create_from_obj(prog->bpf_obj, NULL, prog->prog_name, true); } #ifndef HAVE_LIBBPF_BPF_PROGRAM__FLAGS static bool kernel_has_frags_support(void) { pr_debug("Can't support frags with old version of libbpf that doesn't support setting program flags.\n"); return false; } #else static bool kernel_has_frags_support(void) { struct xdp_program *test_prog; bool ret = false; int err; pr_debug("Checking for kernel frags support\n"); test_prog = __xdp_program__find_file("xdp-dispatcher.o", NULL, "xdp_pass", NULL); if (IS_ERR(test_prog)) { err = PTR_ERR(test_prog); pr_warn("Couldn't open BPF file xdp-dispatcher.o\n"); return false; } bpf_program__set_flags(test_prog->bpf_prog, BPF_F_XDP_HAS_FRAGS); err = xdp_program__load(test_prog); if (!err) { pr_debug("Kernel supports XDP programs with frags\n"); ret = true; } else { pr_debug("Kernel DOES NOT support XDP programs with frags\n"); } xdp_program__close(test_prog); return ret; } #endif // HAVE_LIBBPF_BPF_PROGRAM__FLAGS static int xdp_program__attach_single(struct xdp_program *prog, int ifindex, enum xdp_attach_mode mode) { int err; if (prog->prog_fd < 0) { if (!kernel_has_frags_support()) xdp_program__set_xdp_frags_support(prog, false); bpf_program__set_type(prog->bpf_prog, BPF_PROG_TYPE_XDP); err = xdp_program__load(prog); if (err) return err; } if (prog->prog_fd < 0) return -EINVAL; return xdp_attach_fd(xdp_program__fd(prog), -1, ifindex, mode); } static int xdp_multiprog__main_fd(struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return -EINVAL; if (!mp->main_prog) return -ENOENT; return mp->main_prog->prog_fd; } static __u32 xdp_multiprog__main_id(struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp) || !mp->main_prog) return 0; return mp->main_prog->prog_id; } static int 
xdp_multiprog__hw_fd(struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return -EINVAL; if (!mp->hw_prog) return -ENOENT; return mp->hw_prog->prog_fd; } static __u32 xdp_multiprog__hw_id(struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp) || !mp->hw_prog) return 0; return mp->hw_prog->prog_id; } static int xdp_program__attach_hw(struct xdp_program *prog, int ifindex) { struct bpf_map *map; bpf_program__set_ifindex(prog->bpf_prog, ifindex); bpf_object__for_each_map (map, prog->bpf_obj) { bpf_map__set_ifindex(map, ifindex); } return xdp_program__attach_single(prog, ifindex, XDP_MODE_HW); } static int xdp_multiprog__detach_hw(struct xdp_multiprog *old_mp) { int err = 0, hw_fd = -1, ifindex = -1; if (!old_mp) return -EINVAL; ifindex = old_mp->ifindex; hw_fd = xdp_multiprog__hw_fd(old_mp); if (hw_fd < 0) return -EINVAL; err = xdp_attach_fd(-1, hw_fd, ifindex, XDP_MODE_HW); if (err < 0) return err; pr_debug("Detached hw program on ifindex '%d'\n", ifindex); return 0; } int xdp_program__attach_multi(struct xdp_program **progs, size_t num_progs, int ifindex, enum xdp_attach_mode mode, unsigned int flags) { struct xdp_multiprog *old_mp = NULL, *mp; int err = 0, retry_counter = 0; if (!progs || !num_progs || flags) return libxdp_err(-EINVAL); retry: old_mp = xdp_multiprog__get_from_ifindex(ifindex); if (IS_ERR_OR_NULL(old_mp)) old_mp = NULL; if (mode == XDP_MODE_HW) { bool old_hw_prog = xdp_multiprog__hw_prog(old_mp) != NULL; xdp_multiprog__close(old_mp); if (old_hw_prog) { pr_warn("XDP program already loaded in HW mode on ifindex %d; " "replacing HW mode programs not supported\n", ifindex); return libxdp_err(-EEXIST); } if (num_progs > 1) return libxdp_err(-EINVAL); return libxdp_err(xdp_program__attach_hw(progs[0], ifindex)); } if (num_progs == 1) { char *envval; envval = secure_getenv(XDP_SKIP_ENVVAR); if (envval && envval[0] == '1' && envval[1] == '\0') { pr_debug("Skipping dispatcher due to environment setting\n"); return libxdp_err(xdp_program__attach_single(progs[0], ifindex, mode)); } } mp = xdp_multiprog__generate(progs, num_progs, ifindex, old_mp, false); if (IS_ERR(mp)) { err = PTR_ERR(mp); mp = NULL; if (err == -EOPNOTSUPP) { if (num_progs == 1) { pr_info("Falling back to loading single prog " "without dispatcher\n"); return libxdp_err(xdp_program__attach_single(progs[0], ifindex, mode)); } else { pr_warn("Can't fall back to legacy load with %zu " "programs\n%s\n", num_progs, dispatcher_feature_err); } } goto out; } err = xdp_multiprog__pin(mp); if (err) { pr_warn("Failed to pin program: %s\n", strerror(-err)); goto out_close; } err = xdp_multiprog__attach(old_mp, mp, mode); if (err) { pr_debug("Failed to attach dispatcher on ifindex %d: %s\n", ifindex, strerror(-err)); xdp_multiprog__unpin(mp); if (err == -EAGAIN) { if (++retry_counter > MAX_RETRY) { pr_warn("Retried more than %d times, giving up\n", retry_counter); err = -EBUSY; goto out_close; } pr_debug("Existing dispatcher replaced while building replacement, retrying.\n"); xdp_multiprog__close(old_mp); xdp_multiprog__close(mp); usleep(1 << retry_counter); /* exponential backoff */ goto retry; } goto out_close; } if (old_mp) { err = xdp_multiprog__unpin(old_mp); if (err) { pr_warn("Failed to unpin old dispatcher: %s\n", strerror(-err)); err = 0; } } out_close: xdp_multiprog__close(mp); out: if (old_mp) xdp_multiprog__close(old_mp); return libxdp_err(err); } int xdp_program__attach(struct xdp_program *prog, int ifindex, enum xdp_attach_mode mode, unsigned int flags) { if (IS_ERR_OR_NULL(prog) || IS_ERR(prog)) return 
libxdp_err(-EINVAL); return libxdp_err(xdp_program__attach_multi(&prog, 1, ifindex, mode, flags)); } int xdp_program__detach_multi(struct xdp_program **progs, size_t num_progs, int ifindex, enum xdp_attach_mode mode, unsigned int flags) { struct xdp_multiprog *new_mp, *mp; int err = 0, retry_counter = 0; size_t i; if (flags || !num_progs || !progs) return libxdp_err(-EINVAL); retry: new_mp = NULL; mp = xdp_multiprog__get_from_ifindex(ifindex); if (IS_ERR_OR_NULL(mp)) { pr_warn("No XDP dispatcher found on ifindex %d\n", ifindex); return libxdp_err(-ENOENT); } if (mode == XDP_MODE_HW || xdp_multiprog__is_legacy(mp)) { __u32 id = (mode == XDP_MODE_HW) ? xdp_multiprog__hw_id(mp) : xdp_multiprog__main_id(mp); if (num_progs > 1) { pr_warn("Can only detach one program in legacy or HW mode\n"); err = -EINVAL; goto out; } if (!xdp_program__id(progs[0])) { pr_warn("Program 0 not loaded\n"); err = -EINVAL; goto out; } if (id != xdp_program__id(progs[0])) { pr_warn("Asked to unload prog %u but %u is loaded\n", xdp_program__id(progs[0]), id); err = -ENOENT; goto out; } } if (mode == XDP_MODE_HW) { err = xdp_multiprog__detach_hw(mp); goto out; } if (mode != XDP_MODE_UNSPEC && mp->attach_mode != mode) { pr_warn("XDP dispatcher attached in mode %d, requested %d\n", mp->attach_mode, mode); err = -ENOENT; goto out; } if (xdp_multiprog__is_legacy(mp)) { err = xdp_multiprog__attach(mp, NULL, mode); goto out; } /* fist pass - check progs and count number still loaded */ for (i = 0; i < num_progs; i++) { struct xdp_program *p = NULL; bool found = false; if (!progs[i]->prog_id) { pr_warn("Program %zu not loaded\n", i); err = -EINVAL; goto out; } while ((p = xdp_multiprog__next_prog(p, mp))) { if (progs[i]->prog_id == p->prog_id) found = true; } if (!found) { pr_warn("Couldn't find program with id %d on ifindex %d\n", progs[i]->prog_id, ifindex); err = -ENOENT; goto out; } } if (num_progs == mp->num_links) { err = xdp_multiprog__attach(mp, NULL, mp->attach_mode); if (err) goto out; err = xdp_multiprog__unpin(mp); if (err) goto out; } else { new_mp = xdp_multiprog__generate(progs, num_progs, ifindex, mp, true); if (IS_ERR(new_mp)) { err = PTR_ERR(new_mp); if (err == -EOPNOTSUPP) { pr_warn("Asked to detach %zu progs, but %zu loaded on ifindex %d, " "and partial detach is not supported by the kernel.\n", num_progs, mp->num_links, ifindex); } goto out; } err = xdp_multiprog__pin(new_mp); if (err) { pr_warn("Failed to pin program: %s\n", strerror(-err)); goto out; } err = xdp_multiprog__attach(mp, new_mp, mode); if (err) { pr_debug("Failed to attach dispatcher on ifindex %d: %s\n", ifindex, strerror(-err)); xdp_multiprog__unpin(new_mp); goto out; } err = xdp_multiprog__unpin(mp); if (err) { pr_warn("Failed to unpin old dispatcher: %s\n", strerror(-err)); err = 0; } } out: xdp_multiprog__close(mp); xdp_multiprog__close(new_mp); if (err == -EAGAIN) { if (++retry_counter > MAX_RETRY) { pr_warn("Retried more than %d times, giving up\n", retry_counter); return libxdp_err(-EBUSY); } pr_debug("Existing dispatcher replaced while building replacement, retrying.\n"); usleep(1 << retry_counter); /* exponential backoff */ goto retry; } return libxdp_err(err); } int xdp_program__detach(struct xdp_program *prog, int ifindex, enum xdp_attach_mode mode, unsigned int flags) { if (IS_ERR_OR_NULL(prog) || IS_ERR(prog)) return -EINVAL; return libxdp_err(xdp_program__detach_multi(&prog, 1, ifindex, mode, flags)); } int xdp_program__test_run(struct xdp_program *prog, struct bpf_test_run_opts *opts, unsigned int flags) { struct 
xdp_multiprog *mp = NULL; int err, prog_fd; if (IS_ERR_OR_NULL(prog) || flags) return libxdp_err(-EINVAL); if (prog->prog_fd < 0) { err = xdp_program__load(prog); if (err) return libxdp_err(err); } if (prog->prog_type == BPF_PROG_TYPE_EXT) { mp = xdp_multiprog__generate(&prog, 1, 0, NULL, false); if (IS_ERR(mp)) { err = PTR_ERR(mp); if (err == -EOPNOTSUPP) pr_warn("Program was already attached to a dispatcher, " "and kernel doesn't support multiple attachments\n"); return libxdp_err(err); } prog_fd = xdp_multiprog__main_fd(mp); } else if (prog->prog_type != BPF_PROG_TYPE_XDP) { pr_warn("Can't test_run non-XDP programs\n"); return libxdp_err(-ENOEXEC); } else { prog_fd = prog->prog_fd; } err = bpf_prog_test_run_opts(prog_fd, opts); if (err) err = -errno; if (mp) xdp_multiprog__close(mp); return libxdp_err(err); } void xdp_multiprog__close(struct xdp_multiprog *mp) { struct xdp_program *p, *next = NULL; if (IS_ERR_OR_NULL(mp)) return; xdp_program__close(mp->main_prog); for (p = mp->first_prog; p; p = next) { next = p->next; xdp_program__close(p); } xdp_program__close(mp->hw_prog); free(mp); } static struct xdp_multiprog *xdp_multiprog__new(int ifindex) { struct xdp_multiprog *mp; mp = malloc(sizeof *mp); if (!mp) return ERR_PTR(-ENOMEM); memset(mp, 0, sizeof(*mp)); mp->ifindex = ifindex; mp->version = XDP_DISPATCHER_VERSION; return mp; } static int xdp_multiprog__load(struct xdp_multiprog *mp) { char buf[100]; int err = 0; if (IS_ERR_OR_NULL(mp) || !mp->main_prog || mp->is_loaded || xdp_multiprog__is_legacy(mp)) return -EINVAL; pr_debug("Loading multiprog dispatcher for %d programs %s frags support\n", mp->config.num_progs_enabled, mp->config.is_xdp_frags ? "with" : "without"); if (mp->config.is_xdp_frags) xdp_program__set_xdp_frags_support(mp->main_prog, true); err = xdp_program__load(mp->main_prog); if (err) { pr_info("Failed to load dispatcher: %s\n", libxdp_strerror_r(err, buf, sizeof(buf))); err = -EOPNOTSUPP; goto out; } mp->is_loaded = true; out: return err; } int check_xdp_prog_version(const struct btf *btf, const char *name, __u32 *version) { const struct btf_type *sec, *def; sec = btf_get_datasec(btf, XDP_METADATA_SECTION); if (!sec) return libxdp_err(-ENOENT); def = btf_get_section_var(btf, sec, name, BTF_KIND_PTR); if (IS_ERR(def)) return libxdp_err(PTR_ERR(def)); if (!get_field_int(btf, name, def, version)) return libxdp_err(-ENOENT); return 0; } static int check_dispatcher_version(struct xdp_multiprog *mp, const char *prog_name, const struct btf *btf, __u32 nr_maps, __u32 map_id) { __u32 version = 0, map_key = 0, info_len = sizeof(struct bpf_map_info); const char *name = "dispatcher_version"; struct bpf_map_info map_info = {}; int err, map_fd, i; __u8 *buf = NULL; if (prog_name && strcmp(prog_name, "xdp_dispatcher")) { pr_debug("XDP program with name '%s' is not a dispatcher\n", prog_name); return -ENOENT; } if (nr_maps != 1) { pr_warn("Expected a single map for dispatcher, found %u\n", nr_maps); return -ENOENT; } map_fd = bpf_map_get_fd_by_id(map_id); if (map_fd < 0) { err = -errno; pr_warn("Could not get config map fd for id %u: %s\n", map_id, strerror(-err)); return err; } err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); if (err) { err = -errno; pr_warn("Couldn't get map info: %s\n", strerror(-err)); goto out; } if (map_info.key_size != sizeof(map_key) || map_info.value_size < 2 || map_info.max_entries != 1 || !(map_info.map_flags & BPF_F_RDONLY_PROG)) { pr_warn("Map flags or key/value size mismatch\n"); err = -EINVAL; goto out; } buf = 
malloc(map_info.value_size); if (!buf) { err = -ENOMEM; goto out; } err = bpf_map_lookup_elem(map_fd, &map_key, buf); if (err) { err = -errno; pr_warn("Could not lookup map value: %s\n", strerror(-err)); goto out; } if (buf[0] == XDP_DISPATCHER_MAGIC) { version = buf[1]; } else { err = check_xdp_prog_version(btf, name, &version); if (err) goto out; } switch (version) { case XDP_DISPATCHER_VERSION_V1: { struct xdp_dispatcher_config_v1 *config = (void *)buf; for (i = 0; i < MAX_DISPATCHER_ACTIONS; i++) { mp->config.chain_call_actions[i] = config->chain_call_actions[i]; mp->config.run_prios[i] = config->run_prios[i]; } mp->config.num_progs_enabled = config->num_progs_enabled; break; } case XDP_DISPATCHER_VERSION: if (map_info.value_size != sizeof(mp->config)) { pr_warn("Dispatcher version matches, but map size %u != expected %zu\n", map_info.value_size, sizeof(mp->config)); err = -EINVAL; goto out; } memcpy(&mp->config, buf, sizeof(mp->config)); break; default: pr_warn("XDP dispatcher version %u higher than supported %u\n", version, XDP_DISPATCHER_VERSION); err = -EOPNOTSUPP; goto out; } pr_debug("Verified XDP dispatcher version %d <= %d\n", version, XDP_DISPATCHER_VERSION); mp->version = version; out: close(map_fd); free(buf); return err; } static int xdp_multiprog__link_pinned_progs(struct xdp_multiprog *mp) { char buf[PATH_MAX], pin_path[PATH_MAX]; struct xdp_program *prog, *p = NULL; const char *bpffs_dir; int err, lock_fd, i; struct stat sb = {}; if (IS_ERR_OR_NULL(mp) || mp->first_prog) return -EINVAL; bpffs_dir = get_bpffs_dir(); if (IS_ERR(bpffs_dir)) return PTR_ERR(bpffs_dir); err = try_snprintf(pin_path, sizeof(pin_path), "%s/dispatch-%d-%d", bpffs_dir, mp->ifindex, mp->main_prog->prog_id); if (err) return err; lock_fd = xdp_lock_acquire(); if (lock_fd < 0) return lock_fd; pr_debug("Reading multiprog component programs from pinned directory\n"); err = stat(pin_path, &sb); if (err) { err = -errno; pr_debug("Couldn't stat pin_path '%s': %s\n", pin_path, strerror(-err)); goto out; } for (i = 0; i < mp->config.num_progs_enabled; i++) { err = try_snprintf(buf, sizeof(buf), "%s/prog%d-prog", pin_path, i); if (err) goto err; prog = xdp_program__from_pin(buf); if (IS_ERR(prog)) { err = PTR_ERR(prog); goto err; } err = try_snprintf(buf, sizeof(buf), "prog%d", i); if (err) goto err; prog->attach_name = strdup(buf); if (!prog->attach_name) { err = -ENOMEM; goto err; } prog->chain_call_actions = (mp->config.chain_call_actions[i] & ~(1U << XDP_DISPATCHER_RETVAL)); prog->run_prio = mp->config.run_prios[i]; prog->is_frags = !!(mp->config.program_flags[i] & BPF_F_XDP_HAS_FRAGS); if (!p) { mp->first_prog = prog; p = mp->first_prog; } else { p->next = prog; p = prog; } mp->num_links++; } out: xdp_lock_release(lock_fd); return err; err: prog = mp->first_prog; while (prog) { p = prog->next; xdp_program__close(prog); prog = p; } mp->first_prog = NULL; goto out; } static int xdp_multiprog__fill_from_fd(struct xdp_multiprog *mp, int prog_fd, int hw_fd) { struct bpf_prog_info info = {}; __u32 info_len, map_id = 0; struct xdp_program *prog; struct btf *btf = NULL; int err = 0; if (IS_ERR_OR_NULL(mp)) return -EINVAL; if (prog_fd > 0) { info.nr_map_ids = 1; info.map_ids = (uintptr_t)&map_id; info_len = sizeof(info); err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); if (err) { pr_warn("couldn't get program info for fd: %d", prog_fd); return -EINVAL; } if (!info.btf_id) { pr_debug("No BTF for prog ID %u\n", info.id); mp->is_legacy = true; goto legacy; } btf = btf__load_from_kernel_by_id(info.btf_id); if 
(!btf) { pr_warn("Couldn't get BTF for ID %ul\n", info.btf_id); goto out; } err = check_dispatcher_version(mp, info.name, btf, info.nr_map_ids, map_id); if (err) { if (err != -ENOENT) { pr_warn("Dispatcher version check failed for ID %d\n", info.id); goto out; } else { /* no dispatcher, mark as legacy prog */ mp->is_legacy = true; err = 0; goto legacy; } } legacy: prog = xdp_program__from_fd(prog_fd); if (IS_ERR(prog)) { err = PTR_ERR(prog); goto out; } mp->main_prog = prog; if (!xdp_multiprog__is_legacy(mp)) { err = xdp_multiprog__link_pinned_progs(mp); if (err) { pr_warn("Unable to read pinned progs: %s\n", strerror(-err)); mp->is_legacy = true; err = 0; } } pr_debug("Found %s with id %d and %zu component progs\n", xdp_multiprog__is_legacy(mp) ? "legacy program" : "multiprog", mp->main_prog->prog_id, mp->num_links); } if (hw_fd > 0) { prog = xdp_program__from_fd(hw_fd); if (IS_ERR(prog)) { err = PTR_ERR(prog); goto out; } if (mp->first_prog == NULL) mp->is_legacy = true; mp->hw_prog = prog; pr_debug("Found hw program with id %d\n", mp->hw_prog->prog_id); } mp->is_loaded = true; out: btf__free(btf); return err; } static struct xdp_multiprog *xdp_multiprog__from_fd(int fd, int hw_fd, int ifindex) { struct xdp_multiprog *mp = NULL; int err; mp = xdp_multiprog__new(ifindex); if (IS_ERR(mp)) return mp; err = xdp_multiprog__fill_from_fd(mp, fd, hw_fd); if (err) goto err; return mp; err: xdp_multiprog__close(mp); return ERR_PTR(err); } static struct xdp_multiprog *xdp_multiprog__from_id(__u32 id, __u32 hw_id, int ifindex) { struct xdp_multiprog *mp; int hw_fd = 0; int fd = 0; int err; if (id) { fd = bpf_prog_get_fd_by_id(id); if (fd < 0) { err = -errno; pr_warn("couldn't get program fd: %s", strerror(-err)); goto err; } } if (hw_id) { hw_fd = bpf_prog_get_fd_by_id(hw_id); if (hw_fd < 0) { err = -errno; pr_warn("couldn't get program fd: %s", strerror(-err)); goto err; } } mp = xdp_multiprog__from_fd(fd, hw_fd, ifindex); if (IS_ERR(mp)) { err = PTR_ERR(mp); goto err; } // duplicated fd/hw_fd already in prog, close originals if (fd > 0) close(fd); if (hw_fd > 0) close(hw_fd); return mp; err: if (fd > 0) close(fd); if (hw_fd > 0) close(hw_fd); return ERR_PTR(err); } static int xdp_get_ifindex_prog_id(int ifindex, __u32 *prog_id, __u32 *hw_prog_id, enum xdp_attach_mode *mode) { __u32 _prog_id, _drv_prog_id, _hw_prog_id, _skb_prog_id; enum xdp_attach_mode _mode; __u8 _attach_mode; if (!hw_prog_id) hw_prog_id = &_prog_id; if (!mode) mode = &_mode; int err; #ifdef HAVE_LIBBPF_BPF_XDP_ATTACH LIBBPF_OPTS(bpf_xdp_query_opts, opts); err = bpf_xdp_query(ifindex, 0, &opts); if (err) return err; _drv_prog_id = opts.drv_prog_id; _skb_prog_id = opts.skb_prog_id; _hw_prog_id = opts.hw_prog_id; _attach_mode = opts.attach_mode; #else struct xdp_link_info xinfo = {}; err = bpf_get_link_xdp_info(ifindex, &xinfo, sizeof(xinfo), 0); if (err) return err; _drv_prog_id = xinfo.drv_prog_id; _skb_prog_id = xinfo.skb_prog_id; _hw_prog_id = xinfo.hw_prog_id; _attach_mode = xinfo.attach_mode; #endif switch (_attach_mode) { case XDP_ATTACHED_SKB: *prog_id = _skb_prog_id; *mode = XDP_MODE_SKB; break; case XDP_ATTACHED_DRV: *prog_id = _drv_prog_id; *mode = XDP_MODE_NATIVE; break; case XDP_ATTACHED_MULTI: if (_drv_prog_id) { *prog_id = _drv_prog_id; *mode = XDP_MODE_NATIVE; } else if (_skb_prog_id) { *prog_id = _skb_prog_id; *mode = XDP_MODE_SKB; } *hw_prog_id = _hw_prog_id; break; case XDP_ATTACHED_HW: *hw_prog_id = _hw_prog_id; *mode = XDP_MODE_UNSPEC; break; case XDP_ATTACHED_NONE: default: *mode = XDP_MODE_UNSPEC; break; } 
return 0; } struct xdp_multiprog *xdp_multiprog__get_from_ifindex(int ifindex) { enum xdp_attach_mode mode = XDP_MODE_UNSPEC; int err, retry_counter = 0; struct xdp_multiprog *mp; __u32 hw_prog_id = 0; __u32 prog_id = 0; retry: err = xdp_get_ifindex_prog_id(ifindex, &prog_id, &hw_prog_id, &mode); if (err) return libxdp_err_ptr(err, false); if (!prog_id && !hw_prog_id) return libxdp_err_ptr(-ENOENT, false); mp = xdp_multiprog__from_id(prog_id, hw_prog_id, ifindex); if (!IS_ERR_OR_NULL(mp)) mp->attach_mode = mode; else if (IS_ERR(mp)) { err = PTR_ERR(mp); if (err == -ENOENT) { if (++retry_counter > MAX_RETRY) { pr_warn("Retried more than %d times, giving up\n", retry_counter); err = -EBUSY; } else { pr_debug("Dispatcher disappeared before we could load it, retrying.\n"); usleep(1 << retry_counter); /* exponential backoff */ goto retry; } } mp = libxdp_err_ptr(err, false); } else mp = libxdp_err_ptr(0, true); return mp; } int libxdp_check_kern_compat(void) { struct xdp_program *tgt_prog = NULL, *test_prog = NULL; const char *bpffs_dir; char buf[PATH_MAX]; int lock_fd; int err = 0; bpffs_dir = get_bpffs_dir(); if (IS_ERR(bpffs_dir)) { err = PTR_ERR(bpffs_dir); pr_warn("Can't use dispatcher without a working bpffs\n"); return -EOPNOTSUPP; } if (kernel_compat > COMPAT_UNKNOWN) goto skip; pr_debug("Checking dispatcher compatibility\n"); tgt_prog = __xdp_program__find_file("xdp-dispatcher.o", NULL, "xdp_pass", NULL); if (IS_ERR(tgt_prog)) { err = PTR_ERR(tgt_prog); pr_warn("Couldn't open BPF file xdp-dispatcher.o\n"); return err; } test_prog = __xdp_program__find_file("xdp-dispatcher.o", NULL, "xdp_pass", NULL); if (IS_ERR(test_prog)) { err = PTR_ERR(test_prog); pr_warn("Couldn't open BPF file xdp-dispatcher.o\n"); return err; } err = xdp_program__load(tgt_prog); if (err) { pr_debug("Couldn't load XDP program: %s\n", strerror(-err)); goto out; } err = bpf_program__set_attach_target(test_prog->bpf_prog, tgt_prog->prog_fd, "xdp_pass"); if (err) { pr_debug("Failed to set attach target: %s\n", strerror(-err)); goto out; } bpf_program__set_type(test_prog->bpf_prog, BPF_PROG_TYPE_EXT); bpf_program__set_expected_attach_type(test_prog->bpf_prog, 0); err = xdp_program__load(test_prog); if (err) { char buf[100] = {}; libxdp_strerror(err, buf, sizeof(buf)); pr_debug("Failed to load program %s: %s\n", xdp_program__name(test_prog), buf); goto out; } test_prog->link_fd = bpf_raw_tracepoint_open(NULL, test_prog->prog_fd); if (test_prog->link_fd < 0) { err = -errno; pr_debug("Failed to attach test program to dispatcher: %s\n", strerror(-err)); goto out; } err = try_snprintf(buf, sizeof(buf), "%s/prog-test-link-%i-%i", bpffs_dir, IFINDEX_LO, test_prog->prog_id); if (err) goto out; lock_fd = xdp_lock_acquire(); if (lock_fd < 0) { err = lock_fd; goto out; } err = bpf_obj_pin(test_prog->link_fd, buf); if (err) { err = -errno; pr_warn("Couldn't pin link FD at %s: %s\n", buf, strerror(-err)); goto out_locked; } err = unlink(buf); if (err) { err = -errno; pr_warn("Couldn't unlink file %s: %s\n", buf, strerror(-err)); goto out_locked; } kernel_compat = COMPAT_SUPPORTED; out_locked: xdp_lock_release(lock_fd); out: xdp_program__close(test_prog); xdp_program__close(tgt_prog); if (err) { pr_info("Compatibility check for dispatcher program failed: %s\n", strerror(-err)); kernel_compat = COMPAT_UNSUPPORTED; } skip: return kernel_compat == COMPAT_SUPPORTED ? 
0 : -EOPNOTSUPP; } static int find_prog_btf_id(const char *name, __u32 attach_prog_fd) { struct bpf_prog_info info = {}; __u32 info_size = sizeof(info); int err = -EINVAL; struct btf *btf; err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_size); if (err) { err = -errno; pr_warn("failed get_prog_info for FD %d\n", attach_prog_fd); return err; } if (!info.btf_id) { pr_warn("The target program doesn't have BTF\n"); return -EINVAL; } btf = btf__load_from_kernel_by_id(info.btf_id); if (!btf) { pr_warn("Failed to get BTF of the program\n"); return -EINVAL; } err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); btf__free(btf); if (err <= 0) pr_warn("%s is not found in prog's BTF\n", name); return err; } static int xdp_multiprog__link_prog(struct xdp_multiprog *mp, struct xdp_program *prog) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); struct xdp_program *new_prog, *p; bool was_loaded = false; char buf[PATH_MAX]; int err, lfd = -1; char *attach_func; __s32 btf_id; if (IS_ERR_OR_NULL(mp) || IS_ERR_OR_NULL(prog) || !mp->is_loaded || mp->num_links >= mp->config.num_progs_enabled) return -EINVAL; err = libxdp_check_kern_compat(); if (err) return err; if (!prog->btf) { pr_warn("Program %s has no BTF information, so we can't load it as multiprog\n", xdp_program__name(prog)); return -EOPNOTSUPP; } pr_debug("Linking prog %s as multiprog entry %zu\n", xdp_program__name(prog), mp->num_links); err = try_snprintf(buf, sizeof(buf), "prog%zu", mp->num_links); if (err) goto err; if (mp->config.num_progs_enabled == 1) attach_func = "xdp_dispatcher"; else attach_func = buf; btf_id = find_prog_btf_id(attach_func, mp->main_prog->prog_fd); if (btf_id <= 0) { err = btf_id; pr_debug("Couldn't find BTF ID for %s: %d\n", attach_func, err); goto err; } if (prog->prog_fd < 0) { err = bpf_program__set_attach_target(prog->bpf_prog, mp->main_prog->prog_fd, attach_func); if (err) { pr_debug("Failed to set attach target: %s\n", strerror(-err)); goto err; } bpf_program__set_type(prog->bpf_prog, BPF_PROG_TYPE_EXT); bpf_program__set_expected_attach_type(prog->bpf_prog, 0); err = xdp_program__load(prog); if (err) { if (err == -E2BIG) { pr_debug("Got 'argument list too long' error while " "loading component program.\n"); err = -EOPNOTSUPP; } else { char buf[100] = {}; libxdp_strerror(err, buf, sizeof(buf)); pr_debug("Failed to load program %s: %s\n", xdp_program__name(prog), buf); } goto err; } was_loaded = true; } /* clone the xdp_program ref so we can keep it */ new_prog = xdp_program__clone(prog, 0); if (IS_ERR(new_prog)) { err = PTR_ERR(new_prog); pr_warn("Failed to clone xdp_program: %s\n", strerror(-err)); goto err; } opts.target_btf_id = btf_id; /* The attach will disappear once this fd is closed */ lfd = bpf_link_create(new_prog->prog_fd, mp->main_prog->prog_fd, 0, &opts); if (lfd < 0) { err = -errno; if (err == -EINVAL) { if (!was_loaded) { pr_debug("Kernel doesn't support re-attaching " "freplace programs.\n"); err = -EOPNOTSUPP; } else { pr_debug("Got EINVAL, retrying " "raw_tracepoint_open() without target\n"); /* we just loaded the program, so should be able * to attach the old way */ lfd = bpf_raw_tracepoint_open(NULL, new_prog->prog_fd); if (lfd < 0) err = -errno; else goto attach_ok; } } if (err == -EPERM) { pr_debug("Got 'permission denied' error while " "attaching program to dispatcher.\n%s\n", dispatcher_feature_err); err = -EOPNOTSUPP; } else { pr_warn("Failed to attach program %s to dispatcher: %s\n", xdp_program__name(new_prog), strerror(-err)); } goto err_free; } attach_ok: 
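/* Attach succeeded, either through bpf_link_create() or through the
 * bpf_raw_tracepoint_open() fallback above; lfd is the link fd that keeps
 * the freplace attachment alive, so record it on the program below. */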
new_prog->attach_name = strdup(buf); if (!new_prog->attach_name) { err = -ENOMEM; goto err_free; } pr_debug( "Attached prog '%s' with priority %d in dispatcher entry '%s' with fd %d\n", xdp_program__name(new_prog), xdp_program__run_prio(new_prog), new_prog->attach_name, lfd); new_prog->link_fd = lfd; if (!mp->first_prog) { mp->first_prog = new_prog; } else { p = mp->first_prog; while (p->next) p = p->next; p->next = new_prog; } mp->num_links++; return 0; err_free: if (lfd >= 0) close(lfd); xdp_program__close(new_prog); err: return err; } /* * xdp_multiprog__generate - generate a new multiprog dispatcher * * This generates a new multiprog dispatcher for the programs in progs. If * old_mp is set, the progs will either be added to or removed from the existing * set of programs in the dispatcher represented by old_mp, depending on the * value of remove_progs. If old_mp is not set, a new dispatcher will be created * just holding the programs in progs. In both cases, the full set of programs * will be sorted according to their run order (see cmp_xdp_programs). * * When called with remove_progs set, the caller is responsible for checking * that all the programs in progs are actually present in old_mp. */ static struct xdp_multiprog *xdp_multiprog__generate(struct xdp_program **progs, size_t num_progs, int ifindex, struct xdp_multiprog *old_mp, bool remove_progs) { size_t num_new_progs = old_mp ? old_mp->num_links : 0; struct xdp_program **new_progs = NULL; struct xdp_program *dispatcher; struct xdp_multiprog *mp; struct bpf_map *map; size_t i; int err; if (!progs || !num_progs || (!old_mp && remove_progs)) return ERR_PTR(-EINVAL); num_new_progs += remove_progs ? -num_progs : num_progs; if (num_new_progs > MAX_DISPATCHER_ACTIONS) { pr_warn("Not enough free slots in the dispatcher.\n"); return ERR_PTR(-E2BIG); } pr_debug("Generating multi-prog dispatcher for %zu programs\n", num_new_progs); mp = xdp_multiprog__new(ifindex); if (IS_ERR(mp)) return mp; mp->kernel_frags_support = kernel_has_frags_support(); if (old_mp) { struct xdp_program *prog; size_t j; if (xdp_multiprog__is_legacy(old_mp)) { pr_warn("Existing program is not using a dispatcher, can't replace; unload first\n"); err = -EBUSY; goto err; } if (old_mp->version < mp->version) { pr_warn("Existing dispatcher version %u is older than our version %u. " "Refusing transparent upgrade, unload first\n", old_mp->version, mp->version); err = -EBUSY; goto err; } new_progs = calloc(num_new_progs, sizeof(*new_progs)); if (!new_progs) { err = -ENOMEM; goto err; } for (i = 0, prog = old_mp->first_prog; prog; prog = prog->next) { if (remove_progs) { /* remove_progs means progs is the array of * programs we should remove from old_mp instead * of adding them. */ bool found = false; for (j = 0; j < num_progs; j++) if (progs[j]->prog_id == prog->prog_id) found = true; if (found) continue; /* Sanity check: caller should ensure all * programs to remove actually exist; check here * anyway to ensure we don't overrun the array * if this is not done correctly.
*/ if (i >= num_new_progs) { pr_warn("Not all programs to remove were found\n"); err = -EINVAL; goto err; } } new_progs[i++] = prog; } if (!remove_progs) for (j = 0; i < num_new_progs; i++, j++) new_progs[i] = progs[j]; } else { new_progs = progs; } if (num_new_progs > 1) qsort(new_progs, num_new_progs, sizeof(*new_progs), cmp_xdp_programs); dispatcher = __xdp_program__find_file("xdp-dispatcher.o", NULL, "xdp_dispatcher", NULL); if (IS_ERR(dispatcher)) { err = PTR_ERR(dispatcher); pr_warn("Couldn't open BPF file 'xdp-dispatcher.o'\n"); goto err; } mp->main_prog = dispatcher; map = bpf_object__next_map(mp->main_prog->bpf_obj, NULL); if (!map) { pr_warn("Couldn't find rodata map in object file 'xdp-dispatcher.o'\n"); err = -ENOENT; goto err; } mp->config.magic = XDP_DISPATCHER_MAGIC; mp->config.dispatcher_version = mp->version; mp->config.num_progs_enabled = num_new_progs; mp->config.is_xdp_frags = mp->kernel_frags_support; for (i = 0; i < num_new_progs; i++) { mp->config.chain_call_actions[i] = (new_progs[i]->chain_call_actions | (1U << XDP_DISPATCHER_RETVAL)); mp->config.run_prios[i] = new_progs[i]->run_prio; if (xdp_program__xdp_frags_support(new_progs[i])) mp->config.program_flags[i] = BPF_F_XDP_HAS_FRAGS; else mp->config.is_xdp_frags = false; } if (mp->kernel_frags_support) { if (!mp->config.is_xdp_frags) pr_debug("At least one attached program doesn't " "support frags, disabling it for the " "dispatcher\n"); else pr_debug("All attached programs support frags, " "enabling it for the dispatcher\n"); } err = bpf_map__set_initial_value(map, &mp->config, sizeof(mp->config)); if (err) { pr_warn("Failed to set rodata for object file 'xdp-dispatcher.o'\n"); goto err; } err = xdp_multiprog__load(mp); if (err) goto err; for (i = 0; i < num_new_progs; i++) { err = xdp_multiprog__link_prog(mp, new_progs[i]); if (err) goto err; } if (old_mp) free(new_progs); return mp; err: if (old_mp) free(new_progs); xdp_multiprog__close(mp); return ERR_PTR(err); } static int xdp_multiprog__pin(struct xdp_multiprog *mp) { char pin_path[PATH_MAX], buf[PATH_MAX]; struct xdp_program *prog; const char *bpffs_dir; int err = 0, lock_fd; if (IS_ERR_OR_NULL(mp) || xdp_multiprog__is_legacy(mp)) return -EINVAL; bpffs_dir = get_bpffs_dir(); if (IS_ERR(bpffs_dir)) return PTR_ERR(bpffs_dir); err = try_snprintf(pin_path, sizeof(pin_path), "%s/dispatch-%d-%d", bpffs_dir, mp->ifindex, mp->main_prog->prog_id); if (err) return err; lock_fd = xdp_lock_acquire(); if (lock_fd < 0) return lock_fd; pr_debug("Pinning multiprog fd %d beneath %s\n", mp->main_prog->prog_fd, pin_path); err = mkdir(pin_path, S_IRWXU); if (err && errno != EEXIST) { err = -errno; goto out; } for (prog = mp->first_prog; prog; prog = prog->next) { if (prog->link_fd < 0) { err = -EINVAL; pr_warn("Prog %s not linked\n", prog->prog_name); goto err_unpin; } err = try_snprintf(buf, sizeof(buf), "%s/%s-link", pin_path, prog->attach_name); if (err) goto err_unpin; err = bpf_obj_pin(prog->link_fd, buf); if (err) { err = -errno; pr_warn("Couldn't pin link FD at %s: %s\n", buf, strerror(-err)); goto err_unpin; } pr_debug("Pinned link for prog %s at %s\n", prog->prog_name, buf); err = try_snprintf(buf, sizeof(buf), "%s/%s-prog", pin_path, prog->attach_name); if (err) goto err_unpin; err = bpf_obj_pin(prog->prog_fd, buf); if (err) { err = -errno; pr_warn("Couldn't pin prog FD at %s: %s\n", buf, strerror(-err)); goto err_unpin; } pr_debug("Pinned prog %s at %s\n", prog->prog_name, buf); } out: xdp_lock_release(lock_fd); return err; err_unpin: for (prog = mp->first_prog; 
prog; prog = prog->next) { if (!try_snprintf(buf, sizeof(buf), "%s/%s-link", pin_path, prog->attach_name)) unlink(buf); if (!try_snprintf(buf, sizeof(buf), "%s/%s-prog", pin_path, prog->attach_name)) unlink(buf); } rmdir(pin_path); goto out; } static int xdp_multiprog__unpin(struct xdp_multiprog *mp) { char pin_path[PATH_MAX], buf[PATH_MAX]; struct xdp_program *prog; const char *bpffs_dir; int err = 0, lock_fd; if (IS_ERR_OR_NULL(mp) || xdp_multiprog__is_legacy(mp)) return -EINVAL; bpffs_dir = get_bpffs_dir(); if (IS_ERR(bpffs_dir)) return PTR_ERR(bpffs_dir); err = try_snprintf(pin_path, sizeof(pin_path), "%s/dispatch-%d-%d", bpffs_dir, mp->ifindex, mp->main_prog->prog_id); if (err) return err; lock_fd = xdp_lock_acquire(); if (lock_fd < 0) return lock_fd; pr_debug("Unpinning multiprog fd %d beneath %s\n", mp->main_prog->prog_fd, pin_path); for (prog = mp->first_prog; prog; prog = prog->next) { err = try_snprintf(buf, sizeof(buf), "%s/%s-link", pin_path, prog->attach_name); if (err) goto out; err = unlink(buf); if (err) { err = -errno; pr_warn("Couldn't unlink file %s: %s\n", buf, strerror(-err)); goto out; } pr_debug("Unpinned link for prog %s from %s\n", prog->prog_name, buf); err = try_snprintf(buf, sizeof(buf), "%s/%s-prog", pin_path, prog->attach_name); if (err) goto out; err = unlink(buf); if (err) { err = -errno; pr_warn("Couldn't unlink file %s: %s\n", buf, strerror(-err)); goto out; } pr_debug("Unpinned prog %s from %s\n", prog->prog_name, buf); } err = rmdir(pin_path); if (err) err = -errno; pr_debug("Removed pin directory %s\n", pin_path); out: xdp_lock_release(lock_fd); return err; } static int xdp_detach_link(__u32 ifindex, __u32 prog_id) { struct bpf_link_info link_info; __u32 link_info_len, id = 0; int err, fd; while (true) { err = bpf_link_get_next_id(id, &id); if (err) { err = -errno; pr_debug("Can't get next link for id %u: %s", id, strerror(errno)); return err; } fd = bpf_link_get_fd_by_id(id); if (fd < 0) { err = -errno; pr_debug("Can't get link by id %u: %s", id, strerror(errno)); return err; } memset(&link_info, 0, sizeof(link_info)); link_info_len = sizeof(link_info); err = bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len); if (err) { err = -errno; pr_debug("Can't get link info for %u: %s", id, strerror(errno)); break; } if (link_info.type == BPF_LINK_TYPE_XDP && link_info.xdp.ifindex == ifindex && link_info.prog_id == prog_id) { pr_debug("Detach link for id %u for prog %u on interface %u", id, prog_id, ifindex); err = bpf_link_detach(fd); if (err) { err = -errno; pr_warn("Can't detach link %u: %s", id, strerror(errno)); } break; } close(fd); } close(fd); return err; } static int xdp_multiprog__attach(struct xdp_multiprog *old_mp, struct xdp_multiprog *mp, enum xdp_attach_mode mode) { int err = 0, prog_fd = -1, old_fd = -1, ifindex = -1; if (IS_ERR_OR_NULL(mp) && !old_mp) return -EINVAL; if (mode == XDP_MODE_HW) return -EINVAL; if (mp) { prog_fd = xdp_multiprog__main_fd(mp); if (prog_fd < 0) return -EINVAL; ifindex = mp->ifindex; } if (old_mp) { old_fd = xdp_multiprog__main_fd(old_mp); if (old_fd < 0) return -EINVAL; if (ifindex > -1 && ifindex != old_mp->ifindex) return -EINVAL; ifindex = old_mp->ifindex; } err = xdp_attach_fd(prog_fd, old_fd, ifindex, mode); if (err < 0) { if (errno == EBUSY && !mp) { pr_debug("Detaching link on ifindex %d\n", ifindex); return xdp_detach_link(ifindex, xdp_multiprog__main_id(old_mp)); } goto err; } if (mp) pr_debug("Loaded %zu programs on ifindex %d%s\n", mp->num_links, ifindex, mode == XDP_MODE_SKB ? 
" in skb mode" : ""); else pr_debug("Detached %s on ifindex %d%s\n", xdp_multiprog__is_legacy(old_mp) ? "program" : "multiprog", ifindex, mode == XDP_MODE_SKB ? " in skb mode" : ""); return 0; err: return err; } int xdp_multiprog__detach(struct xdp_multiprog *mp) { int err = 0; if (IS_ERR_OR_NULL(mp) || !mp->is_loaded) return libxdp_err(-EINVAL); if (mp->hw_prog) { err = xdp_multiprog__detach_hw(mp); if (err) return libxdp_err(err); } if (mp->main_prog) { err = xdp_multiprog__attach(mp, NULL, mp->attach_mode); if (err) return libxdp_err(err); if (!xdp_multiprog__is_legacy(mp)) err = xdp_multiprog__unpin(mp); } return libxdp_err(err); } struct xdp_program *xdp_multiprog__next_prog(const struct xdp_program *prog, const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp) || xdp_multiprog__is_legacy(mp)) return libxdp_err_ptr(0, true); if (prog) return prog->next; return mp->first_prog; } struct xdp_program *xdp_multiprog__hw_prog(const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return libxdp_err_ptr(0, true); return mp->hw_prog; } enum xdp_attach_mode xdp_multiprog__attach_mode(const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return XDP_MODE_UNSPEC; return mp->attach_mode; } struct xdp_program *xdp_multiprog__main_prog(const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return libxdp_err_ptr(0, true); return mp->main_prog; } bool xdp_multiprog__is_legacy(const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return false; return mp->is_legacy; } int xdp_multiprog__program_count(const struct xdp_multiprog *mp) { if (IS_ERR_OR_NULL(mp)) return libxdp_err(-EINVAL); return mp->num_links; } bool xdp_multiprog__xdp_frags_support(const struct xdp_multiprog *mp) { return !xdp_multiprog__is_legacy(mp) && mp->config.is_xdp_frags; } static int remove_pin_dir(const char *subdir) { char prog_path[PATH_MAX], pin_path[PATH_MAX]; int err; DIR *d; const char *dir = get_bpffs_dir(); if (IS_ERR(dir)) return PTR_ERR(dir); err = try_snprintf(pin_path, sizeof(pin_path), "%s/%s", dir, subdir); if (err) return err; d = opendir(pin_path); if (!d) { err = -errno; pr_warn("Failed to open pin directory: %s\n", strerror(-err)); return err; } for (struct dirent *dent = readdir(d); dent; dent = readdir(d)) { /* skip . and .. 
*/ if (dent->d_type == DT_DIR) continue; err = try_snprintf(prog_path, sizeof(prog_path), "%s/%s", pin_path, dent->d_name); if (err) goto err; err = unlink(prog_path); if (err) { err = -errno; pr_warn("Couldn't unlink file %s/%s: %s\n", subdir, dent->d_name, strerror(-err)); goto err; } } err = rmdir(pin_path); if (err) { err = -errno; pr_warn("Failed to remove pin directory %s: %s\n", pin_path, strerror(-err)); } err: closedir(d); return err; } int libxdp_clean_references(int ifindex) { int err = 0, lock_fd, path_ifindex; __u32 dir_prog_id, prog_id = 0; DIR *d; const char *dir = get_bpffs_dir(); if (IS_ERR(dir)) return libxdp_err(PTR_ERR(dir)); lock_fd = xdp_lock_acquire(); if (lock_fd < 0) return libxdp_err(lock_fd); d = opendir(dir); if (!d) { err = -errno; pr_debug("Failed to open bpffs directory: %s\n", strerror(-err)); goto out; } for (struct dirent *dent = readdir(d); dent; dent = readdir(d)) { if (dent->d_type != DT_DIR) continue; if (sscanf(dent->d_name, "dispatch-%d-%"PRIu32"", &path_ifindex, &dir_prog_id) != 2) continue; /* If ifindex is set, skip this dir if it doesn't match */ if (ifindex && path_ifindex != ifindex) continue; xdp_get_ifindex_prog_id(path_ifindex, &prog_id, NULL, NULL); if (!prog_id || prog_id != dir_prog_id) { pr_info("Prog id %"PRIu32" no longer attached on ifindex %d, removing pin directory %s\n", dir_prog_id, path_ifindex, dent->d_name); err = remove_pin_dir(dent->d_name); if (err) break; } } closedir(d); out: xdp_lock_release(lock_fd); return libxdp_err(err); } xdp-tools-1.5.4/lib/libxdp/Makefile0000644000175100001660000001337515003640462016516 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) LIB_DIR = .. include libxdp.mk include $(LIB_DIR)/defines.mk OBJDIR ?= . SHARED_OBJDIR := $(OBJDIR)/sharedobjs STATIC_OBJDIR := $(OBJDIR)/staticobjs OBJS := libxdp.o xsk.o XDP_OBJS := xdp-dispatcher.o xsk_def_xdp_prog.o xsk_def_xdp_prog_5.3.o EMBEDDED_XDP_OBJS := $(addsuffix .embed.o,$(basename $(XDP_OBJS))) SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS)) STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS)) $(EMBEDDED_XDP_OBJS) STATIC_LIBS := $(OBJDIR)/libxdp.a MAN_PAGE := libxdp.3 MAN_OBJ := ${MAN_PAGE:.3=.man} MAN_FILES := $(MAN_PAGE) TEST_DIR := tests SHARED_CFLAGS += -fPIC -DSHARED STATIC_CFLAGS += -D LIBXDP_STATIC=1 LIB_HEADERS := $(wildcard $(HEADER_DIR)/xdp/*.h) BPF_HEADERS := $(wildcard $(HEADER_DIR)/bpf/*.h) $(wildcard $(HEADER_DIR)/xdp/*.h) EXTRA_LIB_DEPS := $(OBJECT_LIBBPF) $(LIBMK) $(LIB_OBJS) $(LIB_HEADERS) compat.h libxdp_internal.h xsk_def_xdp_prog.h bpf_instr.h PC_FILE := $(OBJDIR)/libxdp.pc TEMPLATED_SOURCES := xdp-dispatcher.c CFLAGS += -I$(HEADER_DIR) BPF_CFLAGS += -I$(HEADER_DIR) $(ARCH_INCLUDES) ifndef BUILD_STATIC_ONLY SHARED_LIBS := $(OBJDIR)/libxdp.so \ $(OBJDIR)/libxdp.so.$(LIBXDP_MAJOR_VERSION) \ $(OBJDIR)/libxdp.so.$(LIBXDP_VERSION) VERSION_SCRIPT := libxdp.map CHECK_RULES := check_abi endif all: $(STATIC_LIBS) $(SHARED_LIBS) $(XDP_OBJS) $(PC_FILE) check man clean: $(Q)rm -f $(STATIC_LIBS) $(STATIC_OBJS) $(SHARED_LIBS) $(SHARED_OBJS) $(XDP_OBJS) $(PC_FILE) $(MAN_OBJ) $(TEMPLATED_SOURCES) *.ll $(Q)for d in $(SHARED_OBJDIR) $(STATIC_OBJDIR); do \ [ -d "$$d" ] && rmdir "$$d"; done || true $(Q)$(MAKE) -C $(TEST_DIR) clean install: all $(Q)install -d -m 0755 $(DESTDIR)$(HDRDIR) $(Q)install -d -m 0755 $(DESTDIR)$(LIBDIR) $(Q)install -d -m 0755 $(DESTDIR)$(LIBDIR)/pkgconfig $(Q)install -d -m 0755 $(DESTDIR)$(BPF_OBJECT_DIR) $(Q)install -m 0644 $(LIB_HEADERS) $(DESTDIR)$(HDRDIR)/ $(Q)install -m 0644 $(PC_FILE) 
$(DESTDIR)$(LIBDIR)/pkgconfig/ $(Q)cp -fpR $(SHARED_LIBS) $(STATIC_LIBS) $(DESTDIR)$(LIBDIR) $(Q)install -m 0644 $(XDP_OBJS) $(DESTDIR)$(BPF_OBJECT_DIR) $(if $(MAN_FILES),$(Q)install -m 0755 -d $(DESTDIR)$(MANDIR)/man3) $(if $(MAN_FILES),$(Q)install -m 0644 $(MAN_FILES) $(DESTDIR)$(MANDIR)/man3) $(OBJDIR)/libxdp.a: $(STATIC_OBJS) $(QUIET_LINK)$(AR) rcs $@ $^ $(OBJDIR)/libxdp.so: $(OBJDIR)/libxdp.so.$(LIBXDP_MAJOR_VERSION) $(Q)ln -sf $(^F) $@ $(OBJDIR)/libxdp.so.$(LIBXDP_MAJOR_VERSION): $(OBJDIR)/libxdp.so.$(LIBXDP_VERSION) $(Q)ln -sf $(^F) $@ $(OBJDIR)/libxdp.so.$(LIBXDP_VERSION): $(SHARED_OBJS) $(QUIET_LINK)$(CC) -shared -Wl,-soname,libxdp.so.$(LIBXDP_MAJOR_VERSION) \ -Wl,--version-script=$(VERSION_SCRIPT) \ $^ $(LDFLAGS) $(LDLIBS) -o $@ $(OBJDIR)/libxdp.pc: $(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \ -e "s|@LIBDIR@|$(LIBDIR)|" \ -e "s|@VERSION@|$(TOOLS_VERSION)|" \ < libxdp.pc.template > $@ $(STATIC_OBJDIR): $(Q)mkdir -p $(STATIC_OBJDIR) $(SHARED_OBJDIR): $(Q)mkdir -p $(SHARED_OBJDIR) $(STATIC_OBJDIR)/%.o: %.c $(EXTRA_LIB_DEPS) | $(STATIC_OBJDIR) $(QUIET_CC)$(CC) $(CFLAGS) $(CPPFLAGS) $(STATIC_CFLAGS) -Wall -I../../headers -c $< -o $@ $(SHARED_OBJDIR)/%.o: %.c $(EXTRA_LIB_DEPS) | $(SHARED_OBJDIR) $(QUIET_CC)$(CC) $(CFLAGS) $(CPPFLAGS) $(SHARED_CFLAGS) -Wall -I../../headers -c $< -o $@ XDP_IN_SHARED := $(SHARED_OBJDIR)/libxdp.o $(SHARED_OBJDIR)/xsk.o GLOBAL_SYM_COUNT = $(shell $(READELF) -s --wide $(XDP_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ sed 's/\[.*\]//' | \ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ sort -u | wc -l) VERSIONED_SYM_COUNT = $(shell $(READELF) --dyn-syms --wide $(OBJDIR)/libxdp.so | \ grep -Eo '[^ ]+@LIBXDP_' | cut -d@ -f1 | sort -u | wc -l) check: $(CHECK_RULES) check_abi: $(OBJDIR)/libxdp.so @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \ echo "Warning: Num of global symbols in $(XDP_IN_SHARED)" \ "($(GLOBAL_SYM_COUNT)) does NOT match with num of" \ "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ "Please make sure all symbols are" \ "versioned in $(VERSION_SCRIPT)." >&2; \ $(READELF) -s --wide $(XDP_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ sed 's/\[.*\]//' | \ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ sort -u > $(OUTPUT)libxdp_global_syms.tmp; \ $(READELF) --dyn-syms --wide $(OUTPUT)libxdp.so | \ grep -Eo '[^ ]+@LIBXDP_' | cut -d@ -f1 | \ sort -u > $(OUTPUT)libxdp_versioned_syms.tmp; \ diff -u $(OUTPUT)libxdp_global_syms.tmp \ $(OUTPUT)libxdp_versioned_syms.tmp; \ rm $(OUTPUT)libxdp_global_syms.tmp \ $(OUTPUT)libxdp_versioned_syms.tmp; \ exit 1; \ fi $(TEMPLATED_SOURCES): %.c: %.c.in Makefile $(QUIET_M4)$(M4) $(DEFINES) $< > $@ || ( ret=$$?; rm -f $@; exit $$ret ) $(EMBEDDED_XDP_OBJS): %.embed.o: %.o $(QUIET_CC)$(CC) -r -nostdlib -Wl,-z,noexecstack,--format=binary $(LDFLAGS) -o $@ $< $(Q)$(OBJCOPY) --rename-section .data=.rodata,alloc,load,readonly,data,contents $@ $(XDP_OBJS): %.o: %.c $(BPF_HEADERS) $(LIBMK) $(QUIET_CLANG)$(CLANG) -target $(BPF_TARGET) $(BPF_CFLAGS) -O2 -c -g -o $@ $< .PHONY: man ifeq ($(EMACS),) man: ; else man: $(MAN_PAGE) $(MAN_OBJ): README.org $(LIBMK) $(Q)$(EMACS) -Q --batch --find-file $< --eval "(progn (require 'ox-man)(org-man-export-to-man))" $(Q)touch -r $< $@ $(MAN_PAGE): $(MAN_OBJ) $(LIBMK) $(QUIET_GEN)MODDATE=$$(git log -1 --pretty="format:%cI" README.org 2>/dev/null); \ [ "$$?" 
-eq "0" ] && DATE=$$(date '+%B %_d, %Y' -d "$$MODDATE") || DATE=$$(date '+%B %_d, %Y'); \ sed -e "1 s/DATE/$$DATE/" -e "1 s/VERSION/v$(TOOLS_VERSION)/" -e '1,5 s/^.SH "\([^"]\+\) - \([^"]\+\)"/.SH "NAME"\n\1 \\- \2\n.SH "SYNOPSIS"/' $< > $@ endif .PHONY: test test: all $(Q)$(MAKE) -C $(TEST_DIR) run xdp-tools-1.5.4/lib/libxdp/libxdp.mk0000644000175100001660000000031715003640462016661 0ustar runnerdockerLIBXDP_VERSION := $(shell sed -ne "/LIBXDP_[0-9\.]\+ {/ {s/LIBXDP_\([0-9\.]\+\) {/\1/;p;}" $(LIB_DIR)/libxdp/libxdp.map | tail -n 1) LIBXDP_MAJOR_VERSION := $(shell echo $(LIBXDP_VERSION) | sed 's/\..*//') xdp-tools-1.5.4/lib/README.org0000644000175100001660000000031615003640462015231 0ustar runnerdocker* Library files This directory contains common Makefile definitions, and common code used by the different utilities. The libbpf subdir is a git submodule linking to the upstream libbpf github repository. xdp-tools-1.5.4/lib/util/0000755000175100001660000000000015003640462014540 5ustar runnerdockerxdp-tools-1.5.4/lib/util/stats.h0000644000175100001660000000120615003640462016046 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __STATS_H #define __STATS_H #include #include "xdp/xdp_stats_kern_user.h" struct record { __u64 timestamp; bool enabled; struct xdp_stats_record total; /* defined in common_kern_user.h */ }; struct stats_record { struct record stats[XDP_ACTION_MAX]; }; int stats_print_one(struct stats_record *stats_rec); int stats_print(struct stats_record *stats_rec, struct stats_record *stats_prev); int stats_collect(int map_fd, __u32 map_type, struct stats_record *stats_rec); int stats_poll(int map_fd, int interval, bool *exit, const char *pin_dir, const char *map_name); #endif xdp-tools-1.5.4/lib/util/xdp_sample.h0000644000175100001660000001234215003640462017047 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only #ifndef XDP_SAMPLE_USER_H #define XDP_SAMPLE_USER_H #include #include #include #include "compat.h" enum stats_mask { _SAMPLE_REDIRECT_MAP = 1U << 0, SAMPLE_RX_CNT = 1U << 1, SAMPLE_REDIRECT_ERR_CNT = 1U << 2, SAMPLE_CPUMAP_ENQUEUE_CNT = 1U << 3, SAMPLE_CPUMAP_KTHREAD_CNT = 1U << 4, SAMPLE_EXCEPTION_CNT = 1U << 5, SAMPLE_DEVMAP_XMIT_CNT = 1U << 6, SAMPLE_REDIRECT_CNT = 1U << 7, SAMPLE_REDIRECT_MAP_CNT = SAMPLE_REDIRECT_CNT | _SAMPLE_REDIRECT_MAP, SAMPLE_REDIRECT_ERR_MAP_CNT = SAMPLE_REDIRECT_ERR_CNT | _SAMPLE_REDIRECT_MAP, SAMPLE_DEVMAP_XMIT_CNT_MULTI = 1U << 8, SAMPLE_SKIP_HEADING = 1U << 9, SAMPLE_RXQ_STATS = 1U << 10, SAMPLE_DROP_OK = 1U << 11, }; enum sample_compat { SAMPLE_COMPAT_CPUMAP_KTHREAD, __SAMPLE_COMPAT_MAX }; #define SAMPLE_COMPAT_MAX __SAMPLE_COMPAT_MAX /* Exit return codes */ #define EXIT_OK 0 #define EXIT_FAIL 1 #define EXIT_FAIL_OPTION 2 #define EXIT_FAIL_XDP 3 #define EXIT_FAIL_BPF 4 #define EXIT_FAIL_MEM 5 int sample_setup_maps(struct bpf_map **maps, const char *ifname); int __sample_init(int mask, int ifindex_from, int ifindex_to); void sample_teardown(void); int sample_run(unsigned int interval, void (*post_cb)(void *), void *ctx); bool sample_is_compat(enum sample_compat compat_value); bool sample_probe_cpumap_compat(void); bool sample_probe_xdp_load_bytes(void); void sample_check_cpumap_compat(struct bpf_program *prog, struct bpf_program *prog_compat); void sample_switch_mode(void); const char *get_driver_name(int ifindex); int get_mac_addr(int ifindex, void *mac_addr); #pragma GCC diagnostic push #if !defined(__clang__) && (__GNUC__ > 7) #pragma GCC diagnostic ignored "-Wstringop-truncation" #endif __attribute__((unused)) 
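/* Like strncpy(), but guarantees that the destination is NUL-terminated. A
 * minimal usage sketch (illustrative only; 'ifname' and 'src' are
 * hypothetical caller variables):
 *
 *	char ifname[16];
 *	safe_strncpy(ifname, src, sizeof(ifname));
 *
 * Unlike plain strncpy(), this always produces a valid string, even when
 * 'src' is longer than the destination buffer. */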
static inline char *safe_strncpy(char *dst, const char *src, size_t size) { if (!size) return dst; strncpy(dst, src, size - 1); dst[size - 1] = '\0'; return dst; } #pragma GCC diagnostic pop #define __attach_tp(name) \ ({ \ if (bpf_program__type(skel->progs.name) != BPF_PROG_TYPE_TRACING)\ return -EINVAL; \ skel->links.name = bpf_program__attach(skel->progs.name); \ if (!skel->links.name) \ return -errno; \ }) #define __attach_tp_compat(name, name_compat, _compat) \ ({ \ if (sample_is_compat(SAMPLE_COMPAT_ ## _compat)) \ __attach_tp(name); \ else \ __attach_tp(name_compat); \ }) #define sample_init_pre_load(skel, ifname) \ ({ \ skel->rodata->nr_cpus = libbpf_num_possible_cpus(); \ sample_check_cpumap_compat(skel->progs.tp_xdp_cpumap_kthread, \ skel->progs.tp_xdp_cpumap_compat); \ sample_setup_maps((struct bpf_map *[]){ \ skel->maps.rx_cnt, skel->maps.rxq_cnt, \ skel->maps.redir_err_cnt, \ skel->maps.cpumap_enqueue_cnt, \ skel->maps.cpumap_kthread_cnt, \ skel->maps.exception_cnt, skel->maps.devmap_xmit_cnt, \ skel->maps.devmap_xmit_cnt_multi}, ifname); \ }) #define DEFINE_SAMPLE_INIT(name) \ static int sample_init(struct name *skel, int sample_mask, \ int ifindex_from, int ifindex_to) \ { \ int ret; \ ret = __sample_init(sample_mask, ifindex_from, \ ifindex_to); \ if (ret < 0) \ return ret; \ if (sample_mask & SAMPLE_REDIRECT_MAP_CNT) \ __attach_tp(tp_xdp_redirect_map); \ if (sample_mask & SAMPLE_REDIRECT_CNT) \ __attach_tp(tp_xdp_redirect); \ if (sample_mask & SAMPLE_REDIRECT_ERR_MAP_CNT) \ __attach_tp(tp_xdp_redirect_map_err); \ if (sample_mask & SAMPLE_REDIRECT_ERR_CNT) \ __attach_tp(tp_xdp_redirect_err); \ if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) \ __attach_tp(tp_xdp_cpumap_enqueue); \ if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) \ __attach_tp_compat(tp_xdp_cpumap_kthread, \ tp_xdp_cpumap_compat, \ CPUMAP_KTHREAD); \ if (sample_mask & SAMPLE_EXCEPTION_CNT) \ __attach_tp(tp_xdp_exception); \ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) \ __attach_tp(tp_xdp_devmap_xmit); \ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) \ __attach_tp(tp_xdp_devmap_xmit_multi); \ return 0; \ } #endif xdp-tools-1.5.4/lib/util/util.mk0000644000175100001660000000034415003640462016047 0ustar runnerdockerLIB_DIR ?= .. 
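# Object lists shared by the xdp-tools utilities. The xdp_sample objects are
# only added below when ./configure found a usable bpftool, since building
# them depends on BPF skeleton generation.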
include $(LIB_DIR)/defines.mk UTIL_OBJS := params.o logging.o util.o stats.o xpcapng.o UTIL_BPF_OBJS := ifneq ($(BPFTOOL),) UTIL_OBJS += xdp_sample.o UTIL_BPF_OBJS += xdp_sample.bpf.o xdp_load_bytes.bpf.o endif xdp-tools-1.5.4/lib/util/params.h0000644000175100001660000000720715003640462016202 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PARAMS_H #define __PARAMS_H #include #include #include #include #include #include #include enum option_type { OPT_NONE, OPT_BOOL, OPT_FLAGS, OPT_STRING, OPT_U16, OPT_U32, OPT_U32_MULTI, OPT_MACADDR, OPT_IFNAME, OPT_IFNAME_MULTI, OPT_IPADDR, OPT_ENUM, OPT_MULTISTRING, __OPT_MAX }; struct prog_option { enum option_type type; size_t cfg_size; size_t cfg_offset; size_t opt_size; char *name; char short_opt; char *help; char *metavar; void *typearg; bool required; bool positional; bool hidden; unsigned int min_num; unsigned int max_num; unsigned int num_set; }; struct flag_val { const char *flagstring; unsigned int flagval; }; struct enum_val { const char *name; unsigned int value; }; struct multistring { const char **strings; size_t num_strings; }; struct u32_multi { __u32 *vals; size_t num_vals; }; struct iface { struct iface *next; char *ifname; int ifindex; }; struct ip_addr { int af; union { struct in_addr addr4; struct in6_addr addr6; } addr; }; struct mac_addr { unsigned char addr[ETH_ALEN]; }; #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) #define DEFINE_OPTION(_name, _type, _cfgtype, _cfgmember, ...) \ { \ .cfg_size = sizeof(_cfgtype), \ .opt_size = sizeof_field(_cfgtype, _cfgmember), \ .cfg_offset = offsetof(_cfgtype, _cfgmember), .name = _name, \ .type = _type, __VA_ARGS__ \ } #define END_OPTIONS \ { \ } #define FOR_EACH_OPTION(_options, _opt) \ for (_opt = _options; _opt->type != OPT_NONE; _opt++) struct prog_command { const char *name; int (*func)(const void *cfg, const char *pin_root_path); struct prog_option *options; const void *default_cfg; char *doc; bool no_cfg; }; #define DEFINE_COMMAND_NAME(_name, _func, _doc) \ { \ .name = _name, .func = do_##_func, \ .options = _func##_options, .default_cfg = &defaults_##_func, \ .doc = _doc \ } #define DEFINE_COMMAND(_name, _doc) DEFINE_COMMAND_NAME(textify(_name), _name, _doc) #define DEFINE_COMMAND_NODEF(_name, _doc) \ { \ .name = textify(_name), .func = do_##_name, \ .options = _name##_options, .doc = _doc \ } #define END_COMMANDS \ { \ } const char *get_enum_name(const struct enum_val *vals, unsigned int value); void print_flags(char *buf, size_t buf_len, const struct flag_val *flags, unsigned long flags_val); void print_addr(char *buf, size_t buf_len, const struct ip_addr *addr); void print_macaddr(char *buf, size_t buf_len, const struct mac_addr *addr); bool macaddr_is_null(const struct mac_addr *addr); bool ipaddr_is_null(const struct ip_addr *addr); bool is_prefix(const char *prefix, const char *string); void usage(const char *prog_name, const char *doc, const struct prog_option *long_options, bool full); int parse_cmdline_args(int argc, char **argv, struct prog_option *long_options, void *cfg, size_t cfg_size, const char *prog, const char *usage_cmd, const char *doc, const void *defaults); int dispatch_commands(const char *argv0, int argc, char **argv, const struct prog_command *cmds, size_t cfg_size, const char *prog_name, bool needs_bpffs); #endif /* __COMMON_PARAMS_H */ xdp-tools-1.5.4/lib/util/xdp_load_bytes.bpf.c0000644000175100001660000000072615003640462020457 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 #include #include #ifndef 
HAVE_LIBBPF_BPF_PROGRAM__TYPE static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189; #endif SEC("xdp") int xdp_probe_prog(struct xdp_md *ctx) { __u8 buf[10]; int err; err = bpf_xdp_load_bytes(ctx, 0, buf, sizeof(buf)); if (err) return XDP_ABORTED; return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/util/util.h0000644000175100001660000000607615003640462015677 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __UTIL_H #define __UTIL_H #include #include #include "params.h" #ifndef PATH_MAX #define PATH_MAX 4096 #endif #define STRERR_BUFSIZE 1024 #define _textify(x) #x #define textify(x) _textify(x) #define __unused __attribute__((unused)) #ifndef BPF_DIR_MNT #define BPF_DIR_MNT "/sys/fs/bpf" #endif #ifndef BPF_OBJECT_PATH #define BPF_OBJECT_PATH "/usr/lib/bpf" #endif #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) #define FOR_EACH_MAP_KEY(_err, _map_fd, _map_key, _prev_key) \ for (_err = bpf_map_get_next_key(_map_fd, NULL, &_map_key); \ !_err; \ _prev_key = _map_key, \ _err = bpf_map_get_next_key(_map_fd, &_prev_key, &_map_key)) #define min(x, y) ((x) < (y) ? x : y) #define max(x, y) ((x) > (y) ? x : y) #ifndef offsetof #define offsetof(type, member) ((size_t) & ((type *)0)->member) #endif #ifndef container_of #define container_of(ptr, type, member) \ ({ \ const typeof(((type *)0)->member) *__mptr = (ptr); \ (type *)((char *)__mptr - offsetof(type, member)); \ }) #endif #ifndef roundup #define roundup(x, y) \ ({ \ typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ }) #endif int try_snprintf(char *buf, size_t buf_len, const char *format, ...); int make_dir_subdir(const char *parent, const char *dir); int check_bpf_environ(void); int double_rlimit(void); int attach_xdp_program(struct xdp_program *prog, const struct iface *iface, enum xdp_attach_mode mode, const char *pin_root_dir); int detach_xdp_program(struct xdp_program *prog, const struct iface *iface, enum xdp_attach_mode mode, const char *pin_root_dir); int find_bpf_file(char *buf, size_t buf_size, const char *progname); struct bpf_object *open_bpf_file(const char *progname, struct bpf_object_open_opts *opts); typedef int (*program_callback)(const struct iface *iface, struct xdp_program *prog, enum xdp_attach_mode mode, void *arg); typedef int (*multiprog_callback)(const struct iface *iface, const struct xdp_multiprog *mp, void *arg); int get_pinned_program(const struct iface *iface, const char *pin_root_path, enum xdp_attach_mode *mode, struct xdp_program **prog); int iterate_pinned_programs(const char *pin_root_path, program_callback cb, void *arg); int iterate_iface_multiprogs(multiprog_callback cb, void *arg); int get_bpf_root_dir(char *buf, size_t buf_len, const char *subdir, bool fatal); int get_pinned_map_fd(const char *bpf_root, const char *map_name, struct bpf_map_info *info); int unlink_pinned_map(int dir_fd, const char *map_name); const char *action2str(__u32 action); int prog_lock_acquire(const char *directory); int prog_lock_release(int lock_fd); const char *get_libbpf_version(void); int iface_print_status(const struct iface *iface); #endif xdp-tools-1.5.4/lib/util/params.c0000644000175100001660000004125515003640462016176 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include /* XDP_FLAGS_* depend on kernel-headers installed */ #include #include #include "params.h" #include "logging.h" #include "util.h" #define 
BUFSIZE 30 #define FIRST_PRINTABLE 65 /* ord('A') = 65 */ #define VERSION_SHORT_OPT 0 static bool opt_needs_arg(const struct prog_option *opt) { return opt->type > OPT_BOOL && !opt->positional; } static bool opt_is_multi(const struct prog_option *opt) { return opt->type == OPT_MULTISTRING || opt->type == OPT_IFNAME_MULTI || opt->type == OPT_U32_MULTI; } static int handle_bool(__unused char *optarg, void *tgt, __unused struct prog_option *opt) { bool *opt_set = tgt; *opt_set = true; return 0; } static int handle_string(char *optarg, void *tgt, __unused struct prog_option *opt) { char **opt_set = tgt; *opt_set = optarg; return 0; } static int handle_multistring(char *optarg, void *tgt, __unused struct prog_option *opt) { struct multistring *opt_set = tgt; void *ptr; if (opt_set->num_strings +1 > SIZE_MAX / sizeof(*opt_set->strings)) return -ENOMEM; ptr = realloc(opt_set->strings, sizeof(*opt_set->strings) * (opt_set->num_strings +1)); if (!ptr) return -errno; opt_set->strings = ptr; opt_set->strings[opt_set->num_strings++] = optarg; return 0; } static int handle_u32(char *optarg, void *tgt, __unused struct prog_option *opt) { __u32 *opt_set = tgt; unsigned long val; errno = 0; val = strtoul(optarg, NULL, 10); if (errno || val > 0xffffffff) return -EINVAL; *opt_set = val; return 0; } static int handle_u32_multi(char *optarg, void *tgt, struct prog_option *opt) { struct u32_multi *opt_set = tgt; __u32 val; void *ptr; int ret; if (opt_set->num_vals +1 > SIZE_MAX / sizeof(*opt_set->vals)) return -ENOMEM; ret = handle_u32(optarg, &val, opt); if (ret) return ret; ptr = realloc(opt_set->vals, sizeof(*opt_set->vals) * (opt_set->num_vals +1)); if (!ptr) return -errno; opt_set->vals = ptr; opt_set->vals[opt_set->num_vals++] = val; return 0; } static int handle_u16(char *optarg, void *tgt, __unused struct prog_option *opt) { __u16 *opt_set = tgt; unsigned long val; errno = 0; val = strtoul(optarg, NULL, 10); if (errno || val > 0xffff) return -EINVAL; *opt_set = val; return 0; } static int parse_mac(char *str, unsigned char mac[ETH_ALEN]) { unsigned int v[ETH_ALEN]; int len, i; /* Based on https://stackoverflow.com/a/20553913 */ len = sscanf(str, "%x:%x:%x:%x:%x:%x%*c", &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]); if (len != ETH_ALEN) return -EINVAL; for (i = 0; i < ETH_ALEN; i++) { if (v[i] > 0xFF) return -EINVAL; mac[i] = v[i]; } return 0; } static int handle_macaddr(char *optarg, void *tgt, __unused struct prog_option *opt) { struct mac_addr *opt_set = tgt; int err; err = parse_mac(optarg, opt_set->addr); if (err) pr_warn("Invalid MAC address: %s\n", optarg); return err; } void print_macaddr(char *buf, size_t buf_len, const struct mac_addr *addr) { int i, len; for (i = 0; buf_len > 0 && i < ETH_ALEN; i++) { len = snprintf(buf, buf_len, "%02x", addr->addr[i]); if (len < 0 || (size_t)len >= buf_len) break; buf += len; buf_len -= len; if (i < ETH_ALEN - 1) { *buf++ = ':'; buf_len -= 1; } } *buf = '\0'; } bool macaddr_is_null(const struct mac_addr *addr) { static struct mac_addr nulladdr = {}; return memcmp(addr, &nulladdr, sizeof(nulladdr)) == 0; } static const struct flag_val *find_flag(const struct flag_val *flag_vals, const char *chr) { while (flag_vals->flagstring) { if (strcmp(chr, flag_vals->flagstring) == 0) return flag_vals; flag_vals++; } return NULL; } static int handle_flags(char *optarg, void *tgt, struct prog_option *opt) { const struct flag_val *flag, *flag_vals = opt->typearg; unsigned int *opt_set = tgt; unsigned int flagval = 0; char *c = NULL; while (*optarg) { c = strchr(optarg, ','); 
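/* optarg is a comma-separated list of flag names (e.g. "src,dst"); split it
 * in place at each ',' and OR together the value of every name found in
 * the option's flag_val table. */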
if (c) *c = '\0'; flag = find_flag(flag_vals, optarg); if (!flag) return -EINVAL; flagval |= flag->flagval; if (!c) break; optarg = c + 1; } *opt_set = flagval; return 0; } static int get_ifindex(const char *ifname) { int ifindex; ifindex = if_nametoindex(ifname); if (!ifindex) { pr_warn("Couldn't find network interface '%s'.\n", ifname); return -ENOENT; } return ifindex; } static int handle_ifname(char *optarg, void *tgt, __unused struct prog_option *opt) { struct iface *iface = tgt; int ifindex; ifindex = get_ifindex(optarg); if (ifindex < 0) return ifindex; iface->ifname = optarg; iface->ifindex = ifindex; return 0; } static int handle_ifname_multi(char *optarg, void *tgt, __unused struct prog_option *opt) { struct iface **ifaces = tgt; struct iface *iface, *tmp; int ifindex; ifindex = get_ifindex(optarg); if (ifindex < 0) return ifindex; iface = calloc(1, sizeof(*iface)); if (!iface) return -ENOMEM; iface->ifname = optarg; iface->ifindex = ifindex; if (!*ifaces) { *ifaces = iface; return 0; } tmp = *ifaces; while(tmp->next) tmp = tmp->next; tmp->next = iface; return 0; } void print_addr(char *buf, size_t buf_len, const struct ip_addr *addr) { inet_ntop(addr->af, &addr->addr, buf, buf_len); } bool ipaddr_is_null(const struct ip_addr *addr) { static struct ip_addr nulladdr = {}; return memcmp(addr, &nulladdr, sizeof(nulladdr)) == 0; } static int handle_ipaddr(char *optarg, void *tgt, __unused struct prog_option *opt) { struct ip_addr *addr = tgt; int af; af = strchr(optarg, ':') ? AF_INET6 : AF_INET; if (inet_pton(af, optarg, &addr->addr) != 1) { pr_warn("Invalid IP address: %s\n", optarg); return -ENOENT; /* caller won't print error on ENOENT */ } addr->af = af; return 0; } static const struct enum_val *find_enum(const struct enum_val *enum_vals, const char *chr) { while (enum_vals->name) { if (strcmp(chr, enum_vals->name) == 0) return enum_vals; enum_vals++; } return NULL; } static int handle_enum(char *optarg, void *tgt, struct prog_option *opt) { const struct enum_val *val, *all_vals = opt->typearg; unsigned int *opt_set = tgt; val = find_enum(all_vals, optarg); if (!val) return -EINVAL; *opt_set = val->value; return 0; } static void print_enum_vals(char *buf, size_t buf_len, const struct enum_val *vals) { const struct enum_val *val; bool first = true; for (val = vals; buf_len && val->name; val++) { int len; if (!first) { *buf++ = ','; buf_len--; } first = false; len = snprintf(buf, buf_len, "%s", val->name); if (len < 0 || (size_t)len >= buf_len) break; buf += len; buf_len -= len; } *buf = '\0'; } const char *get_enum_name(const struct enum_val *vals, unsigned int value) { const struct enum_val *val; for (val = vals; val->name; val++) if (val->value == value) return val->name; return NULL; } static const struct opthandler { int (*func)(char *optarg, void *tgt, struct prog_option *opt); } handlers[__OPT_MAX] = { {NULL}, {handle_bool}, {handle_flags}, {handle_string}, {handle_u16}, {handle_u32}, {handle_u32_multi}, {handle_macaddr}, {handle_ifname}, {handle_ifname_multi}, {handle_ipaddr}, {handle_enum}, {handle_multistring} }; void print_flags(char *buf, size_t buf_len, const struct flag_val *flags, unsigned long flags_set) { const struct flag_val *flag; bool first = true; for (flag = flags; buf_len && flag->flagstring; flag++) { int len; if (!(flag->flagval & flags_set)) continue; if (!first) { *buf++ = ','; buf_len--; } first = false; len = snprintf(buf, buf_len, "%s", flag->flagstring); if (len < 0 || (size_t)len >= buf_len) break; buf += len; buf_len -= len; } *buf = '\0'; } 
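/* Inverse of handle_flags() above: render a flag bitmask back into its
 * comma-separated string form. A minimal sketch, assuming a hypothetical
 * caller-defined table:
 *
 *	static const struct flag_val flags[] = {
 *		{ "src", 1 }, { "dst", 2 }, { NULL, 0 }
 *	};
 *	char buf[100];
 *	print_flags(buf, sizeof(buf), flags, 3);
 *
 * after which buf contains "src,dst". */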
static void print_help_flags(const struct prog_option *opt) { char buf[100] = {}; if (!opt->typearg) pr_warn("Missing typearg for opt %s\n", opt->name); else print_flags(buf, sizeof(buf), opt->typearg, -1); printf(" %s (valid values: %s)", opt->help, buf); } static void print_help_enum(const struct prog_option *opt) { char buf[100] = {}; if (!opt->typearg) pr_warn("Missing typearg for opt %s\n", opt->name); else print_enum_vals(buf, sizeof(buf), opt->typearg); printf(" %s (valid values: %s)", opt->help, buf); } static const struct helprinter { void (*func)(const struct prog_option *opt); } help_printers[__OPT_MAX] = { {NULL}, {NULL}, {print_help_flags}, {NULL}, {NULL}, {NULL}, {NULL}, {NULL}, {NULL}, {NULL}, {NULL}, {print_help_enum}, {NULL} }; static void _print_positional(const struct prog_option *long_options) { const struct prog_option *opt; FOR_EACH_OPTION (long_options, opt) { if (!opt->positional) continue; printf(" %s", opt->metavar ?: opt->name); } } static void _print_options(const struct prog_option *poptions, bool required) { const struct prog_option *opt; FOR_EACH_OPTION (poptions, opt) { if (opt->required != required || opt->hidden) continue; if (opt->positional) { printf(" %-30s", opt->metavar ?: opt->name); } else { char buf[BUFSIZE]; int pos; if (opt->short_opt >= FIRST_PRINTABLE) printf(" -%c,", opt->short_opt); else printf(" "); pos = snprintf(buf, BUFSIZE, " --%s", opt->name); if (pos < 0 || pos >= BUFSIZE) { pr_warn("opt name too long: %s\n", opt->name); continue; } if (opt->metavar) snprintf(&buf[pos], BUFSIZE - pos, " %s", opt->metavar); printf("%-28s", buf); } if (help_printers[opt->type].func != NULL) help_printers[opt->type].func(opt); else if (opt->help) printf(" %s", opt->help); printf("\n"); } } bool is_prefix(const char *pfx, const char *str) { if (!pfx) return false; if (strlen(str) < strlen(pfx)) return false; return !memcmp(str, pfx, strlen(pfx)); } void usage(const char *prog_name, const char *doc, const struct prog_option *poptions, bool full) { const struct prog_option *opt; int num_req = 0; printf("\nUsage: %s [options]", prog_name); _print_positional(poptions); printf("\n"); if (!full) { printf("Use --help (or -h) to see full option list.\n"); return; } FOR_EACH_OPTION (poptions, opt) if (opt->required) num_req++; printf("\n %s\n\n", doc); if (num_req) { printf("Required parameters:\n"); _print_options(poptions, true); printf("\n"); } printf("Options:\n"); _print_options(poptions, false); printf(" -v, --verbose Enable verbose logging (-vv: more verbose)\n"); printf(" --version Display version information\n"); printf(" -h, --help Show this help\n"); printf("\n"); } static int prog_options_to_options(struct prog_option *poptions, struct option **options, char **optstring) { int num = 0, num_cmn = 0, n_sopt = VERSION_SHORT_OPT + 1; struct option *new_options, *nopt; struct prog_option *opt; char buf[100], *c = buf; struct option common_opts[] = { {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {"version", no_argument, NULL, VERSION_SHORT_OPT}, {} }; for (nopt = common_opts; nopt->name; nopt++) { num++; num_cmn++; if (nopt->val != VERSION_SHORT_OPT) *c++ = nopt->val; } FOR_EACH_OPTION (poptions, opt) if (!opt->positional) num++; new_options = calloc(num + 1, sizeof(struct option)); if (!new_options) return -ENOMEM; memcpy(new_options, &common_opts, sizeof(struct option) * num_cmn); nopt = new_options + num_cmn; FOR_EACH_OPTION (poptions, opt) { if (opt->positional) continue; if (opt->short_opt) { *(c++) = opt->short_opt; if 
(opt_needs_arg(opt)) *(c++) = ':'; } else { /* getopt expects options to have unique values in the * 'val' field, however we want to be able to define * options that don't have a short opt. So get around * that, just number such options sequentially. */ if (n_sopt >= FIRST_PRINTABLE) { pr_warn("Too many options with no short opt\n"); goto err; } opt->short_opt = n_sopt++; } nopt->has_arg = opt_needs_arg(opt) ? required_argument : no_argument; nopt->name = opt->name; nopt->val = opt->short_opt; nopt->flag = NULL; nopt++; } *(c++) = '\0'; *optstring = strdup(buf); if (!*optstring) goto err; /* Make sure we clear the last option, or else we crash. */ memset(new_options + num, 0, sizeof(struct option)); *options = new_options; return 0; err: free(new_options); return -EINVAL; } static struct prog_option *find_opt(struct prog_option *all_opts, int optchar) { struct prog_option *opt; FOR_EACH_OPTION (all_opts, opt) if (opt->short_opt == optchar) return opt; return NULL; } static int _set_opt(void *cfg, struct prog_option *opt, char *optarg) { int ret; if (opt->max_num && opt->num_set + 1 > opt->max_num) { pr_warn("Too many parameters for %s (max %u)\n", opt->metavar ?: opt->name, opt->max_num); return -E2BIG; } ret = handlers[opt->type].func(optarg, (cfg + opt->cfg_offset), opt); if (!ret) opt->num_set++; else if (ret != -ENOENT) pr_warn("Couldn't parse option %s: %s.\n", opt->name, strerror(-ret)); return ret; } static int set_opt(void *cfg, struct prog_option *all_opts, int optchar, char *optarg) { struct prog_option *opt; if (!cfg) return -EFAULT; opt = find_opt(all_opts, optchar); if (!opt) return -ENOENT; return _set_opt(cfg, opt, optarg); } static int set_pos_opt(void *cfg, struct prog_option *all_opts, char *optarg) { struct prog_option *o, *opt = NULL; FOR_EACH_OPTION (all_opts, o) { if (o->positional && (!o->num_set || opt_is_multi(o))) { opt = o; break; } } if (!opt) return -ENOENT; return _set_opt(cfg, opt, optarg); } int parse_cmdline_args(int argc, char **argv, struct prog_option *poptions, void *cfg, size_t cfg_size, const char *prog, const char *usage_cmd, const char *doc, const void *defaults) { struct prog_option *opt_iter; struct option *long_options; bool full_help = false; int i, opt, err = 0; int longindex = 0; char *optstring; if (prog_options_to_options(poptions, &long_options, &optstring)) { pr_warn("Unable to malloc()\n"); return -ENOMEM; } if (defaults) memcpy(cfg, defaults, cfg_size); /* Parse commands line args */ while ((opt = getopt_long(argc, argv, optstring, long_options, &longindex)) != -1) { switch (opt) { case 'h': usage(usage_cmd, doc, poptions, true); err = EXIT_FAILURE; goto out; case 'v': increase_log_level(); break; case VERSION_SHORT_OPT: printf("%s version %s using libbpf version %s\n", prog, TOOLS_VERSION, get_libbpf_version()); err = EXIT_FAILURE; goto out; default: if (set_opt(cfg, poptions, opt, optarg)) { usage(prog, doc, poptions, full_help); err = EXIT_FAILURE; goto out; } break; } } for (i = optind; i < argc; i++) { if (set_pos_opt(cfg, poptions, argv[i])) { usage(usage_cmd, doc, poptions, full_help); err = EXIT_FAILURE; goto out; } } FOR_EACH_OPTION (poptions, opt_iter) { if (opt_iter->num_set && (!opt_iter->min_num || opt_iter->num_set >= opt_iter->min_num)) continue; if (opt_iter->required) { if (opt_iter->positional) pr_warn("Missing required parameter %s\n", opt_iter->metavar ?: opt_iter->name); else pr_warn("Missing required option '--%s'\n", opt_iter->name); usage(prog, doc, poptions, full_help); err = EXIT_FAILURE; goto out; } } out: 
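/* Common exit path: the option table and optstring built by
 * prog_options_to_options() are heap-allocated, so free them whether
 * parsing succeeded or not. */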
free(long_options); free(optstring); return err; } int dispatch_commands(const char *argv0, int argc, char **argv, const struct prog_command *cmds, size_t cfg_size, const char *prog_name, bool needs_bpffs) { const struct prog_command *c, *cmd = NULL; int ret = EXIT_FAILURE, err, len; char pin_root_path[PATH_MAX]; char usagebuf[100]; void *cfg; for (c = cmds; c->name; c++) { if (is_prefix(argv0, c->name)) { cmd = c; break; } } if (!cmd) { pr_warn("Command '%s' is unknown, try '%s help'.\n", argv0, prog_name); return EXIT_FAILURE; } if (cmd->no_cfg) return cmd->func(NULL, NULL); cfg = calloc(1, cfg_size); if (!cfg) { pr_warn("Couldn't allocate memory\n"); return EXIT_FAILURE; } len = snprintf(usagebuf, sizeof(usagebuf), "%s %s", prog_name, cmd->name); if (len < 0 || (size_t)len >= sizeof(usagebuf)) goto out; err = parse_cmdline_args(argc, argv, cmd->options, cfg, cfg_size, prog_name, usagebuf, cmd->doc, cmd->default_cfg); if (err) goto out; err = get_bpf_root_dir(pin_root_path, sizeof(pin_root_path), prog_name, needs_bpffs); if (err && needs_bpffs) goto out; err = check_bpf_environ(); if (err) goto out; ret = cmd->func(cfg, pin_root_path); out: free(cfg); return ret; } xdp-tools-1.5.4/lib/util/compat.h0000644000175100001660000000165515003640462016203 0ustar runnerdocker#ifndef __COMPAT_H #define __COMPAT_H #include #include #ifndef HAVE_LIBBPF_BTF__TYPE_CNT static inline __u32 btf__type_cnt(const struct btf *btf) { /* old function didn't include 'void' type in count */ return btf__get_nr_types(btf) + 1; } #endif #ifndef HAVE_LIBBPF_BPF_PROGRAM__TYPE static inline enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) { return bpf_program__get_type((struct bpf_program *)prog); } #endif #ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM static inline struct bpf_program *bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog) { return bpf_program__next(prog, obj); } #endif #ifndef HAVE_LIBBPF_BPF_PROGRAM__EXPECTED_ATTACH_TYPE static inline enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) { return bpf_program__get_expected_attach_type((struct bpf_program *)prog); } #endif #endif xdp-tools-1.5.4/lib/util/util.c0000644000175100001660000004526015003640462015670 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include #include #include #include #include #include #include #include #include /* Need XDP flags */ #include /* BPF FS magic */ #include /* ERR_PTR */ #include #include #include #include "util.h" #include "logging.h" static struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {"unspecified", XDP_MODE_UNSPEC}, {NULL, 0} }; int try_snprintf(char *buf, size_t buf_len, const char *format, ...) { va_list args; int len; va_start(args, format); len = vsnprintf(buf, buf_len, format, args); va_end(args); if (len < 0) return -EINVAL; else if ((size_t)len >= buf_len) return -ENAMETOOLONG; return 0; } static int set_rlimit(unsigned int min_limit) { struct rlimit limit; int err = 0; err = getrlimit(RLIMIT_MEMLOCK, &limit); if (err) { err = -errno; pr_warn("Couldn't get current rlimit\n"); return err; } if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur == 0) { pr_debug("Current rlimit is infinity or 0. 
Not raising\n"); return -ENOMEM; } if (min_limit) { if (limit.rlim_cur >= min_limit) { pr_debug("Current rlimit %ju already >= minimum %u\n", (uintmax_t)limit.rlim_cur, min_limit); return 0; } pr_debug("Setting rlimit to minimum %u\n", min_limit); limit.rlim_cur = min_limit; } else { pr_debug("Doubling current rlimit of %ju\n", (uintmax_t)limit.rlim_cur); limit.rlim_cur <<= 1; } limit.rlim_max = max(limit.rlim_cur, limit.rlim_max); err = setrlimit(RLIMIT_MEMLOCK, &limit); if (err) { err = -errno; pr_warn("Couldn't raise rlimit: %s\n", strerror(-err)); return err; } return 0; } int double_rlimit(void) { pr_debug("Permission denied when loading eBPF object; " "raising rlimit and retrying\n"); return set_rlimit(0); } static const char *_libbpf_compile_version = LIBBPF_VERSION; static char _libbpf_version[10] = {}; const char *get_libbpf_version(void) { /* Start by copying compile-time version into buffer so we have a * fallback value in case we are dynamically linked, or can't find a * version in /proc/self/maps below. */ strncpy(_libbpf_version, _libbpf_compile_version, sizeof(_libbpf_version)-1); #ifdef LIBBPF_DYNAMIC char path[PATH_MAX], buf[PATH_MAX], *s; bool found = false; FILE *fp; /* When dynamically linking against libbpf, we can't be sure that the * version we discovered at compile time is actually the one we are * using at runtime. This can lead to hard-to-debug errors, so we try to * discover the correct version at runtime. * * The simple solution to this would be if libbpf itself exported a * version in its API. But since it doesn't, we work around this by * parsing the mappings of the binary at runtime, looking for the full * filename of libbpf.so and using that. */ fp = fopen("/proc/self/maps", "r"); if (fp == NULL) goto out; while ((s = fgets(buf, sizeof(buf), fp)) != NULL) { /* We are looking for a line like: * 7f63c2105000-7f63c2106000 rw-p 00032000 fe:02 4200947 /usr/lib/libbpf.so.0.1.0 */ if (sscanf(s, "%*x-%*x %*4c %*x %*5c %*d %s\n", path) == 1 && (s = strstr(path, "libbpf.so.")) != NULL) { strncpy(_libbpf_version, s+10, sizeof(_libbpf_version)-1); found = true; break; } } fclose(fp); out: if (!found) pr_warn("Couldn't find runtime libbpf version - falling back to compile-time value!\n"); #endif _libbpf_version[sizeof(_libbpf_version)-1] = '\0'; return _libbpf_version; } int find_bpf_file(char *buf, size_t buf_size, const char *progname) { static char *bpf_obj_paths[] = { #ifdef DEBUG ".", #endif BPF_OBJECT_PATH, NULL }; struct stat sb = {}; char **path; int err; for (path = bpf_obj_paths; *path; path++) { err = try_snprintf(buf, buf_size, "%s/%s", *path, progname); if (err) return err; pr_debug("Looking for '%s'\n", buf); err = stat(buf, &sb); if (err) continue; return 0; } pr_warn("Couldn't find a BPF file with name %s\n", progname); return -ENOENT; } struct bpf_object *open_bpf_file(const char *progname, struct bpf_object_open_opts *opts) { char buf[PATH_MAX]; int err; err = find_bpf_file(buf, sizeof(buf), progname); if (err) return ERR_PTR(err); pr_debug("Loading bpf file '%s' from '%s'\n", progname, buf); return bpf_object__open_file(buf, opts); } static int get_pinned_object_fd(const char *path, void *info, __u32 *info_len) { char errmsg[STRERR_BUFSIZE]; int pin_fd, err; pin_fd = bpf_obj_get(path); if (pin_fd < 0) { err = -errno; libbpf_strerror(-err, errmsg, sizeof(errmsg)); pr_debug("Couldn't retrieve pinned object '%s': %s\n", path, errmsg); return err; } if (info) { err = bpf_obj_get_info_by_fd(pin_fd, info, info_len); if (err) { err = -errno; 
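/* convert to the negative-errno convention used by the rest of this
 * file before formatting the error message */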
libbpf_strerror(-err, errmsg, sizeof(errmsg)); pr_debug("Couldn't retrieve object info: %s\n", errmsg); return err; } } return pin_fd; } int make_dir_subdir(const char *parent, const char *dir) { char path[PATH_MAX]; int err; err = try_snprintf(path, sizeof(path), "%s/%s", parent, dir); if (err) return err; err = mkdir(parent, S_IRWXU); if (err && errno != EEXIST) { err = -errno; return err; } err = mkdir(path, S_IRWXU); if (err && errno != EEXIST) { err = -errno; return err; } return 0; } int attach_xdp_program(struct xdp_program *prog, const struct iface *iface, enum xdp_attach_mode mode, const char *pin_root_path) { char pin_path[PATH_MAX]; int err = 0; if (!prog || !pin_root_path) return -EINVAL; err = make_dir_subdir(pin_root_path, "programs"); if (err) { pr_warn("Unable to create pin directory: %s\n", strerror(-err)); return err; } err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s/%s", pin_root_path, iface->ifname, xdp_program__name(prog)); if (err) return err; err = xdp_program__attach(prog, iface->ifindex, mode, 0); if (err) { if (pin_root_path && err != -EEXIST) unlink(pin_path); return err; } pr_debug("Program '%s' loaded on interface '%s'%s\n", xdp_program__name(prog), iface->ifname, mode == XDP_MODE_SKB ? " in skb mode" : ""); err = xdp_program__pin(prog, pin_path); if (err) { pr_warn("Unable to pin XDP program at %s: %s\n", pin_path, strerror(-err)); goto unload; } pr_debug("XDP program pinned at %s\n", pin_path); return err; unload: xdp_program__detach(prog, iface->ifindex, mode, 0); return err; } int detach_xdp_program(struct xdp_program *prog, const struct iface *iface, enum xdp_attach_mode mode, const char *pin_root_path) { char pin_path[PATH_MAX]; int err; err = xdp_program__detach(prog, iface->ifindex, mode, 0); if (err) goto out; err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s/%s", pin_root_path, iface->ifname, xdp_program__name(prog)); if (err) return err; err = unlink(pin_path); if (err && errno != ENOENT) goto out; err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s", pin_root_path, iface->ifname); if (err) goto out; err = rmdir(pin_path); if (err && errno == ENOENT) err = 0; else if (err) err = -errno; out: return err; } int get_pinned_program(const struct iface *iface, const char *pin_root_path, enum xdp_attach_mode *mode, struct xdp_program **xdp_prog) { int ret = -ENOENT, err, ifindex = iface->ifindex; char pin_path[PATH_MAX]; bool remove_all = false; enum xdp_attach_mode m; struct dirent *de; DIR *dr; err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s", pin_root_path, iface->ifname); if (err) return err; dr = opendir(pin_path); if (!dr) { err = -errno; pr_debug("Couldn't open pin directory %s: %s\n", pin_path, strerror(-err)); return err; } if (!ifindex) ifindex = if_nametoindex(iface->ifname); if (!ifindex) { pr_debug("Interface %s no longer exists\n", iface->ifname); remove_all = true; ret = -ENODEV; } while ((de = readdir(dr)) != NULL) { DECLARE_LIBXDP_OPTS(xdp_program_opts, opts, 0); struct xdp_program *prog; if (!strcmp(".", de->d_name) || !strcmp("..", de->d_name)) continue; err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s/%s", pin_root_path, iface->ifname, de->d_name); if (err) goto out; if (remove_all) { err = unlink(pin_path); if (err) ret = err; continue; } opts.pin_path = pin_path; prog = xdp_program__create(&opts); if (libxdp_get_error(prog) || !(m = xdp_program__is_attached(prog, iface->ifindex))) { ret = libxdp_get_error(prog) ?: -ENOENT; pr_debug("Program %s no longer loaded on %s: 
%s\n", de->d_name, iface->ifname, strerror(-ret)); err = unlink(pin_path); if (err) ret = err; if (prog) xdp_program__close(prog); } else { if (strcmp(xdp_program__name(prog), de->d_name)) { pr_warn("Pinned and kernel prog names differ: %s/%s\n", xdp_program__name(prog), de->d_name); ret = -EFAULT; xdp_program__close(prog); } else { ret = 0; *xdp_prog = prog; if (mode) *mode = m; } break; } } out: closedir(dr); return ret; } int iterate_pinned_programs(const char *pin_root_path, program_callback cb, void *arg) { char pin_path[PATH_MAX]; struct dirent *de; int err = 0; DIR *dr; err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs", pin_root_path); if (err) return err; dr = opendir(pin_path); if (!dr) return -ENOENT; while ((de = readdir(dr)) != NULL) { enum xdp_attach_mode mode = XDP_MODE_UNSPEC; struct xdp_program *prog = NULL; struct iface iface = {}; if (!strcmp(".", de->d_name) || !strcmp("..", de->d_name)) continue; iface.ifname = de->d_name; iface.ifindex = if_nametoindex(iface.ifname); err = try_snprintf(pin_path, sizeof(pin_path), "%s/programs/%s", pin_root_path, iface.ifname); if (err) goto out; err = get_pinned_program(&iface, pin_root_path, &mode, &prog); if (err == -ENOENT || err == -ENODEV) { err = rmdir(pin_path); if (err) goto out; continue; } else if (err) { goto out; } err = cb(&iface, prog, mode, arg); xdp_program__close(prog); if (err) goto out; } out: closedir(dr); return err; } int iterate_iface_multiprogs(multiprog_callback cb, void *arg) { struct if_nameindex *idx, *indexes = NULL; int err = 0; indexes = if_nameindex(); if (!indexes) { err = -errno; pr_warn("Couldn't get list of interfaces: %s\n", strerror(-err)); return err; } for (idx = indexes; idx->if_index; idx++) { struct xdp_multiprog *mp; struct iface iface = { .ifindex = idx->if_index, .ifname = idx->if_name, }; mp = xdp_multiprog__get_from_ifindex(iface.ifindex); if (IS_ERR_OR_NULL(mp)) { if (PTR_ERR(mp) != -ENOENT) { err = PTR_ERR(mp); pr_warn("Error getting XDP status for interface %s: %s\n", idx->if_name, strerror(-err)); goto out; } mp = NULL; } err = cb(&iface, mp, arg); xdp_multiprog__close(mp); if (err) goto out; } out: if_freenameindex(indexes); return err; } static bool bpf_is_valid_mntpt(const char *mnt, unsigned long magic) { struct statfs st_fs; if (statfs(mnt, &st_fs) < 0) return false; if ((unsigned long)st_fs.f_type != magic) return false; return true; } static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt, int len, const char *mntpt) { if (bpf_is_valid_mntpt(mntpt, magic)) { strncpy(mnt, mntpt, len - 1); mnt[len - 1] = '\0'; return mnt; } return NULL; } static const char *bpf_find_mntpt(const char *fstype, unsigned long magic, char *mnt, int len, const char * const *known_mnts) { const char * const *ptr; char type[100]; FILE *fp; if (known_mnts) { ptr = known_mnts; while (*ptr) { if (bpf_find_mntpt_single(magic, mnt, len, *ptr)) return mnt; ptr++; } } if (len != PATH_MAX) return NULL; fp = fopen("/proc/mounts", "r"); if (fp == NULL) return NULL; while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n", mnt, type) == 2) { if (strcmp(type, fstype) == 0) break; } fclose(fp); if (strcmp(type, fstype) != 0) return NULL; return mnt; } static int bpf_mnt_check_target(const char *target) { int ret; ret = mkdir(target, S_IRWXU); if (ret && errno != EEXIST) { ret = -errno; pr_warn("mkdir %s failed: %s\n", target, strerror(-ret)); return ret; } return 0; } /* simplified version of code from iproute2 */ static const char *bpf_get_work_dir() { static char 
bpf_tmp[PATH_MAX] = BPF_DIR_MNT; static char bpf_wrk_dir[PATH_MAX]; static const char *mnt; static bool bpf_mnt_cached; static const char *const bpf_known_mnts[] = { BPF_DIR_MNT, "/bpf", 0, }; int ret; if (bpf_mnt_cached) return mnt; mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp, sizeof(bpf_tmp), bpf_known_mnts); if (!mnt) { mnt = BPF_DIR_MNT; ret = bpf_mnt_check_target(mnt); if (ret || !bpf_is_valid_mntpt(mnt, BPF_FS_MAGIC)) { mnt = NULL; goto out; } } strncpy(bpf_wrk_dir, mnt, sizeof(bpf_wrk_dir)); bpf_wrk_dir[sizeof(bpf_wrk_dir) - 1] = '\0'; mnt = bpf_wrk_dir; out: bpf_mnt_cached = true; return mnt; } int get_bpf_root_dir(char *buf, size_t buf_len, const char *subdir, bool fatal) { const char *bpf_dir; bpf_dir = bpf_get_work_dir(); if (!bpf_dir) { logging_print(fatal ? LOG_WARN : LOG_DEBUG, "Could not find BPF working dir - bpffs not mounted?\n"); return -ENOENT; } if (subdir) return try_snprintf(buf, buf_len, "%s/%s", bpf_dir, subdir); else return try_snprintf(buf, buf_len, "%s", bpf_dir); } int get_pinned_map_fd(const char *bpf_root, const char *map_name, struct bpf_map_info *info) { __u32 info_len = sizeof(*info); char buf[PATH_MAX]; int err; err = try_snprintf(buf, sizeof(buf), "%s/%s", bpf_root, map_name); if (err) return err; pr_debug("Getting pinned object from %s\n", buf); return get_pinned_object_fd(buf, info, &info_len); } int unlink_pinned_map(int dir_fd, const char *map_name) { struct stat statbuf = {}; int err; err = fstatat(dir_fd, map_name, &statbuf, 0); if (err && errno == ENOENT) { pr_debug("Map name %s not pinned\n", map_name); return 0; } else if (err) { err = -errno; pr_warn("Couldn't stat pinned map %s: %s\n", map_name, strerror(-err)); return err; } pr_debug("Unlinking pinned map %s\n", map_name); err = unlinkat(dir_fd, map_name, 0); if (err) { err = -errno; pr_warn("Couldn't unlink pinned map %s: %s\n", map_name, strerror(-err)); return -errno; } return 0; } #define XDP_UNKNOWN (XDP_REDIRECT + 1) #ifndef XDP_ACTION_MAX #define XDP_ACTION_MAX (XDP_UNKNOWN + 1) #endif static const char *xdp_action_names[XDP_ACTION_MAX] = { [XDP_ABORTED] = "XDP_ABORTED", [XDP_DROP] = "XDP_DROP", [XDP_PASS] = "XDP_PASS", [XDP_TX] = "XDP_TX", [XDP_REDIRECT] = "XDP_REDIRECT", [XDP_UNKNOWN] = "XDP_UNKNOWN", }; const char *action2str(__u32 action) { if (action < XDP_ACTION_MAX) return xdp_action_names[action]; return NULL; } int check_bpf_environ(void) { init_lib_logging(); if (geteuid() != 0) { pr_warn("This program must be run as root.\n"); return 1; } /* Try to avoid probing errors due to rlimit exhaustion by starting out * with an rlimit of 1 MiB. This is not going to solve all issues, but * it will at least make things work when there is nothing else loaded. * * Ignore return code because an error shouldn't abort running. 
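* (Background: on kernels before 5.11, BPF object memory was charged against RLIMIT_MEMLOCK, so a low limit makes even small program and map loads fail with EPERM; kernels from 5.11 onwards use memcg-based accounting and are unaffected.)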
*/ set_rlimit(1024 * 1024); return 0; } int prog_lock_acquire(const char *dir) { int lock_fd, err = 0; retry: lock_fd = open(dir, O_DIRECTORY); if (lock_fd < 0) { if (errno == ENOENT && !mkdir(dir, S_IRWXU)) goto retry; err = -errno; pr_warn("Couldn't open lock directory at %s: %s\n", dir, strerror(-err)); return err; } err = flock(lock_fd, LOCK_EX); if (err) { err = -errno; pr_warn("Couldn't flock fd %d: %s\n", lock_fd, strerror(-err)); close(lock_fd); return err; } pr_debug("Acquired lock from %s with fd %d\n", dir, lock_fd); return lock_fd; } int prog_lock_release(int lock_fd) { int err; err = flock(lock_fd, LOCK_UN); if (err) { err = -errno; pr_warn("Couldn't unlock fd %d: %s\n", lock_fd, strerror(-err)); } else { pr_debug("Released lock fd %d\n", lock_fd); } close(lock_fd); return err; } static char *print_bpf_tag(char buf[BPF_TAG_SIZE * 2 + 1], const unsigned char tag[BPF_TAG_SIZE]) { int i; for (i = 0; i < BPF_TAG_SIZE; i++) sprintf(&buf[i * 2], "%02x", tag[i]); buf[BPF_TAG_SIZE * 2] = '\0'; return buf; } static int print_iface_status(const struct iface *iface, const struct xdp_multiprog *mp, __unused void *arg) { struct xdp_program *prog, *dispatcher, *hw_prog; char tag[BPF_TAG_SIZE * 2 + 1]; char buf[STRERR_BUFSIZE]; int err; if (!mp) { printf("%-22s \n", iface->ifname); return 0; } hw_prog = xdp_multiprog__hw_prog(mp); if (hw_prog) { printf("%-16s %-5s %-17s %-8s %-4d %-17s\n", iface->ifname, "", xdp_program__name(hw_prog), get_enum_name(xdp_modes, XDP_MODE_HW), xdp_program__id(hw_prog), print_bpf_tag(tag, xdp_program__tag(hw_prog))); } dispatcher = xdp_multiprog__main_prog(mp); if (dispatcher) { printf("%-16s %-5s %-17s %-8s %-4d %-17s\n", iface->ifname, "", xdp_program__name(dispatcher), get_enum_name(xdp_modes, xdp_multiprog__attach_mode(mp)), xdp_program__id(dispatcher), print_bpf_tag(tag, xdp_program__tag(dispatcher))); for (prog = xdp_multiprog__next_prog(NULL, mp); prog; prog = xdp_multiprog__next_prog(prog, mp)) { err = xdp_program__print_chain_call_actions(prog, buf, sizeof(buf)); if (err) return err; printf("%-16s %-5d %-16s %-8s %-4u %-17s %s\n", " =>", xdp_program__run_prio(prog), xdp_program__name(prog), "", xdp_program__id(prog), print_bpf_tag(tag, xdp_program__tag(prog)), buf); } } return 0; } int iface_print_status(const struct iface *iface) { int err = 0; printf("%-16s %-5s %-17s Mode ID %-17s %s\n", "Interface", "Prio", "Program name", "Tag", "Chain actions"); printf("--------------------------------------------------------------------------------------\n"); if (iface) { struct xdp_multiprog *mp; mp = xdp_multiprog__get_from_ifindex(iface->ifindex); if (IS_ERR_OR_NULL(mp)) { if (PTR_ERR(mp) != -ENOENT) { err = PTR_ERR(mp); pr_warn("Error getting XDP status for interface %s: %s\n", iface->ifname, strerror(-err)); goto out; } mp = NULL; } print_iface_status(iface, mp, NULL); } else { err = iterate_iface_multiprogs(print_iface_status, NULL); } printf("\n"); out: return err; } xdp-tools-1.5.4/lib/util/logging.h0000644000175100001660000000203215003640462016334 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LOGGING_H #define __LOGGING_H /* This matches the libbpf logging levels, but with an additional VERBOSE level; * we demote all libbpf messages by one level so debug messages only show up on * VERBOSE. */ enum logging_print_level { LOG_WARN, LOG_INFO, LOG_DEBUG, LOG_VERBOSE, }; extern void logging_print(enum logging_print_level level, const char *format, ...) __attribute__((format(printf, 2, 3))); #define __pr(level, fmt, ...) 
\ do { \ logging_print(level, fmt, ##__VA_ARGS__); \ } while (0) #define pr_warn(fmt, ...) __pr(LOG_WARN, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) __pr(LOG_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LOG_DEBUG, fmt, ##__VA_ARGS__) void init_lib_logging(void); void silence_libbpf_logging(void); void silence_libxdp_logging(void); enum logging_print_level set_log_level(enum logging_print_level level); enum logging_print_level increase_log_level(); #endif xdp-tools-1.5.4/lib/util/xdp_sample.c0000644000175100001660000012171115003640462017043 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xdp_sample.h" #include "logging.h" #include "xdp_sample.skel.h" #include "xdp_load_bytes.skel.h" #define __sample_print(fmt, cond, ...) \ ({ \ if (cond) \ printf(fmt, ##__VA_ARGS__); \ }) #define print_always(fmt, ...) __sample_print(fmt, 1, ##__VA_ARGS__) #define print_default(fmt, ...) \ __sample_print(fmt, sample_log_level & LL_DEFAULT, ##__VA_ARGS__) #define __print_err(err, fmt, ...) \ ({ \ __sample_print(fmt, err > 0 || sample_log_level & LL_DEFAULT, \ ##__VA_ARGS__); \ sample_err_exp = sample_err_exp ? true : err > 0; \ }) #define print_err(err, fmt, ...) __print_err(err, fmt, ##__VA_ARGS__) #define __COLUMN(x) "%'10" x " %-13s" #define FMT_COLUMNf __COLUMN(".0f") #define FMT_COLUMNd __COLUMN("d") #define FMT_COLUMNl __COLUMN(PRIu64) #define RX(rx) rx, "rx/s" #define PPS(pps) pps, "pkt/s" #define DROP(drop) drop, "drop/s" #define ERR(err) err, "error/s" #define HITS(hits) hits, "hit/s" #define XMIT(xmit) xmit, "xmit/s" #define PASS(pass) pass, "pass/s" #define REDIR(redir) redir, "redir/s" #define NANOSEC_PER_SEC 1000000000 /* 10^9 */ #define XDP_UNKNOWN (XDP_REDIRECT + 1) #define XDP_ACTION_MAX (XDP_UNKNOWN + 1) #define XDP_REDIRECT_ERR_MAX 7 enum map_type { MAP_RX, MAP_RXQ, MAP_REDIRECT_ERR, MAP_CPUMAP_ENQUEUE, MAP_CPUMAP_KTHREAD, MAP_EXCEPTION, MAP_DEVMAP_XMIT, MAP_DEVMAP_XMIT_MULTI, NUM_MAP, }; enum log_level { LL_DEFAULT = 1U << 0, LL_SIMPLE = 1U << 1, LL_DEBUG = 1U << 2, }; struct record { __u64 timestamp; struct datarec total; union { struct datarec *cpu; struct datarec *rxq; }; }; struct map_entry { struct hlist_node node; __u64 pair; struct record val; }; struct stats_record { struct record rx_cnt; struct record rxq_cnt; struct record redir_err[XDP_REDIRECT_ERR_MAX]; struct record kthread; struct record exception[XDP_ACTION_MAX]; struct record devmap_xmit; DECLARE_HASHTABLE(xmit_map, 5); struct record enq[]; }; struct sample_output { struct { uint64_t rx; uint64_t redir; uint64_t drop; uint64_t drop_xmit; uint64_t err; uint64_t err_pps; uint64_t xmit; } totals; struct { union { uint64_t pps; uint64_t num; }; uint64_t drop; uint64_t err; } rx_cnt; struct { uint64_t suc; uint64_t err; } redir_cnt; struct { uint64_t hits; } except_cnt; struct { uint64_t pps; uint64_t drop; uint64_t err; double bavg; } xmit_cnt; }; struct datarec *sample_mmap[NUM_MAP]; struct bpf_map *sample_map[NUM_MAP]; size_t sample_map_count[NUM_MAP]; enum log_level sample_log_level; struct sample_output sample_out; unsigned long sample_interval; __u64 sample_start_time; bool sample_err_exp; int sample_xdp_cnt; int sample_n_cpus; int sample_n_rxqs; int sample_sig_fd; int sample_mask; int ifindex[2]; static 
struct { bool checked; bool compat; } sample_compat[SAMPLE_COMPAT_MAX] = {}; bool sample_is_compat(enum sample_compat compat_value) { return sample_compat[compat_value].compat; } bool sample_probe_cpumap_compat(void) { struct xdp_sample *skel; bool res; skel = xdp_sample__open_and_load(); res = !!skel; xdp_sample__destroy(skel); return res; } bool sample_probe_xdp_load_bytes(void) { struct xdp_load_bytes *skel; bool res; skel = xdp_load_bytes__open_and_load(); res = !!skel; xdp_load_bytes__destroy(skel); return res; } void sample_check_cpumap_compat(struct bpf_program *prog, struct bpf_program *prog_compat) { bool res = sample_compat[SAMPLE_COMPAT_CPUMAP_KTHREAD].compat; if (!sample_compat[SAMPLE_COMPAT_CPUMAP_KTHREAD].checked) { res = sample_probe_cpumap_compat(); sample_compat[SAMPLE_COMPAT_CPUMAP_KTHREAD].checked = true; sample_compat[SAMPLE_COMPAT_CPUMAP_KTHREAD].compat = res; } if (res) { pr_debug("Kernel supports 5-arg xdp_cpumap_kthread tracepoint\n"); bpf_program__set_autoload(prog_compat, false); } else { pr_debug("Kernel does not support 5-arg xdp_cpumap_kthread tracepoint, using compat version\n"); bpf_program__set_autoload(prog, false); } } static const char *xdp_redirect_err_names[XDP_REDIRECT_ERR_MAX] = { /* Key=1 keeps unknown errors */ "Success", "Unknown", "EINVAL", "ENETDOWN", "EMSGSIZE", "EOPNOTSUPP", "ENOSPC", }; static const char *xdp_action_names[XDP_ACTION_MAX] = { [XDP_ABORTED] = "XDP_ABORTED", [XDP_DROP] = "XDP_DROP", [XDP_PASS] = "XDP_PASS", [XDP_TX] = "XDP_TX", [XDP_REDIRECT] = "XDP_REDIRECT", [XDP_UNKNOWN] = "XDP_UNKNOWN", }; static __u64 gettime(void) { struct timespec t; int res; res = clock_gettime(CLOCK_MONOTONIC, &t); if (res < 0) { pr_warn("Error with clock_gettime! (%i)\n", res); return UINT64_MAX; } return (__u64)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; } static const char *xdp_action2str(int action) { if (action < XDP_ACTION_MAX) return xdp_action_names[action]; return NULL; } static struct datarec *alloc_records(int nr_entries) { struct datarec *array; if (nr_entries <= 0) return NULL; array = calloc(nr_entries, sizeof(*array)); if (!array) { pr_warn("Failed to allocate memory (nr_entries: %d)\n", nr_entries); return NULL; } return array; } static int map_entry_init(struct map_entry *e, __u64 pair) { e->pair = pair; INIT_HLIST_NODE(&e->node); e->val.timestamp = gettime(); e->val.cpu = alloc_records(libbpf_num_possible_cpus()); if (!e->val.cpu) return -ENOMEM; return 0; } static void map_collect_rxqs(struct datarec *values, struct record *rec) { int i; /* Get time as close as possible to reading map contents */ rec->timestamp = gettime(); /* Record and sum values from each RXQ */ for (i = 0; i < sample_n_rxqs; i++) { pr_debug("%d: %lx %lx\n", i, (unsigned long)&rec->rxq[i], (unsigned long)&values[i]); rec->rxq[i].processed = READ_ONCE(values[i].processed); rec->rxq[i].dropped = READ_ONCE(values[i].dropped); rec->rxq[i].issue = READ_ONCE(values[i].issue); rec->rxq[i].xdp_pass = READ_ONCE(values[i].xdp_pass); rec->rxq[i].xdp_drop = READ_ONCE(values[i].xdp_drop); rec->rxq[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect); } } static void map_collect_percpu(struct datarec *values, struct record *rec) { /* For percpu maps, userspace gets a value per possible CPU */ int nr_cpus = libbpf_num_possible_cpus(); __u64 sum_xdp_redirect = 0; __u64 sum_processed = 0; __u64 sum_xdp_pass = 0; __u64 sum_xdp_drop = 0; __u64 sum_dropped = 0; __u64 sum_issue = 0; int i; /* Get time as close as possible to reading map contents */ rec->timestamp = gettime(); /* Record and 
sum values from each CPU */ for (i = 0; i < nr_cpus; i++) { rec->cpu[i].processed = READ_ONCE(values[i].processed); rec->cpu[i].dropped = READ_ONCE(values[i].dropped); rec->cpu[i].issue = READ_ONCE(values[i].issue); rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass); rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop); rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect); sum_processed += rec->cpu[i].processed; sum_dropped += rec->cpu[i].dropped; sum_issue += rec->cpu[i].issue; sum_xdp_pass += rec->cpu[i].xdp_pass; sum_xdp_drop += rec->cpu[i].xdp_drop; sum_xdp_redirect += rec->cpu[i].xdp_redirect; } rec->total.processed = sum_processed; rec->total.dropped = sum_dropped; rec->total.issue = sum_issue; rec->total.xdp_pass = sum_xdp_pass; rec->total.xdp_drop = sum_xdp_drop; rec->total.xdp_redirect = sum_xdp_redirect; } static int map_collect_percpu_devmap(int map_fd, struct stats_record *rec) { int nr_cpus = libbpf_num_possible_cpus(); int i, ret, count = 32; struct datarec *values; bool init = false; __u32 batch; __u64 *keys; keys = calloc(count, sizeof(__u64)); if (!keys) return -ENOMEM; values = calloc(count * nr_cpus, sizeof(struct datarec)); if (!values) { free(keys); return -ENOMEM; } for (;;) { bool exit = false; ret = bpf_map_lookup_batch(map_fd, init ? &batch : NULL, &batch, keys, values, (__u32 *)&count, NULL); if (ret < 0 && errno != ENOENT) break; if (errno == ENOENT) exit = true; init = true; for (i = 0; i < count; i++) { struct map_entry *e, *x = NULL; __u64 pair = keys[i]; struct datarec *arr; arr = &values[i * nr_cpus]; hash_for_each_possible(rec->xmit_map, e, node, pair) { if (e->pair == pair) { x = e; break; } } if (!x) { x = calloc(1, sizeof(*x)); if (!x) goto cleanup; if (map_entry_init(x, pair) < 0) { free(x); goto cleanup; } hash_add(rec->xmit_map, &x->node, pair); } map_collect_percpu(arr, &x->val); } if (exit) break; count = 32; } free(values); free(keys); return 0; cleanup: free(values); free(keys); return -ENOMEM; } static struct stats_record *alloc_stats_record(void) { struct stats_record *rec; int i; rec = calloc(1, sizeof(*rec) + sample_n_cpus * sizeof(struct record)); if (!rec) { pr_warn("Failed to allocate memory\n"); return NULL; } if (sample_mask & SAMPLE_RX_CNT) { rec->rx_cnt.cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->rx_cnt.cpu) { pr_warn("Failed to allocate rx_cnt per-CPU array\n"); goto end_rec; } } if (sample_mask & SAMPLE_RXQ_STATS) { if (sample_n_rxqs <= 0) { pr_warn("Invalid number of RXQs: %d\n", sample_n_rxqs); goto end_rx_cnt; } rec->rxq_cnt.rxq = alloc_records(sample_n_rxqs); if (!rec->rxq_cnt.rxq) { pr_warn("Failed to allocate rxq_cnt per RXQ array\n"); goto end_rx_cnt; } } if (sample_mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) { for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) { rec->redir_err[i].cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->redir_err[i].cpu) { pr_warn("Failed to allocate redir_err per-CPU array for \"%s\" case\n", xdp_redirect_err_names[i]); while (i--) free(rec->redir_err[i].cpu); goto end_rxq_cnt; } } } if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) { rec->kthread.cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->kthread.cpu) { pr_warn("Failed to allocate kthread per-CPU array\n"); goto end_redir; } } if (sample_mask & SAMPLE_EXCEPTION_CNT) { for (i = 0; i < XDP_ACTION_MAX; i++) { rec->exception[i].cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->exception[i].cpu) { pr_warn("Failed to allocate exception per-CPU array for \"%s\" case\n", xdp_action2str(i)); while 
(i--) free(rec->exception[i].cpu); goto end_kthread; } } } if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) { rec->devmap_xmit.cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->devmap_xmit.cpu) { pr_warn("Failed to allocate devmap_xmit per-CPU array\n"); goto end_exception; } } if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) hash_init(rec->xmit_map); if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) { for (i = 0; i < sample_n_cpus; i++) { rec->enq[i].cpu = alloc_records(libbpf_num_possible_cpus()); if (!rec->enq[i].cpu) { pr_warn("Failed to allocate enqueue per-CPU array for CPU %d\n", i); while (i--) free(rec->enq[i].cpu); goto end_devmap_xmit; } } } return rec; end_devmap_xmit: free(rec->devmap_xmit.cpu); end_exception: for (i = 0; i < XDP_ACTION_MAX; i++) free(rec->exception[i].cpu); end_kthread: free(rec->kthread.cpu); end_redir: for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) free(rec->redir_err[i].cpu); end_rxq_cnt: free(rec->rxq_cnt.rxq); end_rx_cnt: free(rec->rx_cnt.cpu); end_rec: free(rec); return NULL; } static void free_stats_record(struct stats_record *r) { struct hlist_node *tmp; struct map_entry *e; unsigned int bkt; int i; for (i = 0; i < sample_n_cpus; i++) free(r->enq[i].cpu); hash_for_each_safe(r->xmit_map, bkt, tmp, e, node) { hash_del(&e->node); free(e->val.cpu); free(e); } free(r->devmap_xmit.cpu); for (i = 0; i < XDP_ACTION_MAX; i++) free(r->exception[i].cpu); free(r->kthread.cpu); for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) free(r->redir_err[i].cpu); free(r->rx_cnt.cpu); free(r); } static double calc_period(struct record *r, struct record *p) { double period_ = 0; __u64 period = 0; period = r->timestamp - p->timestamp; if (period > 0) period_ = ((double)period / NANOSEC_PER_SEC); return period_; } static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->processed - p->processed; pps = round(packets / period_); } return pps; } static __u64 calc_pkts(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; if (period_ > 0) { packets = r->processed - p->processed; } return packets; } static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->dropped - p->dropped; pps = round(packets / period_); } return pps; } static __u64 calc_drop_pkts(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; if (period_ > 0) { packets = r->dropped - p->dropped; } return packets; } static __u64 calc_errs_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->issue - p->issue; pps = round(packets / period_); } return pps; } static __u64 calc_errs_pkts(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; if (period_ > 0) { packets = r->issue - p->issue; } return packets; } static __u64 calc_info_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->info - p->info; pps = round(packets / period_); } return pps; } static void calc_xdp_pps(struct datarec *r, struct datarec *p, double *xdp_pass, double *xdp_drop, double *xdp_redirect, double period_) { *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0; if (period_ > 0) { *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_; *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_; *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_; } } static void stats_get_rx_cnt(struct 
stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { struct record *rec, *prev; double t, pps, drop, err; int i; rec = &stats_rec->rx_cnt; prev = &stats_prev->rx_cnt; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PPS(pps), DROP(drop), ERR(err)); } if (out) { err = calc_errs_pps(&rec->total, &prev->total, t); out->rx_cnt.pps = calc_pps(&rec->total, &prev->total, t); out->rx_cnt.drop = calc_drop_pps(&rec->total, &prev->total, t); out->rx_cnt.err = err; out->totals.rx += calc_pkts(&rec->total, &prev->total, t); out->totals.drop += calc_drop_pkts(&rec->total, &prev->total, t); out->totals.err += calc_errs_pkts(&rec->total, &prev->total, t); out->totals.err_pps += err; } } static void stats_get_rxq_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev) { struct record *rec, *prev; double t, pps, drop, err; int i; rec = &stats_rec->rxq_cnt; prev = &stats_prev->rxq_cnt; t = calc_period(rec, prev); print_default("\n"); for (i = 0; i < sample_n_rxqs; i++) { struct datarec *r = &rec->rxq[i]; struct datarec *p = &prev->rxq[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "rxq:%d", i); print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PPS(pps), DROP(drop), ERR(err)); } } static void stats_get_cpumap_enqueue(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus) { struct record *rec, *prev; double t, pps, drop, err; int i, to_cpu; /* cpumap enqueue stats */ for (to_cpu = 0; to_cpu < sample_n_cpus; to_cpu++) { rec = &stats_rec->enq[to_cpu]; prev = &stats_prev->enq[to_cpu]; t = calc_period(rec, prev); pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); if (pps > 0 || drop > 0) { char str[64]; snprintf(str, sizeof(str), "enqueue to cpu %d", to_cpu); if (err > 0) err = pps / err; /* calc average bulk size */ print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf __COLUMN( ".2f") "\n", str, PPS(pps), DROP(drop), err, "bulk-avg"); } for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d->%d", i, to_cpu); if (err > 0) err = pps / err; /* calc average bulk size */ print_default( " %-18s " FMT_COLUMNf FMT_COLUMNf __COLUMN( ".2f") "\n", str, PPS(pps), DROP(drop), err, "bulk-avg"); } } } static void stats_get_cpumap_remote(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus) { double xdp_pass, xdp_drop, xdp_redirect; struct record *rec, *prev; double t; int i; rec = &stats_rec->kthread; prev = &stats_prev->kthread; t = calc_period(rec, prev); calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop, &xdp_redirect, t); if (xdp_pass || xdp_drop || xdp_redirect) { print_err(xdp_drop, " %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", "xdp_stats", PASS(xdp_pass), DROP(xdp_drop), REDIR(xdp_redirect)); } for (i = 0; i < nr_cpus; i++) { 
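/* Per-CPU breakdown: calc_xdp_pps() turns the delta against the previous snapshot into per-second rates; CPUs that saw no traffic in the interval are skipped below. */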
struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; calc_xdp_pps(r, p, &xdp_pass, &xdp_drop, &xdp_redirect, t); if (!xdp_pass && !xdp_drop && !xdp_redirect) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PASS(xdp_pass), DROP(xdp_drop), REDIR(xdp_redirect)); } } static void stats_get_cpumap_kthread(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus) { struct record *rec, *prev; double t, pps, drop, err; int i; rec = &stats_rec->kthread; prev = &stats_prev->kthread; t = calc_period(rec, prev); pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", pps ? "kthread total" : "kthread", PPS(pps), DROP(drop), err, "sched"); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PPS(pps), DROP(drop), err, "sched"); } } static void stats_get_redirect_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { struct record *rec, *prev; double t, pps; int i; rec = &stats_rec->redir_err[0]; prev = &stats_prev->redir_err[0]; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); if (!pps) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf "\n", str, REDIR(pps)); } if (out) { out->redir_cnt.suc = calc_pps(&rec->total, &prev->total, t); out->totals.redir += calc_pkts(&rec->total, &prev->total, t); } } static void stats_get_redirect_err_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { double t, drop, sum_pps = 0, sum_pkts = 0; struct record *rec, *prev; int rec_i, i; for (rec_i = 1; rec_i < XDP_REDIRECT_ERR_MAX; rec_i++) { char str[64]; rec = &stats_rec->redir_err[rec_i]; prev = &stats_prev->redir_err[rec_i]; t = calc_period(rec, prev); drop = calc_drop_pps(&rec->total, &prev->total, t); if (drop > 0 && !out) { snprintf(str, sizeof(str), sample_log_level & LL_DEFAULT ? 
"%s total" : "%s", xdp_redirect_err_names[rec_i]); print_err(drop, " %-18s " FMT_COLUMNf "\n", str, ERR(drop)); } for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; double drop; drop = calc_drop_pps(r, p, t); if (!drop) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s" FMT_COLUMNf "\n", str, ERR(drop)); } sum_pps += drop; sum_pkts += calc_drop_pkts(&rec->total, &prev->total, t); } if (out) { out->redir_cnt.err = sum_pps; out->totals.err += sum_pkts; out->totals.err_pps += sum_pps; } } static void stats_get_exception_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { double t, drop, sum_pps = 0, sum_pkts = 0; struct record *rec, *prev; int rec_i, i; for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) { rec = &stats_rec->exception[rec_i]; prev = &stats_prev->exception[rec_i]; t = calc_period(rec, prev); drop = calc_drop_pps(&rec->total, &prev->total, t); sum_pps += drop; sum_pkts += calc_drop_pkts(&rec->total, &prev->total, t); /* Fold out errors after heading */ if (drop > 0 && !out) { print_always(" %-18s " FMT_COLUMNf "\n", xdp_action2str(rec_i), ERR(drop)); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; double drop; drop = calc_drop_pps(r, p, t); if (!drop) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s" FMT_COLUMNf "\n", str, ERR(drop)); } } } if (out) { out->except_cnt.hits = sum_pps; out->totals.err += sum_pkts; out->totals.err_pps += sum_pps; } } static void stats_get_devmap_xmit(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { double pps, drop, info, err; struct record *rec, *prev; double t; int i; rec = &stats_rec->devmap_xmit; prev = &stats_prev->devmap_xmit; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); info = calc_info_pps(r, p, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } if (out) { pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); info = calc_info_pps(&rec->total, &prev->total, t); if (info > 0) out->xmit_cnt.bavg = (pps + drop) / info; /* calc avg bulk */ out->xmit_cnt.pps = pps; out->xmit_cnt.drop = drop; out->xmit_cnt.err = err; out->totals.xmit += calc_pkts(&rec->total, &prev->total, t); out->totals.drop_xmit += calc_drop_pkts(&rec->total, &prev->total, t);; out->totals.err += calc_errs_pkts(&rec->total, &prev->total, t);; out->totals.err_pps += err; } } static void stats_get_devmap_xmit_multi(struct stats_record *stats_rec, struct stats_record *stats_prev, int nr_cpus, struct sample_output *out) { double pps, drop, info, err; struct map_entry *entry; struct record *r, *p; unsigned int bkt; double t; hash_for_each(stats_rec->xmit_map, bkt, entry, node) { struct map_entry *e, *x = NULL; char ifname_from[IFNAMSIZ]; char ifname_to[IFNAMSIZ]; const char *fstr, *tstr; unsigned long prev_time; struct record beg = {}; __u32 from_idx, to_idx; char str[128]; __u64 pair; int i; 
prev_time = sample_interval * NANOSEC_PER_SEC; pair = entry->pair; from_idx = pair >> 32; to_idx = pair & 0xFFFFFFFF; r = &entry->val; beg.timestamp = r->timestamp - prev_time; /* Find matching entry from stats_prev map */ hash_for_each_possible(stats_prev->xmit_map, e, node, pair) { if (e->pair == pair) { x = e; break; } } if (x) p = &x->val; else p = &beg; t = calc_period(r, p); pps = calc_pps(&r->total, &p->total, t); drop = calc_drop_pps(&r->total, &p->total, t); info = calc_info_pps(&r->total, &p->total, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ err = calc_errs_pps(&r->total, &p->total, t); if (out) { out->xmit_cnt.pps += pps; out->xmit_cnt.drop += drop; out->xmit_cnt.err += err; /* We are responsible for filling out totals */ out->totals.xmit += calc_pkts(&r->total, &p->total, t); out->totals.drop_xmit += calc_drop_pkts(&r->total, &p->total, t); out->totals.err += calc_errs_pkts(&r->total, &p->total, t); out->totals.err_pps += calc_errs_pps(&r->total, &p->total, t); continue; } fstr = tstr = NULL; if (if_indextoname(from_idx, ifname_from)) fstr = ifname_from; if (if_indextoname(to_idx, ifname_to)) tstr = ifname_to; snprintf(str, sizeof(str), "xmit %s->%s", fstr ?: "?", tstr ?: "?"); /* Skip idle streams of redirection */ if (pps || drop || err) { print_err(drop * !(sample_mask & SAMPLE_DROP_OK), " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } for (i = 0; i < nr_cpus; i++) { struct datarec *rc = &r->cpu[i]; struct datarec *pc, p_beg = {}; char str[64]; pc = p == &beg ? &p_beg : &p->cpu[i]; pps = calc_pps(rc, pc, t); drop = calc_drop_pps(rc, pc, t); err = calc_errs_pps(rc, pc, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); info = calc_info_pps(rc, pc, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } } } static void stats_print(const char *prefix, int mask, struct stats_record *r, struct stats_record *p, struct sample_output *out) { int nr_cpus = libbpf_num_possible_cpus(); const char *str; print_always("%-23s", prefix ?: "Summary"); if (mask & SAMPLE_RX_CNT) print_always(FMT_COLUMNl, RX(out->rx_cnt.pps)); if (mask & SAMPLE_REDIRECT_CNT) print_always(FMT_COLUMNl, REDIR(out->redir_cnt.suc)); printf(FMT_COLUMNl, out->totals.err_pps + ((out->rx_cnt.drop + out->xmit_cnt.drop) * !(mask & SAMPLE_DROP_OK)), (mask & SAMPLE_DROP_OK) ? "err/s" : "err,drop/s"); if (mask & SAMPLE_DEVMAP_XMIT_CNT || mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) printf(FMT_COLUMNl, XMIT(out->xmit_cnt.pps)); printf("\n"); if (mask & SAMPLE_RX_CNT) { str = (sample_log_level & LL_DEFAULT) && out->rx_cnt.pps ? "receive total" : "receive"; print_err((out->rx_cnt.err || (out->rx_cnt.drop && !(mask & SAMPLE_DROP_OK))), " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl "\n", str, PPS(out->rx_cnt.pps), DROP(out->rx_cnt.drop), ERR(out->rx_cnt.err)); stats_get_rx_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_RXQ_STATS) stats_get_rxq_cnt(r, p); if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) stats_get_cpumap_enqueue(r, p, nr_cpus); if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) { stats_get_cpumap_kthread(r, p, nr_cpus); stats_get_cpumap_remote(r, p, nr_cpus); } if (mask & SAMPLE_REDIRECT_CNT) { str = out->redir_cnt.suc ? 
"redirect total" : "redirect"; print_default(" %-20s " FMT_COLUMNl "\n", str, REDIR(out->redir_cnt.suc)); stats_get_redirect_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_REDIRECT_ERR_CNT) { str = (sample_log_level & LL_DEFAULT) && out->redir_cnt.err ? "redirect_err total" : "redirect_err"; print_err(out->redir_cnt.err, " %-20s " FMT_COLUMNl "\n", str, ERR(out->redir_cnt.err)); stats_get_redirect_err_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_EXCEPTION_CNT) { str = out->except_cnt.hits ? "xdp_exception total" : "xdp_exception"; print_err(out->except_cnt.hits, " %-20s " FMT_COLUMNl "\n", str, HITS(out->except_cnt.hits)); stats_get_exception_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_DEVMAP_XMIT_CNT) { str = (sample_log_level & LL_DEFAULT) && out->xmit_cnt.pps ? "devmap_xmit total" : "devmap_xmit"; print_err(out->xmit_cnt.err || out->xmit_cnt.drop, " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl __COLUMN(".2f") "\n", str, XMIT(out->xmit_cnt.pps), DROP(out->xmit_cnt.drop), (uint64_t)out->xmit_cnt.err, "drv_err/s", out->xmit_cnt.bavg, "bulk-avg"); stats_get_devmap_xmit(r, p, nr_cpus, NULL); } if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) stats_get_devmap_xmit_multi(r, p, nr_cpus, NULL); if (sample_log_level & LL_DEFAULT || ((sample_log_level & LL_SIMPLE) && sample_err_exp)) { sample_err_exp = false; printf("\n"); } fflush(stdout); fflush(stderr); // Flushing both outputs to "bypass" buffering } static int get_num_rxqs(const char *ifname) { struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS, }; struct ifreq ifr = { .ifr_data = (void *)&ch, }; int fd, ret; if (!ifname || strlen(ifname) > sizeof(ifr.ifr_name) - 1) return 0; strcpy(ifr.ifr_name, ifname); fd = socket(AF_UNIX, SOCK_DGRAM, 0); if (fd < 0) { ret = -errno; pr_warn("Couldn't open socket socket: %s\n", strerror(-ret)); return ret; } ret = ioctl(fd, SIOCETHTOOL, &ifr); if (ret < 0) { ret = -errno; pr_debug("Error in ethtool ioctl: %s\n", strerror(-ret)); goto out; } ret = ch.rx_count + ch.combined_count; pr_debug("Got %d queues for ifname %s\n", ret, ifname); out: close(fd); return ret; } int sample_setup_maps(struct bpf_map **maps, const char *ifname) { sample_n_cpus = libbpf_num_possible_cpus(); for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { sample_map[i] = maps[i]; int n_cpus; switch (i) { case MAP_RX: case MAP_CPUMAP_KTHREAD: case MAP_DEVMAP_XMIT: sample_map_count[i] = sample_n_cpus; break; case MAP_RXQ: sample_n_rxqs = get_num_rxqs(ifname); sample_map_count[i] = sample_n_rxqs > 0 ? 
sample_n_rxqs : 1; break; case MAP_REDIRECT_ERR: sample_map_count[i] = XDP_REDIRECT_ERR_MAX * sample_n_cpus; break; case MAP_EXCEPTION: sample_map_count[i] = XDP_ACTION_MAX * sample_n_cpus; break; case MAP_CPUMAP_ENQUEUE: if (__builtin_mul_overflow(sample_n_cpus, sample_n_cpus, &n_cpus)) return -EOVERFLOW; sample_map_count[i] = n_cpus; break; default: return -EINVAL; } if (bpf_map__set_max_entries(sample_map[i], sample_map_count[i]) < 0) return -errno; } sample_map[MAP_DEVMAP_XMIT_MULTI] = maps[MAP_DEVMAP_XMIT_MULTI]; return 0; } static int sample_setup_maps_mappings(void) { for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { size_t size = sample_map_count[i] * sizeof(struct datarec); sample_mmap[i] = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, bpf_map__fd(sample_map[i]), 0); if (sample_mmap[i] == MAP_FAILED) return -errno; } return 0; } int __sample_init(int mask, int ifindex_from, int ifindex_to) { sigset_t st; if (mask & SAMPLE_RXQ_STATS && sample_n_rxqs <= 0) { pr_warn("Couldn't retrieve the number of RXQs, so can't enable RXQ stats\n"); return -EINVAL; } sigemptyset(&st); sigaddset(&st, SIGQUIT); sigaddset(&st, SIGINT); sigaddset(&st, SIGTERM); if (sigprocmask(SIG_BLOCK, &st, NULL) < 0) return -errno; sample_sig_fd = signalfd(-1, &st, SFD_CLOEXEC | SFD_NONBLOCK); if (sample_sig_fd < 0) return -errno; sample_mask = mask; ifindex[0] = ifindex_from; ifindex[1] = ifindex_to; return sample_setup_maps_mappings(); } static void sample_summary_print(void) { __u64 start = sample_start_time; __u64 now = gettime(); double dur_s = ((double)now - start) / NANOSEC_PER_SEC; print_always(" Duration : %.1fs\n", dur_s); if (sample_out.totals.rx) { double pkts = sample_out.totals.rx; print_always(" Packets received : %'-10" PRIu64 "\n", (uint64_t)sample_out.totals.rx); print_always(" Average packets/s : %'-10.0f\n", round(pkts / dur_s)); } if (sample_out.totals.redir) { double pkts = sample_out.totals.redir; print_always(" Packets redirected : %'-10" PRIu64 "\n", sample_out.totals.redir); print_always(" Average redir/s : %'-10.0f\n", round(pkts / dur_s)); } if (sample_out.totals.drop) print_always(" Rx dropped : %'-10" PRIu64 "\n", sample_out.totals.drop); if (sample_out.totals.drop_xmit) print_always(" Tx dropped : %'-10" PRIu64 "\n", sample_out.totals.drop_xmit); if (sample_out.totals.err) print_always(" Errors recorded : %'-10" PRIu64 "\n", sample_out.totals.err); if (sample_out.totals.xmit) { double pkts = sample_out.totals.xmit; print_always(" Packets transmitted : %'-10" PRIu64 "\n", sample_out.totals.xmit); print_always(" Average transmit/s : %'-10.0f\n", round(pkts / dur_s)); } } void sample_teardown(void) { size_t size; for (int i = 0; i < NUM_MAP; i++) { size = sample_map_count[i] * sizeof(**sample_mmap); munmap(sample_mmap[i], size); } sample_summary_print(); close(sample_sig_fd); } static int sample_stats_collect(struct stats_record *rec) { int i; if (sample_mask & SAMPLE_RX_CNT) map_collect_percpu(sample_mmap[MAP_RX], &rec->rx_cnt); if (sample_mask & SAMPLE_RXQ_STATS) map_collect_rxqs(sample_mmap[MAP_RXQ], &rec->rxq_cnt); if (sample_mask & SAMPLE_REDIRECT_CNT) map_collect_percpu(sample_mmap[MAP_REDIRECT_ERR], &rec->redir_err[0]); if (sample_mask & SAMPLE_REDIRECT_ERR_CNT) { for (i = 1; i < XDP_REDIRECT_ERR_MAX; i++) map_collect_percpu(&sample_mmap[MAP_REDIRECT_ERR][i * sample_n_cpus], &rec->redir_err[i]); } if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) for (i = 0; i < sample_n_cpus; i++) map_collect_percpu(&sample_mmap[MAP_CPUMAP_ENQUEUE][i * sample_n_cpus], &rec->enq[i]); if 
(sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) map_collect_percpu(sample_mmap[MAP_CPUMAP_KTHREAD], &rec->kthread); if (sample_mask & SAMPLE_EXCEPTION_CNT) for (i = 0; i < XDP_ACTION_MAX; i++) map_collect_percpu(&sample_mmap[MAP_EXCEPTION][i * sample_n_cpus], &rec->exception[i]); if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) map_collect_percpu(sample_mmap[MAP_DEVMAP_XMIT], &rec->devmap_xmit); if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) { if (map_collect_percpu_devmap(bpf_map__fd(sample_map[MAP_DEVMAP_XMIT_MULTI]), rec) < 0) return -EINVAL; } return 0; } static void sample_summary_update(struct sample_output *out) { sample_out.totals.rx += out->totals.rx; sample_out.totals.redir += out->totals.redir; sample_out.totals.drop += out->totals.drop; sample_out.totals.drop_xmit += out->totals.drop_xmit; sample_out.totals.err += out->totals.err; sample_out.totals.xmit += out->totals.xmit; } static void sample_stats_print(int mask, struct stats_record *cur, struct stats_record *prev, char *prog_name) { struct sample_output out = {}; if (mask & SAMPLE_RX_CNT) stats_get_rx_cnt(cur, prev, 0, &out); if (mask & SAMPLE_REDIRECT_CNT) stats_get_redirect_cnt(cur, prev, 0, &out); if (mask & SAMPLE_REDIRECT_ERR_CNT) stats_get_redirect_err_cnt(cur, prev, 0, &out); if (mask & SAMPLE_EXCEPTION_CNT) stats_get_exception_cnt(cur, prev, 0, &out); if (mask & SAMPLE_DEVMAP_XMIT_CNT) stats_get_devmap_xmit(cur, prev, 0, &out); else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) stats_get_devmap_xmit_multi(cur, prev, 0, &out); sample_summary_update(&out); stats_print(prog_name, mask, cur, prev, &out); } void sample_switch_mode(void) { sample_log_level ^= LL_DEBUG - 1; } static int sample_signal_cb(void) { struct signalfd_siginfo si; int r; r = read(sample_sig_fd, &si, sizeof(si)); if (r < 0) return -errno; switch (si.ssi_signo) { case SIGQUIT: sample_switch_mode(); printf("\n"); break; default: printf("\n"); return 1; } return 0; } /* Pointer swap trick */ static void swap(struct stats_record **a, struct stats_record **b) { struct stats_record *tmp; tmp = *a; *a = *b; *b = tmp; } static int print_stats(struct stats_record **rec, struct stats_record **prev) { char line[64] = "Summary"; int ret; swap(prev, rec); ret = sample_stats_collect(*rec); if (ret < 0) return ret; if (ifindex[0] && !(sample_mask & SAMPLE_SKIP_HEADING)) { char fi[IFNAMSIZ]; char to[IFNAMSIZ]; const char *f, *t; f = t = NULL; if (if_indextoname(ifindex[0], fi)) f = fi; if (if_indextoname(ifindex[1], to)) t = to; snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?"); } sample_stats_print(sample_mask, *rec, *prev, line); return 0; } static int sample_timer_cb(int timerfd, struct stats_record **rec, struct stats_record **prev) { int ret; __u64 t; ret = read(timerfd, &t, sizeof(t)); if (ret < 0) return -errno; return print_stats(rec, prev); } int sample_run(unsigned int interval, void (*post_cb)(void *), void *ctx) { struct timespec ts = { interval, 0 }; struct itimerspec its = { ts, ts }; struct stats_record *rec, *prev; struct pollfd pfd[2] = {}; bool imm_exit = false; const char *envval; int timerfd, ret; envval = secure_getenv("XDP_SAMPLE_IMMEDIATE_EXIT"); if (envval && envval[0] == '1' && envval[1] == '\0') { pr_debug("XDP_SAMPLE_IMMEDIATE_EXIT envvar set, exiting immediately after setup\n"); imm_exit = true; } if (!interval) { pr_warn("Incorrect interval 0\n"); return -EINVAL; } sample_interval = interval; /* Pretty print numbers */ setlocale(LC_NUMERIC, "en_US.UTF-8"); timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK); if (timerfd < 0) 
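/* the timerfd drives the periodic stats output below; without it we cannot continue */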
return -errno; timerfd_settime(timerfd, 0, &its, NULL); pfd[0].fd = sample_sig_fd; pfd[0].events = POLLIN; pfd[1].fd = timerfd; pfd[1].events = POLLIN; ret = -ENOMEM; rec = alloc_stats_record(); if (!rec) goto end; prev = alloc_stats_record(); if (!prev) goto end_rec; sample_start_time = gettime(); ret = sample_stats_collect(rec); if (ret < 0) goto end_rec_prev; if (imm_exit) goto end_rec_prev; for (;;) { ret = poll(pfd, 2, -1); if (ret < 0) { if (errno == EINTR) continue; else break; } if (pfd[0].revents & POLLIN) { ret = sample_signal_cb(); if (ret) print_stats(&rec, &prev); } else if (pfd[1].revents & POLLIN) ret = sample_timer_cb(timerfd, &rec, &prev); if (ret) break; if (post_cb) post_cb(ctx); } end_rec_prev: free_stats_record(prev); end_rec: free_stats_record(rec); end: close(timerfd); return ret; } const char *get_driver_name(int ifindex) { struct ethtool_drvinfo drv = {}; char ifname[IF_NAMESIZE]; static char drvname[32]; struct ifreq ifr = {}; int fd, r = 0; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return "[error]"; if (!if_indextoname(ifindex, ifname)) goto end; drv.cmd = ETHTOOL_GDRVINFO; safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); ifr.ifr_data = (void *)&drv; r = ioctl(fd, SIOCETHTOOL, &ifr); if (r) goto end; safe_strncpy(drvname, drv.driver, sizeof(drvname)); close(fd); return drvname; end: r = errno; close(fd); return r == EOPNOTSUPP ? "loopback" : "[error]"; } int get_mac_addr(int ifindex, void *mac_addr) { char ifname[IF_NAMESIZE]; struct ifreq ifr = {}; int fd, r; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return -errno; if (!if_indextoname(ifindex, ifname)) { r = -errno; goto end; } safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); r = ioctl(fd, SIOCGIFHWADDR, &ifr); if (r) { r = -errno; goto end; } memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char)); end: close(fd); return r; } xdp-tools-1.5.4/lib/util/xpcapng.c0000644000175100001660000004375615003640462016363 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* * Description: * Simple PcapNG library developed from scratch as no library existed that * met the requirements for xdpdump. It can also be used by other XDP * applications that would like to capture packets for debugging purposes. 
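* The writer emits a Section Header Block (SHB) first, then an Interface Description Block (IDB) per capture interface, followed by an Enhanced Packet Block (EPB) for each captured packet, matching the block types defined below.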
*/ /***************************************************************************** * Include files *****************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include "xpcapng.h" /***************************************************************************** * Simple roundup() macro *****************************************************************************/ #ifndef roundup #define roundup(x, y) ( \ { \ typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) #endif /***************************************************************************** * pcapng_dumper structure *****************************************************************************/ struct xpcapng_dumper { int pd_fd; uint32_t pd_interfaces; }; /***************************************************************************** * general pcapng block and option definitions *****************************************************************************/ enum pcapng_block_types { PCAPNG_SECTION_BLOCK = 0x0A0D0D0A, PCAPNG_INTERFACE_BLOCK = 1, PCAPNG_PACKET_BLOCK, PCAPNG_SIMPLE_PACKET_BLOCK, PCAPNG_NAME_RESOLUTION_BLOCK, PCAPNG_INTERFACE_STATS_BLOCK, PCAPNG_ENHANCED_PACKET_BLOCK }; struct pcapng_option { uint16_t po_type; uint16_t po_length; uint8_t po_data[]; } __attribute__((__packed__)); enum pcapng_opt { PCAPNG_OPT_END = 0, PCAPNG_OPT_COMMENT = 1, PCAPNG_OPT_CUSTOME_2988 = 2988, PCAPNG_OPT_CUSTOME_2989 = 2989, PCAPNG_OPT_CUSTOME_19372 = 19372, PCAPNG_OPT_CUSTOME_19373 = 29373 }; /***************************************************************************** * pcapng section header block definitions *****************************************************************************/ struct pcapng_section_header_block { uint32_t shb_block_type; uint32_t shb_block_length; uint32_t shb_byte_order_magic; uint16_t shb_major_version; uint16_t shb_minor_version; uint64_t shb_section_length; uint8_t shb_options[]; /* The options are followed by another: * uint32_t shb_block_length; */ } __attribute__((__packed__)); #define PCAPNG_BYTE_ORDER_MAGIC 0x1A2B3C4D #define PCAPNG_MAJOR_VERSION 1 #define PCAPNG_MINOR_VERSION 0 enum pcapng_opt_shb { PCAPNG_OPT_SHB_HARDWARE = 2, PCAPNG_OPT_SHB_OS, PCAPNG_OPT_SHB_USERAPPL }; /***************************************************************************** * pcapng interface description block definitions *****************************************************************************/ struct pcapng_interface_description_block { uint32_t idb_block_type; uint32_t idb_block_length; uint16_t idb_link_type; uint16_t idb_reserved; uint32_t idb_snap_len; uint8_t idb_options[]; /* The options are followed by another: * uint32_t idb_block_length; */ } __attribute__((__packed__)); enum pcapng_opt_idb { PCAPNG_OPT_IDB_IF_NAME = 2, PCAPNG_OPT_IDB_IF_DESCRIPTION, PCAPNG_OPT_IDB_IF_IPV4_ADDR, PCAPNG_OPT_IDB_IF_IPV6_ADDR, PCAPNG_OPT_IDB_IF_MAC_ADDR, PCAPNG_OPT_IDB_IF_EUI_ADDR, PCAPNG_OPT_IDB_IF_SPEED, PCAPNG_OPT_IDB_IF_TSRESOL, PCAPNG_OPT_IDB_IF_TZONE, PCAPNG_OPT_IDB_IF_FILTER, PCAPNG_OPT_IDB_IF_OS, PCAPNG_OPT_IDB_IF_FCSLEN, PCAPNG_OPT_IDB_IF_TOFFSET, PCAPNG_OPT_IDB_IF_HARDWARE }; /***************************************************************************** * pcapng interface description block definitions *****************************************************************************/ struct pcapng_enhanced_packet_block { uint32_t epb_block_type; uint32_t epb_block_length; uint32_t epb_interface_id; uint32_t 
epb_timestamp_hi; uint32_t epb_timestamp_low; uint32_t epb_captured_length; uint32_t epb_original_length; uint8_t epb_packet_data[]; /* The packet data is followed by: * uint8_t epb_options[]; * uint32_t epb_block_length; */ } __attribute__((__packed__)); enum pcapng_opt_epb { PCAPNG_OPT_EPB_FLAGS = 2, PCAPNG_OPT_EPB_HASH, PCAPNG_OPT_EPB_DROPCOUNT, PCAPNG_OPT_EPB_PACKETID, PCAPNG_OPT_EPB_QUEUE, PCAPNG_OPT_EPB_VERDICT }; enum pcapng_epb_vedict_type { PCAPNG_EPB_VEDRICT_TYPE_HARDWARE = 0, PCAPNG_EPB_VEDRICT_TYPE_EBPF_TC, PCAPNG_EPB_VEDRICT_TYPE_EBPF_XDP }; /***************************************************************************** * pcapng_get_option_length() *****************************************************************************/ static size_t pcapng_get_option_length(size_t len) { return roundup(sizeof(struct pcapng_option) + len, sizeof(uint32_t)); } /***************************************************************************** * pcapng_add_option() *****************************************************************************/ static struct pcapng_option *pcapng_add_option(struct pcapng_option *opt, uint16_t type, uint16_t length, const void *data) { if (opt == NULL) return NULL; opt->po_type = type; opt->po_length = length; if (data) memcpy(opt->po_data, data, length); return (struct pcapng_option *) ((uint8_t *)opt + pcapng_get_option_length(length)); } /***************************************************************************** * pcapng_write_shb() *****************************************************************************/ static bool pcapng_write_shb(struct xpcapng_dumper *pd, const char *comment, const char *hardware, const char *os, const char *user_application) { int rc; size_t shb_length; struct pcapng_section_header_block *shb; struct pcapng_option *opt; if (pd == NULL) { errno = EINVAL; return false; } /* First calculate the total length of the SHB. */ shb_length = sizeof(*shb); if (comment) shb_length += pcapng_get_option_length(strlen(comment)); if (hardware) shb_length += pcapng_get_option_length(strlen(hardware)); if (os) shb_length += pcapng_get_option_length(strlen(os)); if (user_application) shb_length += pcapng_get_option_length( strlen(user_application)); shb_length += pcapng_get_option_length(0); shb_length += sizeof(uint32_t); /* Allocate the SHB and fill it. */ shb = calloc(1, shb_length); if (shb == NULL) { errno = ENOMEM; return false; } shb->shb_block_type = PCAPNG_SECTION_BLOCK; shb->shb_block_length = shb_length; shb->shb_byte_order_magic = PCAPNG_BYTE_ORDER_MAGIC; shb->shb_major_version = PCAPNG_MAJOR_VERSION; shb->shb_minor_version = PCAPNG_MINOR_VERSION; shb->shb_section_length = UINT64_MAX; /* Add the options and block_length value */ opt = (struct pcapng_option *) &shb->shb_options; if (comment) opt = pcapng_add_option(opt, PCAPNG_OPT_COMMENT, strlen(comment), comment); if (hardware) opt = pcapng_add_option(opt, PCAPNG_OPT_SHB_HARDWARE, strlen(hardware), hardware); if (os) opt = pcapng_add_option(opt, PCAPNG_OPT_SHB_OS, strlen(os), os); if (user_application) opt = pcapng_add_option(opt, PCAPNG_OPT_SHB_USERAPPL, strlen(user_application), user_application); /* WARNING: If a new option is added, make sure the length calculation * above is also updated! */ opt = pcapng_add_option(opt, PCAPNG_OPT_END, 0, NULL); memcpy(opt, &shb->shb_block_length, sizeof(shb->shb_block_length)); /* Write the SHB, and free its memory. 
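* A short write() counts as failure below, since a truncated block would corrupt the capture file.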
/***************************************************************************** * pcapng_write_idb() *****************************************************************************/ static bool pcapng_write_idb(struct xpcapng_dumper *pd, const char *name, uint16_t snap_len, const char *description, const uint8_t *mac, uint64_t speed, uint8_t ts_resolution, const char *hardware) { int rc; size_t idb_length; struct pcapng_interface_description_block *idb; struct pcapng_option *opt; if (pd == NULL) { errno = EINVAL; return false; } /* First calculate the total length of the IDB. */ idb_length = sizeof(*idb); if (name) idb_length += pcapng_get_option_length(strlen(name)); if (description) idb_length += pcapng_get_option_length(strlen(description)); if (mac) idb_length += pcapng_get_option_length(6); if (speed) idb_length += pcapng_get_option_length(sizeof(uint64_t)); if (ts_resolution != 6 && ts_resolution != 0) idb_length += pcapng_get_option_length(1); if (hardware) idb_length += pcapng_get_option_length(strlen(hardware)); idb_length += pcapng_get_option_length(0); idb_length += sizeof(uint32_t); /* Allocate the IDB and fill it. */ idb = calloc(1, idb_length); if (idb == NULL) { errno = ENOMEM; return false; } idb->idb_block_type = PCAPNG_INTERFACE_BLOCK; idb->idb_block_length = idb_length; idb->idb_link_type = 1; /* Ethernet */ idb->idb_snap_len = snap_len; /* Add the options and block_length value */ opt = (struct pcapng_option *) &idb->idb_options; if (name) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_NAME, strlen(name), name); if (description) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_DESCRIPTION, strlen(description), description); if (mac) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_MAC_ADDR, 6, mac); if (speed) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_SPEED, sizeof(uint64_t), &speed); if (ts_resolution != 6 && ts_resolution != 0) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_TSRESOL, sizeof(uint8_t), &ts_resolution); if (hardware) opt = pcapng_add_option(opt, PCAPNG_OPT_IDB_IF_HARDWARE, strlen(hardware), hardware); /* WARNING: If a new option is added, make sure the length calculation * above is also updated! */ opt = pcapng_add_option(opt, PCAPNG_OPT_END, 0, NULL); memcpy(opt, &idb->idb_block_length, sizeof(idb->idb_block_length)); /* Write the IDB, and free its memory. */ rc = write(pd->pd_fd, idb, idb_length); free(idb); if ((size_t)rc != idb_length) return false; return true; }
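/* Editor's note: per the pcapng spec, an if_tsresol value of N marks the interface's timestamps as units of 10^-N seconds, with 6 (microseconds) being the default -- which is why pcapng_write_idb() only emits the option for other values; e.g. passing ts_resolution = 9 selects nanosecond timestamps. */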
/***************************************************************************** * pcapng_write_epb() *****************************************************************************/ static bool pcapng_write_epb(struct xpcapng_dumper *pd, uint32_t ifid, const uint8_t *pkt, uint32_t len, uint32_t caplen, uint64_t timestamp, struct xpcapng_epb_options_s *epb_options) { int i = 0; int rc; size_t pad_length; size_t com_length = 0; size_t epb_length; struct pcapng_enhanced_packet_block epb; struct pcapng_option *opt; struct iovec iov[7]; static uint8_t pad[4] = {0, 0, 0, 0}; uint8_t options[8 + 12 + 12 + 8 + 16 + 4 + 4]; /* PCAPNG_OPT_EPB_FLAGS[8] + * PCAPNG_OPT_EPB_DROPCOUNT[12] + * PCAPNG_OPT_EPB_PACKETID[12] + * PCAPNG_OPT_EPB_QUEUE[8] + * PCAPNG_OPT_EPB_VERDICT[16] + * PCAPNG_OPT_END[4] + * epb_block_length */ static struct xdp_verdict { uint8_t type; int64_t verdict; }__attribute__((__packed__)) verdict = { PCAPNG_EPB_VERDICT_TYPE_EBPF_XDP, 0 }; if (pd == NULL) { errno = EINVAL; return false; } /* First calculate the total length of the EPB. */ pad_length = roundup(caplen, sizeof(uint32_t)) - caplen; epb_length = sizeof(epb); epb_length += caplen + pad_length; if (epb_options->flags) epb_length += pcapng_get_option_length(sizeof(uint32_t)); if (epb_options->dropcount) epb_length += pcapng_get_option_length(sizeof(uint64_t)); if (epb_options->packetid) epb_length += pcapng_get_option_length(sizeof(uint64_t)); if (epb_options->queue) epb_length += pcapng_get_option_length(sizeof(uint32_t)); if (epb_options->xdp_verdict) epb_length += pcapng_get_option_length(sizeof(verdict)); if (epb_options->comment) { com_length = strlen(epb_options->comment); epb_length += pcapng_get_option_length(com_length); } epb_length += pcapng_get_option_length(0); epb_length += sizeof(uint32_t); /* Fill in the EPB. */ epb.epb_block_type = PCAPNG_ENHANCED_PACKET_BLOCK; epb.epb_block_length = epb_length; epb.epb_interface_id = ifid; epb.epb_timestamp_hi = timestamp >> 32; epb.epb_timestamp_low = (uint32_t) timestamp; epb.epb_captured_length = caplen; epb.epb_original_length = len; /* Add the flag/end option and block_length value */ opt = (struct pcapng_option *) options; if (epb_options->flags) opt = pcapng_add_option(opt, PCAPNG_OPT_EPB_FLAGS, sizeof(uint32_t), &epb_options->flags); if (epb_options->dropcount) opt = pcapng_add_option(opt, PCAPNG_OPT_EPB_DROPCOUNT, sizeof(uint64_t), &epb_options->dropcount); if (epb_options->packetid) opt = pcapng_add_option(opt, PCAPNG_OPT_EPB_PACKETID, sizeof(uint64_t), epb_options->packetid); if (epb_options->queue) opt = pcapng_add_option(opt, PCAPNG_OPT_EPB_QUEUE, sizeof(uint32_t), epb_options->queue); if (epb_options->xdp_verdict) { verdict.verdict = *epb_options->xdp_verdict; opt = pcapng_add_option(opt, PCAPNG_OPT_EPB_VERDICT, sizeof(verdict), &verdict); } /* WARNING: If a new option is added, make sure the length calculation * and the options[] variable above are also updated! */ opt = pcapng_add_option(opt, PCAPNG_OPT_END, 0, NULL); memcpy(opt, &epb.epb_block_length, sizeof(epb.epb_block_length)); /* Write the EPB in parts, including the options; this is not as * straightforward as pcapng_write_idb(), but it avoids as many memcpy() * calls as possible. */ /* Add base EPB structure. */ iov[i].iov_base = &epb; iov[i++].iov_len = sizeof(epb); /* Add Packet Data.
*/ iov[i].iov_base = (void *)pkt; iov[i++].iov_len = caplen; /* Add Packet Data padding if needed. */ if (pad_length > 0) { iov[i].iov_base = pad; iov[i++].iov_len = pad_length; } /* Add comment if supplied */ if (epb_options->comment) { uint16_t opt[2] = {PCAPNG_OPT_COMMENT, com_length}; size_t opt_pad = roundup(com_length, sizeof(uint32_t)) - com_length; /* Add option header. */ iov[i].iov_base = opt; iov[i++].iov_len = sizeof(opt); /* Add actual comment string. */ iov[i].iov_base = (void *)epb_options->comment; iov[i++].iov_len = com_length; /* Add padding to uint32_t if needed. */ if (opt_pad) { iov[i].iov_base = pad; iov[i++].iov_len = opt_pad; } } /* Write other options and final EPB size. */ iov[i].iov_base = options; iov[i++].iov_len = 8 + (epb_options->flags ? 8 : 0) + (epb_options->dropcount ? 12 : 0) + (epb_options->packetid ? 12 : 0) + (epb_options->queue ? 8 : 0) + (epb_options->xdp_verdict ? 16 : 0); rc = writev(pd->pd_fd, iov, i); if ((size_t)rc != epb_length) return false; return true; } /***************************************************************************** * xpcapng_dump_open() *****************************************************************************/ struct xpcapng_dumper *xpcapng_dump_open(const char *file, const char *comment, const char *hardware, const char *os, const char *user_application) { struct xpcapng_dumper *pd = NULL; if (file == NULL) { errno = EINVAL; goto error_exit; } pd = calloc(1, sizeof(*pd)); if (pd == NULL) { errno = ENOMEM; goto error_exit; } pd->pd_fd = -1; if (strcmp(file, "-") == 0) { pd->pd_fd = STDOUT_FILENO; } else { pd->pd_fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0600); if (pd->pd_fd < 0) goto error_exit; } if (!pcapng_write_shb(pd, comment, hardware, os, user_application)) goto error_exit; return pd; error_exit: if (pd) { if (pd->pd_fd >= 0 && pd->pd_fd != STDOUT_FILENO) close(pd->pd_fd); free(pd); } return NULL; } /***************************************************************************** * xpcapng_dump_close() *****************************************************************************/ void xpcapng_dump_close(struct xpcapng_dumper *pd) { if (pd == NULL) return; if (pd->pd_fd >= 0 && pd->pd_fd != STDOUT_FILENO) close(pd->pd_fd); free(pd); } /***************************************************************************** * xpcapng_dump_flush() *****************************************************************************/ int xpcapng_dump_flush(struct xpcapng_dumper *pd) { if (pd != NULL) return fsync(pd->pd_fd); errno = EINVAL; return -1; } /***************************************************************************** * xpcapng_dump_add_interface() *****************************************************************************/ int xpcapng_dump_add_interface(struct xpcapng_dumper *pd, uint16_t snap_len, const char *name, const char *description, const uint8_t *mac, uint64_t speed, uint8_t ts_resolution, const char *hardware) { if (!pcapng_write_idb(pd, name, snap_len, description, mac, speed, ts_resolution, hardware)) return -1; return pd->pd_interfaces++; }
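/* Typical call sequence (editor's illustration only; file name, interface * name and values are made up, error handling omitted): * * struct xpcapng_dumper *pd = xpcapng_dump_open("capture.pcapng", NULL, * NULL, NULL, "xdpdump"); * int id = xpcapng_dump_add_interface(pd, 2048, "eth0", NULL, NULL, 0, 9, NULL); * xpcapng_dump_enhanced_pkt(pd, id, pkt_data, pkt_len, pkt_len, * timestamp_ns, NULL); * xpcapng_dump_close(pd); */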
/***************************************************************************** * xpcapng_dump_enhanced_pkt() *****************************************************************************/ bool xpcapng_dump_enhanced_pkt(struct xpcapng_dumper *pd, uint32_t ifid, const uint8_t *pkt, uint32_t len, uint32_t caplen, uint64_t timestamp, struct xpcapng_epb_options_s *options) { struct xpcapng_epb_options_s default_options = {}; return pcapng_write_epb(pd, ifid, pkt, len, caplen, timestamp, options ?: &default_options); } xdp-tools-1.5.4/lib/util/xdp_sample.bpf.c0000644000175100001660000000153115003640462017606 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> #ifndef HAVE_LIBBPF_BPF_PROGRAM__FLAGS /* bpf_trace_vprintk() appeared in the same libbpf version as bpf_program__flags() */ static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177; #endif SEC("tp_btf/xdp_cpumap_kthread") int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed, unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats) { static const char fmt[] = "Stats: %d %u %u %d %d\n"; unsigned long long args[] = { map_id, processed, drops, sched, xdp_stats->pass }; bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args)); return 0; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/util/xpcapng.h0000644000175100001660000000441415003640462016354 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /***************************************************************************** * Multiple include protection *****************************************************************************/ #ifndef __XPCAPNG_H_ #define __XPCAPNG_H_ #include <stdbool.h> #include <stdint.h> /***************************************************************************** * Handle *****************************************************************************/ struct xpcapng_dumper; /***************************************************************************** * Flag variables *****************************************************************************/ enum xpcapng_epb_flags { PCAPNG_EPB_FLAG_INBOUND = 0x1, PCAPNG_EPB_FLAG_OUTBOUND = 0x2 }; /***************************************************************************** * EPB options structure *****************************************************************************/ struct xpcapng_epb_options_s { enum xpcapng_epb_flags flags; uint64_t dropcount; uint64_t *packetid; uint32_t *queue; int64_t *xdp_verdict; const char *comment; }; /***************************************************************************** * APIs *****************************************************************************/ extern struct xpcapng_dumper *xpcapng_dump_open(const char *file, const char *comment, const char *hardware, const char *os, const char *user_application); extern void xpcapng_dump_close(struct xpcapng_dumper *pd); extern int xpcapng_dump_flush(struct xpcapng_dumper *pd); extern int xpcapng_dump_add_interface(struct xpcapng_dumper *pd, uint16_t snap_len, const char *name, const char *description, const uint8_t *mac, uint64_t speed, uint8_t ts_resolution, const char *hardware); extern bool xpcapng_dump_enhanced_pkt(struct xpcapng_dumper *pd, uint32_t ifid, const uint8_t *pkt, uint32_t len, uint32_t caplen, uint64_t timestamp, struct xpcapng_epb_options_s *options); /***************************************************************************** * End-of include file *****************************************************************************/ #endif /* __XPCAPNG_H_ */ xdp-tools-1.5.4/lib/util/stats.c0000644000175100001660000001402615003640462016045 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include <errno.h> #include <locale.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "stats.h" #include "util.h" #include "logging.h" #define NANOSEC_PER_SEC 1000000000 /* 10^9 */
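/* Editor's note: CLOCK_MONOTONIC is used below so the per-period deltas are immune to wall-clock jumps; the conversion is plain fixed-point math, e.g. t.tv_sec = 2, t.tv_nsec = 500 yields 2 * 10^9 + 500 = 2000000500 ns. */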
static int gettime(__u64 *nstime) { struct timespec t; int res; res = clock_gettime(CLOCK_MONOTONIC, &t); if (res < 0) { pr_warn("Error with clock_gettime()! (%i)\n", res); return res; } *nstime = (__u64)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; return 0; } static double calc_period(struct record *r, struct record *p) { double period_ = 0; __u64 period = 0; period = r->timestamp - p->timestamp; if (period > 0) period_ = ((double)period / NANOSEC_PER_SEC); return period_; } int stats_print_one(struct stats_record *stats_rec) { __u64 packets, bytes; struct record *rec; int i, err; /* Print stats for each XDP action */ for (i = 0; i < XDP_ACTION_MAX; i++) { const char *fmt = " %-35s %'11lld pkts %'11lld KiB\n"; const char *action = action2str(i); rec = &stats_rec->stats[i]; packets = rec->total.rx_packets; bytes = rec->total.rx_bytes; if (rec->enabled) { err = printf(fmt, action, packets, bytes / 1024); if (err < 0) return err; } } return 0; } int stats_print(struct stats_record *stats_rec, struct stats_record *stats_prev) { struct record *rec, *prev; __u64 packets, bytes; struct timespec t; bool first = true; double period; double pps; /* packets per sec */ double bps; /* bits per sec */ int i, err; err = clock_gettime(CLOCK_REALTIME, &t); if (err < 0) { pr_warn("Error with clock_gettime()! (%i)\n", err); return err; } /* Print stats for each XDP action */ for (i = 0; i < XDP_ACTION_MAX; i++) { const char *fmt = "%-12s %'11lld pkts (%'10.0f pps)" " %'11lld KiB (%'6.0f Mbits/s)\n"; const char *action = action2str(i); rec = &stats_rec->stats[i]; prev = &stats_prev->stats[i]; if (!rec->enabled) continue; packets = rec->total.rx_packets - prev->total.rx_packets; bytes = rec->total.rx_bytes - prev->total.rx_bytes; period = calc_period(rec, prev); if (period == 0) return 0; if (first) { printf("Period of %fs ending at %ld.%06ld\n", period, (long) t.tv_sec, (long) t.tv_nsec / 1000); first = false; } pps = packets / period; bps = (bytes * 8) / period / 1000000; printf(fmt, action, rec->total.rx_packets, pps, rec->total.rx_bytes / 1024, bps); } printf("\n"); return 0; } /* BPF_MAP_TYPE_ARRAY */ static int map_get_value_array(int fd, __u32 key, struct xdp_stats_record *value) { int err = 0; err = bpf_map_lookup_elem(fd, &key, value); if (err) pr_debug("bpf_map_lookup_elem failed key:0x%X\n", key); return err; } /* BPF_MAP_TYPE_PERCPU_ARRAY */ static int map_get_value_percpu_array(int fd, __u32 key, struct xdp_stats_record *value) { /* For percpu maps, userspace gets a value per possible CPU */ int nr_cpus = libbpf_num_possible_cpus(); struct xdp_stats_record *values; __u64 sum_bytes = 0; __u64 sum_pkts = 0; int i, err; if (nr_cpus < 0) return nr_cpus; values = calloc(nr_cpus, sizeof(*values)); if (!values) return -ENOMEM; err = bpf_map_lookup_elem(fd, &key, values); if (err) { pr_debug("bpf_map_lookup_elem failed key:0x%X\n", key); goto out; } /* Sum values from each CPU */ for (i = 0; i < nr_cpus; i++) { sum_pkts += values[i].rx_packets; sum_bytes += values[i].rx_bytes; } value->rx_packets = sum_pkts; value->rx_bytes = sum_bytes; out: free(values); return err; }
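/* Example (editor's illustration): on a system with 4 possible CPUs, a single bpf_map_lookup_elem() on a per-CPU map fills values[0..3], one slot per CPU; per-CPU packet counts of 10, 20, 30 and 40 are summed above into a single rx_packets total of 100. */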
static int map_collect(int fd, __u32 map_type, __u32 key, struct record *rec) { struct xdp_stats_record value = {}; int err; /* Get time as close as possible to reading map contents */ err = gettime(&rec->timestamp); if (err) return err; switch (map_type) { case BPF_MAP_TYPE_ARRAY: err = map_get_value_array(fd, key, &value); break; case BPF_MAP_TYPE_PERCPU_ARRAY: err = map_get_value_percpu_array(fd, key, &value); break; default: pr_warn("Unknown map_type: %u, cannot handle\n", map_type); err = -EINVAL; break; } if (err) return err; rec->total.rx_packets = value.rx_packets; rec->total.rx_bytes = value.rx_bytes; return 0; } int stats_collect(int map_fd, __u32 map_type, struct stats_record *stats_rec) { /* Collect all XDP actions stats */ __u32 key; int err; for (key = 0; key < XDP_ACTION_MAX; key++) { if (!stats_rec->stats[key].enabled) continue; err = map_collect(map_fd, map_type, key, &stats_rec->stats[key]); if (err) return err; } return 0; } static int check_map_pin(__u32 map_id, const char *pin_dir, const char *map_name) { struct bpf_map_info info = {}; int fd, ret = 0; fd = get_pinned_map_fd(pin_dir, map_name, &info); if (fd < 0) { if (fd == -ENOENT) pr_warn("Stats map disappeared while polling\n"); else pr_warn("Unable to re-open stats map\n"); return fd; } if (info.id != map_id) { pr_warn("Stats map ID changed while polling\n"); ret = -EINVAL; } close(fd); return ret; } int stats_poll(int map_fd, int interval, bool *exit, const char *pin_dir, const char *map_name) { struct bpf_map_info info = {}; struct stats_record prev, record = { 0 }; __u32 info_len = sizeof(info); __u32 map_type, map_id; int err; record.stats[XDP_DROP].enabled = true; record.stats[XDP_PASS].enabled = true; record.stats[XDP_REDIRECT].enabled = true; record.stats[XDP_TX].enabled = true; if (!interval) return -EINVAL; err = bpf_obj_get_info_by_fd(map_fd, &info, &info_len); if (err) return -errno; map_type = info.type; map_id = info.id; /* Get initial reading quickly */ stats_collect(map_fd, map_type, &record); usleep(1000000 / 4); while (!*exit) { if (pin_dir) { err = check_map_pin(map_id, pin_dir, map_name); if (err) return err; } memset(&info, 0, sizeof(info)); prev = record; /* struct copy */ stats_collect(map_fd, map_type, &record); err = stats_print(&record, &prev); if (err) return err; usleep(interval * 1000); } return 0; } xdp-tools-1.5.4/lib/util/Makefile0000644000175100001660000000121015003640462016172 0ustar runnerdockerinclude util.mk LIB_DIR ?= ..
include $(LIB_DIR)/defines.mk include $(LIBXDP_DIR)/libxdp.mk all: $(UTIL_OBJS) UTIL_SKEL_H = $(UTIL_BPF_OBJS:.bpf.o=.skel.h) $(UTIL_OBJS): %.o: %.c %.h $(UTIL_SKEL_H) $(LIBMK) $(QUIET_CC)$(CC) $(CFLAGS) $(CPPFLAGS) -Wall -I../../headers -c -o $@ $< clean: $(Q)rm -f $(UTIL_OBJS) $(UTIL_BPF_OBJS) $(UTIL_SKEL_H) *.ll BPF_CFLAGS += -I$(HEADER_DIR) $(ARCH_INCLUDES) $(UTIL_BPF_OBJS): %.o: %.c $(KERN_USER_H) $(BPF_HEADERS) $(LIBMK) $(QUIET_CLANG)$(CLANG) -target $(BPF_TARGET) $(BPF_CFLAGS) -O2 -c -g -o $@ $< $(UTIL_SKEL_H): %.skel.h: %.bpf.o $(QUIET_GEN)$(BPFTOOL) gen skeleton $< name ${@:.skel.h=} > $@ xdp-tools-1.5.4/lib/util/logging.c0000644000175100001660000000365615003640462016340 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include <stdarg.h> #include <stdio.h> #include <bpf/libbpf.h> #include <xdp/libxdp.h> #include "logging.h" #include "util.h" static enum logging_print_level log_level = LOG_INFO; static int print_func(enum logging_print_level level, int indent, const char *format, va_list args) { int i; if (level > log_level) return 0; for (i = 0; i < indent; i++) fprintf(stderr, " "); return vfprintf(stderr, format, args); } static int libbpf_print_func(enum libbpf_print_level level, const char *format, va_list args) { return print_func(level + 1, 2, format, args); } static int libbpf_silent_func(__unused enum libbpf_print_level level, __unused const char *format, __unused va_list args) { return 0; } static int libxdp_print_func(enum libxdp_print_level level, const char *format, va_list args) { return print_func(level + 1, 1, format, args); } static int libxdp_silent_func(__unused enum libxdp_print_level level, __unused const char *format, __unused va_list args) { return 0; } #define __printf(a, b) __attribute__((format(printf, a, b))) __printf(2, 3) void logging_print(enum logging_print_level level, const char *format, ...) { va_list args; va_start(args, format); print_func(level, 0, format, args); va_end(args); } void init_lib_logging(void) { libbpf_set_print(libbpf_print_func); libxdp_set_print(libxdp_print_func); } void silence_libbpf_logging(void) { if (log_level < LOG_VERBOSE) libbpf_set_print(libbpf_silent_func); } void silence_libxdp_logging(void) { if (log_level < LOG_VERBOSE) libxdp_set_print(libxdp_silent_func); } enum logging_print_level set_log_level(enum logging_print_level level) { enum logging_print_level old_level = log_level; log_level = level; return old_level; } enum logging_print_level increase_log_level(void) { if (log_level < LOG_VERBOSE) log_level++; return log_level; }
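/* Usage sketch (editor's illustration, assuming the LOG_WARN < LOG_INFO < LOG_DEBUG < LOG_VERBOSE ordering from logging.h): a tool typically calls set_log_level() and init_lib_logging() at startup and then calls increase_log_level() once per -v flag, so one -v raises the level to LOG_DEBUG and two raise it to LOG_VERBOSE, at which point the silence_*() helpers stop suppressing library output. */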
xdp-tools-1.5.4/lib/testing/0000755000175100001660000000000015003640462015240 5ustar runnerdockerxdp-tools-1.5.4/lib/testing/setup-netns-env.sh0000755000175100001660000000104515003640462020652 0ustar runnerdocker#!/bin/bash # SPDX-License-Identifier: GPL-2.0-or-later # # Script to set up things inside a test environment, used by testenv.sh for # executing commands. # # Author: Toke Høiland-Jørgensen (toke@redhat.com) # Date: 7 March 2019 # Copyright (c) 2019 Red Hat die() { echo "$1" >&2 exit 1 } [ -n "$TESTENV_NAME" ] || die "TESTENV_NAME missing from environment" [ -n "$1" ] || die "Usage: $0 <command>" set -o nounset mount -t bpf bpf /sys/fs/bpf/ || die "Unable to mount /sys/fs/bpf inside test environment" exec "$@" xdp-tools-1.5.4/lib/testing/xdp_pass.c0000644000175100001660000000042315003640462017224 0ustar runnerdocker#include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <xdp/xdp_helpers.h> struct { __uint(priority, 10); __uint(XDP_PASS, 1); } XDP_RUN_CONFIG(xdp_pass); SEC("xdp") int xdp_pass(struct xdp_md *ctx) { return XDP_PASS; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/testing/run_tests.sh0000755000175100001660000000060415003640462017625 0ustar runnerdocker#!/bin/bash TEST_PROG_DIR="${TEST_PROG_DIR:-$(dirname "${BASH_SOURCE[0]}")}" TESTS_DIR="${TESTS_DIR:-$TEST_PROG_DIR/tests}" TEST_RUNNER="$TEST_PROG_DIR/test_runner.sh" RET=0 echo "Running all tests from $TESTS_DIR" for f in "$TESTS_DIR"/*/test-*.sh; do if [[ ! -f "$f" ]]; then echo "No tests found!" exit 1 fi "$TEST_RUNNER" "$f" || RET=1 done exit $RET xdp-tools-1.5.4/lib/testing/test-tool.c0000644000175100001660000001444615003640462017347 0ustar runnerdocker#include <errno.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <net/if.h> #include <linux/err.h> #include <linux/if_link.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include <xdp/libxdp.h> #include "params.h" #include "logging.h" #include "util.h" #include "xdp_sample.h" #include "compat.h" #define PROG_NAME "test-tool" struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {"unspecified", XDP_MODE_UNSPEC}, {NULL, 0} }; static const struct loadopt { bool help; enum xdp_attach_mode mode; struct iface iface; char *filename; } defaults_load = { .mode = XDP_MODE_NATIVE }; static struct bpf_object *open_bpf_obj(const char *filename, struct bpf_object_open_opts *opts) { struct bpf_object *obj; int err; obj = bpf_object__open_file(filename, opts); err = libbpf_get_error(obj); if (err) { if (err == -ENOENT) pr_debug( "Couldn't load the eBPF program (libbpf said 'no such file').\n" "Maybe the program was compiled with a too old " "version of LLVM (need v9.0+)?\n"); return ERR_PTR(err); } return obj; } static int do_xdp_attach(int ifindex, int prog_fd, int old_fd, __u32 xdp_flags) { #ifdef HAVE_LIBBPF_BPF_XDP_ATTACH LIBBPF_OPTS(bpf_xdp_attach_opts, opts, .old_prog_fd = old_fd); return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, &opts); #else DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = old_fd); return bpf_set_link_xdp_fd_opts(ifindex, prog_fd, xdp_flags, old_fd ?
&opts : NULL); #endif } int do_load(const void *cfg, __unused const char *pin_root_path) { const struct loadopt *opt = cfg; struct bpf_program *bpf_prog; char errmsg[STRERR_BUFSIZE]; struct bpf_object *obj; int err = EXIT_SUCCESS; int xdp_flags; int prog_fd; silence_libbpf_logging(); retry: obj = open_bpf_obj(opt->filename, NULL); if (IS_ERR(obj)) { err = PTR_ERR(obj); if (err == -EPERM && !double_rlimit()) goto retry; libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("ERROR: Couldn't open file '%s': %s\n", opt->filename, errmsg); goto out; } err = bpf_object__load(obj); if (err) { if (err == -EPERM && !double_rlimit()) { bpf_object__close(obj); goto retry; } libbpf_strerror(err, errmsg, sizeof(errmsg)); pr_warn("ERROR: Can't load eBPF object: %s(%d)\n", errmsg, err); goto out; } bpf_prog = bpf_object__next_program(obj, NULL); if (!bpf_prog) { pr_warn("ERROR: Couldn't find xdp program in bpf object!\n"); err = -ENOENT; goto out; } prog_fd = bpf_program__fd(bpf_prog); if (prog_fd < 0) { err = prog_fd; libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("ERROR: Couldn't find xdp program's file descriptor: %s\n", errmsg); goto out; } xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; switch (opt->mode) { case XDP_MODE_SKB: xdp_flags |= XDP_FLAGS_SKB_MODE; break; case XDP_MODE_NATIVE: xdp_flags |= XDP_FLAGS_DRV_MODE; break; case XDP_MODE_HW: xdp_flags |= XDP_FLAGS_HW_MODE; break; case XDP_MODE_UNSPEC: break; } err = do_xdp_attach(opt->iface.ifindex, prog_fd, 0, xdp_flags); if (err < 0) { pr_info("ERROR: Failed attaching XDP program to ifindex %d: %s\n", opt->iface.ifindex, strerror(-err)); switch (-err) { case EBUSY: case EEXIST: pr_info("XDP already loaded on device.\n"); break; case EOPNOTSUPP: pr_info("XDP mode not supported; try using SKB mode.\n"); break; default: break; } goto out; } out: return err; } static struct prog_option load_options[] = { DEFINE_OPTION("mode", OPT_ENUM, struct loadopt, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "<mode>", .help = "Load XDP program in <mode>; default native"), DEFINE_OPTION("dev", OPT_IFNAME, struct loadopt, iface, .positional = true, .metavar = "<ifname>", .required = true, .help = "Load on device <ifname>"), DEFINE_OPTION("filename", OPT_STRING, struct loadopt, filename, .positional = true, .metavar = "<filename>", .required = true, .help = "Load program from <filename>"), END_OPTIONS }; enum probe_action { PROBE_CPUMAP_PROGRAM, PROBE_XDP_LOAD_BYTES, }; struct enum_val probe_actions[] = { {"cpumap-prog", PROBE_CPUMAP_PROGRAM}, {"xdp-load-bytes", PROBE_XDP_LOAD_BYTES}, {NULL, 0} }; static const struct probeopt { enum probe_action action; } defaults_probe = {}; int do_probe(const void *cfg, __unused const char *pin_root_path) { const struct probeopt *opt = cfg; bool res = false; switch (opt->action) { case PROBE_CPUMAP_PROGRAM: #ifdef HAVE_BPFTOOL res = sample_probe_cpumap_compat(); #endif break; case PROBE_XDP_LOAD_BYTES: #ifdef HAVE_BPFTOOL res = sample_probe_xdp_load_bytes(); #endif break; default: return EXIT_FAILURE; } pr_debug("Probing for %s: %s\n", probe_actions[opt->action].name, res ? "Supported" : "Unsupported"); return res ? EXIT_SUCCESS : EXIT_FAILURE; }
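/* Example invocation (editor's illustration): the test scripts use the probe * command's exit status directly, e.g. * * ./test-tool probe cpumap-prog && echo "cpumap programs supported" * * which exits 0 (EXIT_SUCCESS) only when the kernel supports attaching * programs to cpumap entries; test_runner.sh builds its skip_if_missing_* * helpers on exactly this behaviour. */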
static struct prog_option probe_options[] = { DEFINE_OPTION("action", OPT_ENUM, struct probeopt, action, .positional = true, .metavar = "<action>", .required = true, .typearg = probe_actions, .help = "Probe for <action>"), END_OPTIONS }; int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: test-tool COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " load - load an XDP program on an interface\n" " probe - probe for kernel features\n" " help - show this help message\n" "\n" "Use 'test-tool COMMAND --help' to see options for each command\n"); return -1; } static const struct prog_command cmds[] = { DEFINE_COMMAND(load, "Load an XDP program on an interface"), DEFINE_COMMAND(probe, "Probe for kernel features"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct loadopt load; struct probeopt probe; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, false); return do_help(NULL, NULL); } xdp-tools-1.5.4/lib/testing/xdp_drop.c0000644000175100001660000000024315003640462017222 0ustar runnerdocker#include <linux/bpf.h> #include <bpf/bpf_helpers.h> SEC("xdp") int xdp_drop(struct xdp_md *ctx) { return XDP_DROP; } char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/testing/.gitignore0000644000175100001660000000001215003640462017221 0ustar runnerdockertest-tool xdp-tools-1.5.4/lib/testing/test_long_func_name.c0000644000175100001660000000122315003640462021413 0ustar runnerdocker#include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <xdp/xdp_helpers.h> #define bpf_debug(fmt, ...) \ { \ char __fmt[] = fmt; \ bpf_trace_printk(__fmt, sizeof(__fmt), \ ##__VA_ARGS__); \ } SEC("xdp") int xdp_test_prog_with_a_long_name(struct xdp_md *ctx) { bpf_debug("PASS[1]: prog %u\n", ctx->ingress_ifindex); return XDP_PASS; } SEC("xdp") int xdp_test_prog_with_a_long_name_too(struct xdp_md *ctx) { bpf_debug("PASS[2]: prog %u\n", ctx->ingress_ifindex); return XDP_PASS; } struct { __uint(priority, 5); __uint(XDP_PASS, 1); } XDP_RUN_CONFIG(xdp_test_prog_with_a_long_name); char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/lib/testing/test_config.install.sh0000644000175100001660000000041715003640462021547 0ustar runnerdocker# Test config for having tools in $PATH - to be installed along with the # test runners in /usr/share/xdp-tools XDPDUMP=xdpdump XDP_BENCH=xdp-bench XDP_FILTER=xdp-filter XDP_FORWARD=xdp-forward XDP_LOADER=xdp-loader XDP_MONITOR=xdp-monitor XDP_TRAFFICGEN=xdp-trafficgen xdp-tools-1.5.4/lib/testing/Makefile0000644000175100001660000000070515003640462016702 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) TEST_TARGETS := test-tool XDP_TARGETS := test_long_func_name xdp_drop xdp_pass SCRIPTS_FILES := test_runner.sh setup-netns-env.sh run_tests.sh XDP_OBJ_INSTALL := LIB_DIR = .. include $(LIB_DIR)/common.mk install_local:: install -m 0755 -d $(DESTDIR)$(SCRIPTSDIR) install -m 0644 test_config.install.sh $(DESTDIR)$(SCRIPTSDIR)/test_config.sh install -m 0644 $(XDP_OBJ) $(DESTDIR)$(SCRIPTSDIR)/ xdp-tools-1.5.4/lib/testing/test_runner.sh0000755000175100001660000002665415003640462020160 0ustar runnerdocker#!/bin/bash # SPDX-License-Identifier: GPL-2.0-or-later # # Script to set up and manage tests for xdp-tools. # Based on the test-env script from xdp-tutorial.
# # Author: Toke Høiland-Jørgensen (toke@redhat.com) # Date: 26 May 2020 # Copyright (c) 2020 Red Hat set -o nounset umask 077 TEST_PROG_DIR="${TEST_PROG_DIR:-$(dirname "${BASH_SOURCE[0]}")}" SETUP_SCRIPT="$TEST_PROG_DIR/setup-netns-env.sh" TEST_CONFIG="$TEST_PROG_DIR/test_config.sh" IP6_SUBNET=fc42:dead:cafe # must have exactly three :-separated elements IP6_PREFIX_SIZE=64 # Size of assigned prefixes IP6_FULL_PREFIX_SIZE=48 # Size of IP6_SUBNET IP4_SUBNET=10.11 IP4_PREFIX_SIZE=24 # Size of assigned prefixes IP4_FULL_PREFIX_SIZE=16 # Size of IP4_SUBNET GENERATED_NAME_PREFIX="xdptest" ALL_TESTS="" VERBOSE_TESTS=${V:-0} NUM_NS=2 NEEDED_TOOLS="capinfos ethtool ip ping sed tc tcpdump timeout tshark nft socat" if [ -f "$TEST_CONFIG" ]; then source "$TEST_CONFIG" fi if command -v ping6 >/dev/null 2>&1; then PING6=ping6 else PING6=ping fi # Odd return value for skipping, as only 0-255 is valid. SKIPPED_TEST=249 # Global state variables that will be set by options etc below STATEDIR= CMD= NS= NS_NAMES=() IP6_PREFIX= IP4_PREFIX= INSIDE_IP6= INSIDE_IP4= INSIDE_MAC= OUTSIDE_IP6= OUTSIDE_IP4= OUTSIDE_MAC= ALL_INSIDE_IP6=() ALL_INSIDE_IP4=() is_trace_attach_supported() { if [[ -z "${TRACE_ATTACH_SUPPORT:-}" ]]; then [ -f "$STATEDIR/trace_attach_support" ] && \ TRACE_ATTACH_SUPPORT=$(< "$STATEDIR/trace_attach_support") if [[ -z "${TRACE_ATTACH_SUPPORT:-}" ]]; then RESULT=$($XDP_LOADER load -v "$NS" "$TEST_PROG_DIR/xdp_pass.o" 2>&1) PID=$(start_background "$XDPDUMP -i $NS") RESULT=$(stop_background "$PID") if [[ "$RESULT" == *"The kernel does not support fentry function attach"* ]]; then TRACE_ATTACH_SUPPORT="false" else TRACE_ATTACH_SUPPORT="true" fi echo "$TRACE_ATTACH_SUPPORT" > "$STATEDIR/trace_attach_support" $XDP_LOADER unload "$NS" --all fi fi if [[ "$TRACE_ATTACH_SUPPORT" == "true" ]]; then return 0 else return 1 fi } is_multiprog_supported() { if [[ -z "${MULTIPROG_SUPPORT:-}" ]]; then RESULT=$($XDP_LOADER load -v "$NS" "$TEST_PROG_DIR/xdp_pass.o" 2>&1) if [[ "$RESULT" == *"Compatibility check for dispatcher program failed"* ]]; then MULTIPROG_SUPPORT="false" else MULTIPROG_SUPPORT="true" fi $XDP_LOADER unload "$NS" --all fi if [[ "$MULTIPROG_SUPPORT" == "true" ]]; then return 0 else return 1 fi } is_progmap_supported() { if [[ -z "${PROGMAP_SUPPORT:-}" ]]; then RESULT=$(timeout -s INT 1 $XDP_BENCH redirect-cpu "$NS" -c 0 -r drop -vv 2>&1) if [[ "$RESULT" == *"Create CPU entry failed: Cannot allocate memory"* ]]; then PROGMAP_SUPPORT="false" else PROGMAP_SUPPORT="true" fi fi if [[ "$PROGMAP_SUPPORT" == "true" ]]; then return 0 else return 1 fi } skip_if_missing_veth_rxq() { if ! ethtool -l $NS >/dev/null 2>&1; then exit "$SKIPPED_TEST" fi } skip_if_missing_cpumap_attach() { if ! $TEST_PROG_DIR/test-tool probe cpumap-prog; then exit "$SKIPPED_TEST" fi } skip_if_missing_xdp_load_bytes() { if ! $TEST_PROG_DIR/test-tool probe xdp-load-bytes; then exit "$SKIPPED_TEST" fi } skip_if_missing_kernel_symbol() { if ! grep -q "$1" /proc/kallsyms; then exit "$SKIPPED_TEST" fi } skip_if_legacy_fallback() { if ! is_multiprog_supported; then exit "$SKIPPED_TEST" fi } skip_if_missing_trace_attach() { if ! is_trace_attach_supported; then exit "$SKIPPED_TEST" fi } die() { echo "$1" >&2 exit 1 } start_background() { local TMP_FILE="${STATEDIR}/tmp_proc_$$_$RANDOM" setsid bash -c "$*" &> ${TMP_FILE} & local PID=$! 
sleep 2 # Wait to make sure the command is executed in the background mv "$TMP_FILE" "${STATEDIR}/proc/${PID}" >& /dev/null echo "$PID" } start_background_no_stderr() { local TMP_FILE="${STATEDIR}/tmp_proc_$$_$RANDOM" setsid bash -c "$*" 1> ${TMP_FILE} 2>/dev/null & local PID=$! sleep 2 # Wait to make sure the command is executed in the background mv "$TMP_FILE" "${STATEDIR}/proc/${PID}" >& /dev/null echo "$PID" } start_background_ns_devnull() { local TMP_FILE="${STATEDIR}/tmp_proc_$$_$RANDOM" setsid ip netns exec "$NS" env TESTENV_NAME="$NS" "$SETUP_SCRIPT" bash -c "$*" 1>/dev/null 2>${TMP_FILE} & local PID=$! sleep 2 # Wait to make sure the command is executed in the background mv "$TMP_FILE" "${STATEDIR}/proc/${PID}" >& /dev/null echo $PID } stop_background() { local PID=$1 local OUTPUT_FILE="${STATEDIR}/proc/${PID}" if kill -SIGINT "-$PID" 2>/dev/null; then sleep 2 # Wait to make sure the buffer is flushed after the shutdown kill -SIGTERM "-$PID" 2>/dev/null && sleep 1 # just in case SIGINT was not enough fi if [ -f "$OUTPUT_FILE" ]; then cat "$OUTPUT_FILE" rm "$OUTPUT_FILE" >& /dev/null fi } check_prereq() { local max_locked_mem=$(ulimit -l) for t in $NEEDED_TOOLS; do command -v "$t" > /dev/null || die "Missing required tool: $t" done if [ "$EUID" -ne "0" ]; then die "This script needs root permissions to run." fi STATEDIR="$(mktemp -d --tmpdir=${TMPDIR:-/tmp} --suffix=.xdptest)" if [ $? -ne 0 ]; then die "Unable to create state dir in $TMPDIR" fi mkdir ${STATEDIR}/proc if [ "$max_locked_mem" != "unlimited" ]; then ulimit -l unlimited || die "Unable to set ulimit" fi mount -t bpf bpf /sys/fs/bpf/ || die "Unable to mount bpffs" } gen_nsname() { local nsname while nsname=$(printf "%s-%04x" "$GENERATED_NAME_PREFIX" $RANDOM) [ -e "$STATEDIR/${nsname}.ns" ] do true; done touch "$STATEDIR/${nsname}.ns" echo $nsname } iface_macaddr() { local iface="$1" ip -br link show dev "$iface" | awk '{print $3}' } set_sysctls() { local iface="$1" local in_ns="${2:-}" local nscmd= [ -n "$in_ns" ] && nscmd="ip netns exec $in_ns" local sysctls_off_v6=(accept_dad accept_ra mldv1_unsolicited_report_interval mldv2_unsolicited_report_interval) local sysctls_on=(forwarding) for s in ${sysctls_off_v6[*]}; do $nscmd sysctl -w net.ipv6.conf.$iface.${s}=0 >/dev/null done for s in ${sysctls_on[*]}; do $nscmd sysctl -w net.ipv6.conf.$iface.${s}=1 >/dev/null $nscmd sysctl -w net.ipv6.conf.all.${s}=1 >/dev/null $nscmd sysctl -w net.ipv4.conf.$iface.${s}=1 >/dev/null $nscmd sysctl -w net.ipv4.conf.all.${s}=1 >/dev/null done } init_ns() { local nsname=$1 local num=$2 local peername="testl-ve-$num" IP6_PREFIX="${IP6_SUBNET}:${num}::" IP4_PREFIX="${IP4_SUBNET}.$((0x$num))." 
INSIDE_IP6="${IP6_PREFIX}2" INSIDE_IP4="${IP4_PREFIX}2" OUTSIDE_IP6="${IP6_PREFIX}1" OUTSIDE_IP4="${IP4_PREFIX}1" ip netns add "$nsname" ip link add dev "$nsname" type veth peer name "$peername" set_sysctls $nsname ethtool -K "$nsname" rxvlan off txvlan off gro on ethtool -K "$peername" rxvlan off txvlan off gro on OUTSIDE_MAC=$(iface_macaddr "$nsname") INSIDE_MAC=$(iface_macaddr "$peername") ip link set dev "$peername" netns "$nsname" ip link set dev "$nsname" up ip addr add dev "$nsname" "${OUTSIDE_IP6}/${IP6_PREFIX_SIZE}" ip -n "$nsname" link set dev "$peername" name veth0 ip -n "$nsname" link set dev lo up ip -n "$nsname" link set dev veth0 up set_sysctls veth0 "$nsname" ip -n "$nsname" addr add dev veth0 "${INSIDE_IP6}/${IP6_PREFIX_SIZE}" # Prevent neighbour queries on the link ip neigh add "$INSIDE_IP6" lladdr "$INSIDE_MAC" dev "$nsname" nud permanent ip -n "$nsname" neigh add "$OUTSIDE_IP6" lladdr "$OUTSIDE_MAC" dev veth0 nud permanent ip addr add dev "$nsname" "${OUTSIDE_IP4}/${IP4_PREFIX_SIZE}" ip -n "$nsname" addr add dev veth0 "${INSIDE_IP4}/${IP4_PREFIX_SIZE}" ip neigh add "$INSIDE_IP4" lladdr "$INSIDE_MAC" dev "$nsname" nud permanent ip -n "$nsname" neigh add "$OUTSIDE_IP4" lladdr "$OUTSIDE_MAC" dev veth0 nud permanent # Add default routes inside the ns ip -n "$nsname" route add default via $OUTSIDE_IP4 dev veth0 ip -n "$nsname" -6 route add default via $OUTSIDE_IP6 dev veth0 ALL_INSIDE_IP4+=($INSIDE_IP4) ALL_INSIDE_IP6+=($INSIDE_IP6) } setup() { local nsname set -o errexit check_prereq for i in $(seq $NUM_NS); do nsname=$(gen_nsname) init_ns $nsname $i NS_NAMES+=($nsname) done set +o errexit NS=$nsname } teardown_ns() { local nsname=$1 ip link del dev "$nsname" ip netns del "$nsname" [ -d "/sys/fs/bpf/$nsname" ] && rmdir "/sys/fs/bpf/$nsname" || true } teardown() { for ns in "${NS_NAMES[@]}"; do teardown_ns $ns done for f in ${STATEDIR}/proc/*; do if [ -f "$f" ]; then local pid="${f/${STATEDIR}\/proc\//}" stop_background "$pid" &> /dev/null || true fi done rm -rf "$STATEDIR" } ns_exec() { ip netns exec "$NS" env TESTENV_NAME="$NS" "$SETUP_SCRIPT" "$@" } is_func() { type "$1" 2>/dev/null | grep -q 'is a function' } check_run() { local ret "$@" ret=$? echo "Command '$@' exited with status $ret" echo "" if [ "$ret" -ne "0" ]; then exit $ret fi } exec_test() { local testn="$1" local output local ret printf " %-30s" "[$testn]" if ! is_func "$testn"; then echo "INVALID" return 1 fi output=$($testn 2>&1) ret=$? 
if [ "$ret" -eq "0" ]; then echo "PASS" elif [ "$ret" -eq "$SKIPPED_TEST" ]; then echo "SKIPPED" ret=0 else echo "FAIL" fi if [ "$ret" -ne "0" ] || [ "$VERBOSE_TESTS" -eq "1" ]; then echo "$output" | sed 's/^/ /' echo " Test $testn exited with return code: $ret" fi return $ret } run_tests() { local TESTS="$*" local ret=0 [ -z "$TESTS" ] && TESTS="$ALL_TESTS" echo " Running tests from $TEST_DEFINITIONS" for testn in $TESTS; do exec_test $testn || ret=1 if is_func cleanup_tests; then cleanup_tests || true fi done return $ret } usage() { echo "Usage: $0 [test names]" >&2 exit 1 } if [ "$EUID" -ne "0" ]; then if command -v sudo >/dev/null 2>&1; then exec sudo env V=${VERBOSE_TESTS} DEBUG_TESTENV=${DEBUG_TESTENV:-0} "$0" "$@" else die "Tests should be run as root" fi else if [ "${DID_UNSHARE:-0}" -ne "1" ]; then echo " Executing tests in separate net- and mount namespaces" >&2 exec env DID_UNSHARE=1 unshare -n -m "$0" "$@" fi fi export XDPDUMP export XDP_BENCH export XDP_FILTER export XDP_FORWARD export XDP_LOADER export XDP_MONITOR export XDP_TRAFFICGEN TEST_DEFINITIONS="${1:-}" [ -f "$TEST_DEFINITIONS" ] || usage source "$TEST_DEFINITIONS" TOOL_TESTS_DIR="$(dirname "$TEST_DEFINITIONS")" shift trap teardown EXIT setup if [ "${DEBUG_TESTENV:-0}" -eq "1" ] && [ -n "$SHELL" ]; then echo "Entering interactive testenv debug - Ctrl-D to exit and resume test execution" $SHELL fi run_tests "$@" xdp-tools-1.5.4/lib/export-man.el0000644000175100001660000000570115003640462016202 0ustar runnerdocker;;; export-man.el -- Export man page and filter result ;;; Commentary: ;;; ;;; Exports a man page and filters the result so we can exclude parts of the man ;;; page based on features enabled in the build system. ;;; ;;; The export-man-page function is called from common.mk with --eval ;;; Code: (require 'ox-man) (require 'parse-time) (defvar feature-exclude-tags '(("LIBBPF_PERF_BUFFER__CONSUME" . "feat_perfbuf")) "Mapping of feature strings to exclude tags for man page export.") (defvar feature-exclude-regexes '(("LIBBPF_PERF_BUFFER__CONSUME" . "--perf-wakeup")) "Mapping of feature strings to regexes to filter form export man page.") (defun get-feature-values (enabled-feats exclude-list) "Get feature-tag values for ENABLED-FEATS based on EXCLUDE-LIST." (delq nil (mapcar #'(lambda (f) (unless (member (car f) enabled-feats) (cdr f))) exclude-list))) (defun replace-regexp-in-buffer (regexp replace) "Replace REGEXP with REPLACE in buffer." (let ((case-fold-search nil)) (goto-char 0) (when (re-search-forward regexp nil t) (replace-match replace)))) (defun open-file (filename) "Find file FILENAME but complain if it doesn't exist." (if (file-exists-p filename) (find-file filename) (error "File not found: %s" filename))) (defun get-file-mod-time (filename) (let* ((file-modtime (file-attribute-modification-time (file-attributes filename))) (git-logtime (ignore-errors (shell-command-to-string (format "git log -1 --pretty='format:%%cI' -- %s" filename)))) (git-modtime (ignore-errors (parse-iso8601-time-string git-logtime)))) (or git-modtime file-modtime))) (defun filter-post-export (file feat-list version modtime) "Post-process exported FILE based on features in FEAT-LIST and VERSION." 
(let ((exclude-regexes (get-feature-values feat-list feature-exclude-regexes)) (date (format-time-string "%B %_d, %Y" modtime)) (make-backup-files nil)) (with-current-buffer (open-file file) (mapc #'(lambda (r) (delete-matching-lines r)) exclude-regexes) (replace-regexp-in-buffer "DATE" date) (replace-regexp-in-buffer "VERSION" version) (replace-regexp-in-buffer "^.SH \"\\([^\"]+\\) - \\([^\"]+\\)\"" ".SH \"NAME\"\n\\1 \\\\- \\2\n.SH \"SYNOPSIS\"") (delete-trailing-whitespace) (save-buffer)))) (defun export-man-page (outfile infile enabled-features version) "Export man page from INFILE into OUTFILE with ENABLED-FEATURES and VERSION." (let* ((feat-list (split-string enabled-features)) (org-export-exclude-tags (get-feature-values feat-list feature-exclude-tags)) (modtime (get-file-mod-time infile))) (with-current-buffer (open-file infile) (org-export-to-file 'man outfile) (filter-post-export outfile feat-list version modtime)))) (provide 'export-man) ;;; export-man.el ends here xdp-tools-1.5.4/lib/defines.mk0000644000175100001660000000265715003640462015541 0ustar runnerdockerCFLAGS ?= -O2 -g BPF_CFLAGS ?= -Wall -Wno-unused-value -Wno-pointer-sign \ -Wno-compare-distinct-pointer-types \ -Wno-visibility -Werror -fno-stack-protector BPF_TARGET ?= bpf HAVE_FEATURES := include $(LIB_DIR)/../config.mk include $(LIB_DIR)/../version.mk PREFIX?=/usr/local LIBDIR?=$(PREFIX)/lib SBINDIR?=$(PREFIX)/sbin HDRDIR?=$(PREFIX)/include/xdp DATADIR?=$(PREFIX)/share RUNDIR?=/run MANDIR?=$(DATADIR)/man SCRIPTSDIR?=$(DATADIR)/xdp-tools BPF_DIR_MNT ?=/sys/fs/bpf BPF_OBJECT_DIR ?=$(LIBDIR)/bpf MAX_DISPATCHER_ACTIONS ?=10 HEADER_DIR = $(LIB_DIR)/../headers TEST_DIR = $(LIB_DIR)/testing LIBXDP_DIR := $(LIB_DIR)/libxdp LIBBPF_DIR := $(LIB_DIR)/libbpf DEFINES := -DBPF_DIR_MNT=\"$(BPF_DIR_MNT)\" -DBPF_OBJECT_PATH=\"$(BPF_OBJECT_DIR)\" \ -DMAX_DISPATCHER_ACTIONS=$(MAX_DISPATCHER_ACTIONS) -DTOOLS_VERSION=\"$(TOOLS_VERSION)\" \ -DLIBBPF_VERSION=\"$(LIBBPF_VERSION)\" -DRUNDIR=\"$(RUNDIR)\" DEFINES += $(foreach feat,$(HAVE_FEATURES),-DHAVE_$(feat)) ifneq ($(PRODUCTION),1) DEFINES += -DDEBUG endif ifeq ($(SYSTEM_LIBBPF),y) DEFINES += -DLIBBPF_DYNAMIC endif DEFINES += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 CFLAGS += -std=gnu11 -Wextra -Werror $(DEFINES) $(ARCH_INCLUDES) BPF_CFLAGS += $(DEFINES) $(filter -ffile-prefix-map=%,$(CFLAGS)) $(filter -I%,$(CFLAGS)) $(ARCH_INCLUDES) CONFIGMK := $(LIB_DIR)/../config.mk LIBMK := Makefile $(CONFIGMK) $(LIB_DIR)/defines.mk $(LIB_DIR)/common.mk $(LIB_DIR)/../version.mk xdp-tools-1.5.4/lib/common.mk0000644000175100001660000001046215003640462015407 0ustar runnerdocker# Common Makefile parts for BPF-building with libbpf # -------------------------------------------------- # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) # # This file should be included from your Makefile like: # LIB_DIR = ../lib/ # include $(LIB_DIR)/common.mk # # It is expected that you define the variables: # XDP_TARGETS and USER_TARGETS # as a space-separated list #
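# For example (editor's illustration; the tool and file names are hypothetical), a minimal tool Makefile could look like:
#
#   TOOL_NAME := my-tool
#   XDP_TARGETS := my_prog          # compiles my_prog.c into the BPF object my_prog.o
#   USER_TARGETS := my-tool         # compiles my-tool.c into a userspace binary
#   LIB_DIR = ../lib
#   include $(LIB_DIR)/common.mk
#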
XDP_C = ${XDP_TARGETS:=.c} XDP_OBJ = ${XDP_C:.c=.o} BPF_SKEL_OBJ = ${BPF_SKEL_TARGETS:=.o} BPF_SKEL_H = ${BPF_SKEL_OBJ:.bpf.o=.skel.h} USER_C := ${USER_TARGETS:=.c} USER_OBJ := ${USER_C:.c=.o} TEST_C := ${TEST_TARGETS:=.c} TEST_OBJ := ${TEST_C:.c=.o} XDP_OBJ_INSTALL ?= $(XDP_OBJ) MAN_FILES := $(MAN_PAGE) # Expect this is defined by including Makefile, but define if not LIB_DIR ?= ../lib LDLIBS ?= $(USER_LIBS) LDLIBS += -lm include $(LIB_DIR)/defines.mk include $(LIB_DIR)/libxdp/libxdp.mk # get list of objects in util include $(LIB_DIR)/util/util.mk # Extend if including Makefile already added some LIB_OBJS += $(foreach obj,$(UTIL_OBJS),$(LIB_DIR)/util/$(obj)) EXTRA_DEPS += EXTRA_USER_DEPS += LDFLAGS+=-L$(LIBXDP_DIR) ifeq ($(DYNAMIC_LIBXDP),1) LDLIBS:=-lxdp $(LDLIBS) OBJECT_LIBXDP:=$(LIBXDP_DIR)/libxdp.so.$(LIBXDP_VERSION) else LDLIBS:=-l:libxdp.a $(LDLIBS) OBJECT_LIBXDP:=$(LIBXDP_DIR)/libxdp.a endif # Detect submodule libbpf source file changes ifeq ($(SYSTEM_LIBBPF),n) LIBBPF_SOURCES := $(wildcard $(LIBBPF_DIR)/src/*.[ch]) endif LIBXDP_SOURCES := $(wildcard $(LIBXDP_DIR)/*.[ch] $(LIBXDP_DIR)/*.in) # BPF-prog kern and userspace shares struct via header file: KERN_USER_H ?= $(wildcard common_kern_user.h) CFLAGS += -I$(HEADER_DIR) -I$(LIB_DIR)/util $(ARCH_INCLUDES) BPF_CFLAGS += -I$(HEADER_DIR) $(ARCH_INCLUDES) BPF_HEADERS := $(wildcard $(HEADER_DIR)/bpf/*.h) $(wildcard $(HEADER_DIR)/xdp/*.h) all: $(USER_TARGETS) $(XDP_OBJ) $(EXTRA_TARGETS) $(TEST_TARGETS) man .PHONY: clean clean:: $(Q)rm -f $(USER_TARGETS) $(XDP_OBJ) $(TEST_TARGETS) $(USER_OBJ) $(TEST_OBJ) $(USER_GEN) $(BPF_SKEL_H) *.ll .PHONY: install install: all install_local install -m 0755 -d $(DESTDIR)$(SBINDIR) install -m 0755 -d $(DESTDIR)$(BPF_OBJECT_DIR) $(if $(USER_TARGETS),install -m 0755 $(USER_TARGETS) $(DESTDIR)$(SBINDIR)) $(if $(XDP_OBJ_INSTALL),install -m 0644 $(XDP_OBJ_INSTALL) $(DESTDIR)$(BPF_OBJECT_DIR)) $(if $(MAN_FILES),install -m 0755 -d $(DESTDIR)$(MANDIR)/man8) $(if $(MAN_FILES),install -m 0644 $(MAN_FILES) $(DESTDIR)$(MANDIR)/man8) $(if $(SCRIPTS_FILES),install -m 0755 -d $(DESTDIR)$(SCRIPTSDIR)) $(if $(SCRIPTS_FILES),install -m 0755 $(SCRIPTS_FILES) $(DESTDIR)$(SCRIPTSDIR)) $(if $(TEST_FILE),install -m 0755 -d $(DESTDIR)$(SCRIPTSDIR)/tests/$(TOOL_NAME)) $(if $(TEST_FILE),install -m 0644 $(TEST_FILE) $(DESTDIR)$(SCRIPTSDIR)/tests/$(TOOL_NAME)) $(if $(TEST_FILE_DEPS),install -m 0644 $(TEST_FILE_DEPS) $(DESTDIR)$(SCRIPTSDIR)/tests/$(TOOL_NAME)) $(if $(TEST_TARGETS),install -m 0755 $(TEST_TARGETS) $(DESTDIR)$(SCRIPTSDIR)) .PHONY: install_local install_local:: $(OBJECT_LIBBPF): $(LIBBPF_SOURCES) $(Q)$(MAKE) -C $(LIB_DIR) libbpf $(OBJECT_LIBXDP): $(LIBXDP_SOURCES) $(Q)$(MAKE) -C $(LIBXDP_DIR) $(CONFIGMK): $(Q)$(MAKE) -C $(LIB_DIR)/..
config.mk # Create expansions for dependencies LIB_H := ${LIB_OBJS:.o=.h} # Detect if any of common obj changed and create dependency on .h-files $(LIB_OBJS): %.o: %.c %.h $(LIB_H) $(Q)$(MAKE) -C $(dir $@) $(notdir $@) ALL_EXEC_TARGETS=$(USER_TARGETS) $(TEST_TARGETS) $(ALL_EXEC_TARGETS): %: %.c $(OBJECT_LIBBPF) $(OBJECT_LIBXDP) $(LIBMK) $(LIB_OBJS) $(KERN_USER_H) $(EXTRA_DEPS) $(EXTRA_USER_DEPS) $(BPF_SKEL_H) $(USER_EXTRA_C) $(QUIET_CC)$(CC) -Wall $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -o $@ $(LIB_OBJS) \ $< $(USER_EXTRA_C) $(LDLIBS) $(XDP_OBJ): %.o: %.c $(KERN_USER_H) $(EXTRA_DEPS) $(BPF_HEADERS) $(LIBMK) $(QUIET_CLANG)$(CLANG) -target $(BPF_TARGET) $(BPF_CFLAGS) -O2 -c -g -o $@ $< $(BPF_SKEL_H): %.skel.h: %.bpf.o $(QUIET_GEN)$(BPFTOOL) gen skeleton $< name $(notdir ${@:.skel.h=}) > $@ .PHONY: man ifeq ($(EMACS),) man: ; else man: $(MAN_PAGE) $(MAN_PAGE): README.org $(LIBMK) $(LIB_DIR)/export-man.el $(QUIET_GEN)$(EMACS) -Q --batch --load "$(LIB_DIR)/export-man.el" \ --eval "(export-man-page \"$@\" \"$<\" \"$(HAVE_FEATURES)\" \"v$(TOOLS_VERSION)\")" endif .PHONY: test ifeq ($(TEST_FILE),) test: @echo " No tests defined" else test: all $(Q)$(TEST_DIR)/test_runner.sh $(TEST_FILE) $(TESTS) endif xdp-tools-1.5.4/lib/libbpf/0000755000175100001660000000000014706536574015042 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/0000755000175100001660000000000014706536574015435 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/vmtest/0000755000175100001660000000000014706536574016757 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/vmtest/run_selftests.sh0000755000175100001660000000436714706536574022230 0ustar runnerdocker#!/bin/bash set -euo pipefail source $(cd $(dirname $0) && pwd)/helpers.sh ARCH=$(uname -m) STATUS_FILE=/exitstatus read_lists() { (for path in "$@"; do if [[ -s "$path" ]]; then cat "$path" fi; done) | cut -d'#' -f1 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | tr -s '\n' ',' } test_progs() { if [[ "${KERNEL}" != '4.9.0' ]]; then foldable start test_progs "Testing test_progs" # "&& true" does not change the return code (it is not executed # if the Python script fails), but it prevents exiting on a # failure due to the "set -e". ./test_progs ${DENYLIST:+-d"$DENYLIST"} ${ALLOWLIST:+-a"$ALLOWLIST"} && true echo "test_progs:$?" >> "${STATUS_FILE}" foldable end test_progs fi } test_progs_no_alu32() { foldable start test_progs-no_alu32 "Testing test_progs-no_alu32" ./test_progs-no_alu32 ${DENYLIST:+-d"$DENYLIST"} ${ALLOWLIST:+-a"$ALLOWLIST"} && true echo "test_progs-no_alu32:$?" >> "${STATUS_FILE}" foldable end test_progs-no_alu32 } test_maps() { if [[ "${KERNEL}" == 'latest' ]]; then foldable start test_maps "Testing test_maps" ./test_maps && true echo "test_maps:$?" >> "${STATUS_FILE}" foldable end test_maps fi } test_verifier() { if [[ "${KERNEL}" == 'latest' ]]; then foldable start test_verifier "Testing test_verifier" ./test_verifier && true echo "test_verifier:$?" 
>> "${STATUS_FILE}" foldable end test_verifier fi } foldable end vm_init foldable start kernel_config "Kconfig" zcat /proc/config.gz foldable end kernel_config configs_path=/${PROJECT_NAME}/selftests/bpf local_configs_path=${PROJECT_NAME}/vmtest/configs DENYLIST=$(read_lists \ "$configs_path/DENYLIST" \ "$configs_path/DENYLIST.${ARCH}" \ "$local_configs_path/DENYLIST" \ "$local_configs_path/DENYLIST-${KERNEL}" \ "$local_configs_path/DENYLIST-${KERNEL}.${ARCH}" \ ) ALLOWLIST=$(read_lists \ "$configs_path/ALLOWLIST" \ "$configs_path/ALLOWLIST.${ARCH}" \ "$local_configs_path/ALLOWLIST" \ "$local_configs_path/ALLOWLIST-${KERNEL}" \ "$local_configs_path/ALLOWLIST-${KERNEL}.${ARCH}" \ ) echo "DENYLIST: ${DENYLIST}" echo "ALLOWLIST: ${ALLOWLIST}" cd ${PROJECT_NAME}/selftests/bpf if [ $# -eq 0 ]; then test_progs test_progs_no_alu32 # test_maps test_verifier else for test_name in "$@"; do "${test_name}" done fi xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/0000755000175100001660000000000014706536574020407 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/ALLOWLIST-5.5.00000644000175100001660000000133314706536574022347 0ustar runnerdocker# attach_probe autoload bpf_verif_scale cgroup_attach_autodetach cgroup_attach_override core_autosize core_extern core_read_macros core_reloc core_retro cpu_mask endian get_branch_snapshot get_stackid_cannot_attach global_data global_data_init global_func_args hashmap legacy_printk linked_funcs linked_maps map_lock obj_name perf_buffer perf_event_stackmap pinning pkt_md_access probe_user queue_stack_map raw_tp_writable_reject_nbd_invalid raw_tp_writable_test_run rdonly_maps section_names signal_pending sockmap_ktls spinlock stacktrace_map stacktrace_map_raw_tp static_linked task_fd_query_rawtp task_fd_query_tp tc_bpf tcp_estats test_global_funcs/arg_tag_ctx* tp_attach_query usdt/urand_pid_attach xdp xdp_noinline xdp_perf xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/DENYLIST-latest0000644000175100001660000000145214706536574023061 0ustar runnerdockerdecap_sanity # weird failure with decap_sanity_ns netns already existing, TBD empty_skb # waiting the fix in bpf tree to make it to bpf-next bpf_nf/tc-bpf-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200 bpf_nf/xdp-ct # test consistently failing on x86: https://github.com/libbpf/libbpf/pull/698#issuecomment-1590341200 kprobe_multi_bench_attach # suspected to cause crashes in CI find_vma # test consistently fails on latest kernel, see https://github.com/libbpf/libbpf/issues/754 for details bpf_cookie/perf_event send_signal/send_signal_nmi send_signal/send_signal_nmi_thread lwt_reroute # crashes kernel, fix pending upstream tc_links_ingress # fails, same fix is pending upstream tc_redirect # enough is enough, banned for life for flakiness xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/DENYLIST0000644000175100001660000000172114706536574021566 0ustar runnerdocker# TEMPORARY btf_dump/btf_dump: syntax kprobe_multi_bench_attach core_reloc/enum64val core_reloc/size___diff_sz core_reloc/type_based___diff_sz test_ima # All of CI is broken on it following 6.3-rc1 merge lwt_reroute # crashes kernel after netnext merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy" tc_links_ingress # started failing after net-next merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy" xdp_bonding/xdp_bonding_features # started failing after net merge from 359e54a93ab4 "l2tp: pass correct message length to ip6_append_data" 
tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets") migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation verify_pkcs7_sig # keeps failing xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/ALLOWLIST-4.9.00000644000175100001660000000016714706536574022356 0ustar runnerdocker# btf_dump -- need to disable data dump sub-tests core_retro cpu_mask hashmap legacy_printk perf_buffer section_names xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/DENYLIST-5.5.00000644000175100001660000000034114706536574022226 0ustar runnerdocker# This complements ALLOWLIST-5.5.0 but excludes subtest that can't work on 5.5 btf # "size check test", "func (Non zero vlen)" tailcalls # tailcall_bpf2bpf_1, tailcall_bpf2bpf_2, tailcall_bpf2bpf_3 tc_bpf/tc_bpf_non_root xdp-tools-1.5.4/lib/libbpf/ci/vmtest/configs/DENYLIST-latest.s390x0000644000175100001660000000114614706536574023746 0ustar runnerdocker# TEMPORARY sockmap_listen/sockhash VSOCK test_vsock_redir usdt/basic # failing verifier due to bounds check after LLVM update usdt/multispec # same as above deny_namespace # not yet in bpf denylist tc_redirect/tc_redirect_dtime # very flaky lru_bug # not yet in bpf-next denylist # Disabled temporarily for a crash. # https://lore.kernel.org/bpf/c9923c1d-971d-4022-8dc8-1364e929d34c@gmail.com/ dummy_st_ops/dummy_init_ptr_arg fexit_bpf2bpf tailcalls trace_ext xdp_bpf2bpf xdp_metadata xdp-tools-1.5.4/lib/libbpf/ci/vmtest/helpers.sh0000755000175100001660000000115014706536574020755 0ustar runnerdocker# shellcheck shell=bash # $1 - start or end # $2 - fold identifier, no spaces # $3 - fold section description foldable() { local YELLOW='\033[1;33m' local NOCOLOR='\033[0m' if [ $1 = "start" ]; then line="::group::$2" if [ ! -z "${3:-}" ]; then line="$line - ${YELLOW}$3${NOCOLOR}" fi else line="::endgroup::" fi echo -e "$line" } __print() { local TITLE="" if [[ -n $2 ]]; then TITLE=" title=$2" fi echo "::$1${TITLE}::$3" } # $1 - title # $2 - message print_error() { __print error $1 $2 } # $1 - title # $2 - message print_notice() { __print notice $1 $2 } xdp-tools-1.5.4/lib/libbpf/ci/managers/0000755000175100001660000000000014706536574017232 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/managers/travis_wait.bash0000644000175100001660000000271614706536574022433 0ustar runnerdocker# This was borrowed from https://github.com/travis-ci/travis-build/tree/master/lib/travis/build/bash # to get around https://github.com/travis-ci/travis-ci/issues/9979. It should probably be removed # as soon as Travis CI has started to provide an easy way to export the functions to bash scripts. travis_jigger() { local cmd_pid="${1}" shift local timeout="${1}" shift local count=0 echo -e "\\n" while [[ "${count}" -lt "${timeout}" ]]; do count="$((count + 1))" echo -ne "Still running (${count} of ${timeout}): ${*}\\r" sleep 60 done echo -e "\\n${ANSI_RED}Timeout (${timeout} minutes) reached. 
Terminating \"${*}\"${ANSI_RESET}\\n" kill -9 "${cmd_pid}" } travis_wait() { local timeout="${1}" if [[ "${timeout}" =~ ^[0-9]+$ ]]; then shift else timeout=20 fi local cmd=("${@}") local log_file="travis_wait_${$}.log" "${cmd[@]}" &>"${log_file}" & local cmd_pid="${!}" travis_jigger "${!}" "${timeout}" "${cmd[@]}" & local jigger_pid="${!}" local result { set +e wait "${cmd_pid}" 2>/dev/null result="${?}" ps -p"${jigger_pid}" &>/dev/null && kill "${jigger_pid}" set -e } if [[ "${result}" -eq 0 ]]; then echo -e "\\n${ANSI_GREEN}The command ${cmd[*]} exited with ${result}.${ANSI_RESET}" else echo -e "\\n${ANSI_RED}The command ${cmd[*]} exited with ${result}.${ANSI_RESET}" fi echo -e "\\n${ANSI_GREEN}Log:${ANSI_RESET}\\n" cat "${log_file}" return "${result}" } xdp-tools-1.5.4/lib/libbpf/ci/managers/debian.sh0000755000175100001660000000677014706536574021025 0ustar runnerdocker#!/bin/bash PHASES=(${@:-SETUP RUN RUN_ASAN CLEANUP}) DEBIAN_RELEASE="${DEBIAN_RELEASE:-testing}" CONT_NAME="${CONT_NAME:-libbpf-debian-$DEBIAN_RELEASE}" ENV_VARS="${ENV_VARS:-}" DOCKER_RUN="${DOCKER_RUN:-docker run}" REPO_ROOT="${REPO_ROOT:-$PWD}" ADDITIONAL_DEPS=(pkgconf) EXTRA_CFLAGS="" EXTRA_LDFLAGS="" function info() { echo -e "\033[33;1m$1\033[0m" } function error() { echo -e "\033[31;1m$1\033[0m" } function docker_exec() { docker exec $ENV_VARS $CONT_NAME "$@" } set -eu source "$(dirname $0)/travis_wait.bash" for phase in "${PHASES[@]}"; do case $phase in SETUP) info "Setup phase" info "Using Debian $DEBIAN_RELEASE" docker --version docker pull debian:$DEBIAN_RELEASE info "Starting container $CONT_NAME" $DOCKER_RUN -v $REPO_ROOT:/build:rw \ -w /build --privileged=true --name $CONT_NAME \ -dit --net=host debian:$DEBIAN_RELEASE /bin/bash echo -e "::group::Build Env Setup" docker_exec bash -c "echo deb-src http://deb.debian.org/debian $DEBIAN_RELEASE main >>/etc/apt/sources.list" docker_exec apt-get -y update docker_exec apt-get -y install aptitude docker_exec aptitude -y install make libz-dev libelf-dev docker_exec aptitude -y install "${ADDITIONAL_DEPS[@]}" echo -e "::endgroup::" ;; RUN|RUN_CLANG|RUN_CLANG14|RUN_CLANG15|RUN_CLANG16|RUN_GCC10|RUN_GCC11|RUN_GCC12|RUN_ASAN|RUN_CLANG_ASAN|RUN_GCC10_ASAN) CC="cc" if [[ "$phase" =~ "RUN_CLANG(\d+)(_ASAN)?" ]]; then ENV_VARS="-e CC=clang-${BASH_REMATCH[1]} -e CXX=clang++-${BASH_REMATCH[1]}" CC="clang-${BASH_REMATCH[1]}" elif [[ "$phase" = *"CLANG"* ]]; then ENV_VARS="-e CC=clang -e CXX=clang++" CC="clang" elif [[ "$phase" =~ "RUN_GCC(\d+)(_ASAN)?" ]]; then ENV_VARS="-e CC=gcc-${BASH_REMATCH[1]} -e CXX=g++-${BASH_REMATCH[1]}" CC="gcc-${BASH_REMATCH[1]}" fi if [[ "$phase" = *"ASAN"* ]]; then EXTRA_CFLAGS="${EXTRA_CFLAGS} -fsanitize=address,undefined" EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -fsanitize=address,undefined" fi if [[ "$CC" != "cc" ]]; then docker_exec aptitude -y install "$CC" else docker_exec aptitude -y install gcc fi docker_exec mkdir build install docker_exec ${CC} --version info "build" docker_exec make -j$((4*$(nproc))) EXTRA_CFLAGS="${EXTRA_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LDFLAGS}" -C ./src -B OBJDIR=../build info "ldd build/libbpf.so:" docker_exec ldd build/libbpf.so if ! docker_exec ldd build/libbpf.so | grep -q libelf; then error "No reference to libelf.so in libbpf.so!" 
            exit 1
        fi
        info "install"
        docker_exec make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install
        info "link binary"
        docker_exec bash -c "EXTRA_CFLAGS=\"${EXTRA_CFLAGS}\" EXTRA_LDFLAGS=\"${EXTRA_LDFLAGS}\" ./ci/managers/test_compile.sh"
        ;;
    CLEANUP)
        info "Cleanup phase"
        docker stop $CONT_NAME
        docker rm -f $CONT_NAME
        ;;
    *)
        echo >&2 "Unknown phase '$phase'"
        exit 1
    esac
done
xdp-tools-1.5.4/lib/libbpf/ci/managers/ubuntu.sh0000755000175100001660000000130414706536574021111 0ustar runnerdocker#!/bin/bash
set -eux

RELEASE="focal"

apt-get update
apt-get install -y pkg-config

source "$(dirname $0)/travis_wait.bash"

cd $REPO_ROOT

EXTRA_CFLAGS="-Werror -Wall -fsanitize=address,undefined"
EXTRA_LDFLAGS="-Werror -Wall -fsanitize=address,undefined"

mkdir build install
cc --version
make -j$((4*$(nproc))) EXTRA_CFLAGS="${EXTRA_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LDFLAGS}" -C ./src -B OBJDIR=../build
ldd build/libbpf.so
if ! ldd build/libbpf.so | grep -q libelf; then
    echo "FAIL: No reference to libelf.so in libbpf.so!"
    exit 1
fi
make -j$((4*$(nproc))) -C src OBJDIR=../build DESTDIR=../install install
EXTRA_CFLAGS=${EXTRA_CFLAGS} EXTRA_LDFLAGS=${EXTRA_LDFLAGS} $(dirname $0)/test_compile.sh
xdp-tools-1.5.4/lib/libbpf/ci/managers/test_compile.sh0000755000175100001660000000052214706536574022257 0ustar runnerdocker#!/bin/bash
set -euox pipefail

EXTRA_CFLAGS=${EXTRA_CFLAGS:-}
EXTRA_LDFLAGS=${EXTRA_LDFLAGS:-}

cat << EOF > main.c
#include <bpf/libbpf.h>
int main() {
    return bpf_object__open(0) < 0;
}
EOF

# static linking
${CC:-cc} ${EXTRA_CFLAGS} ${EXTRA_LDFLAGS} -o main -I./include/uapi -I./install/usr/include main.c ./build/libbpf.a -lelf -lz
xdp-tools-1.5.4/lib/libbpf/ci/diffs/0000755000175100001660000000000014706536574016530 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/diffs/0001-selftests-bpf-fix-inet_csk_accept-prototype-in-test_.patch0000644000175100001660000000236314706536574032377 0ustar runnerdockerFrom 0daad0a615e687e1247230f3d0c31ae60ba32314 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 28 May 2024 15:29:38 -0700
Subject: [PATCH bpf-next] selftests/bpf: fix inet_csk_accept prototype in
 test_sk_storage_tracing.c

Recent kernel change ([0]) changed inet_csk_accept() prototype. Adapt
progs/test_sk_storage_tracing.c to take that into account.

[0] 92ef0fd55ac8 ("net: change proto and proto_ops accept type")

Signed-off-by: Andrii Nakryiko
---
 tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 02e718f06e0f..40531e56776e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
 }
 
 SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
 	     struct sock *accepted_sk)
 {
 	set_task_info(accepted_sk);
-- 
2.43.0
xdp-tools-1.5.4/lib/libbpf/ci/diffs/0003-selftests-bpf-Fix-uprobe-consumer-test.patch0000644000175100001660000000475214706536574027542 0ustar runnerdockerFrom affb32e4f056883f285f8535b766293b85752fb4 Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Tue, 24 Sep 2024 13:07:30 +0200
Subject: [PATCH] selftests/bpf: Fix uprobe consumer test

With newly merged code the uprobe behaviour is slightly different
and affects uprobe consumer test.
We no longer need to check if the uprobe object is still preserved after removing last uretprobe, because it stays as long as there's pending/installed uretprobe instance. This allows to run uretprobe consumers registered 'after' uprobe was hit even if previous uretprobe got unregistered before being hit. The uprobe object will be now removed after the last uprobe ref is released and in such case it's held by ri->uprobe (return instance) which is released after the uretprobe is hit. Reported-by: Ihor Solodrai Signed-off-by: Jiri Olsa Signed-off-by: Daniel Borkmann Tested-by: Ihor Solodrai Closes: https://lore.kernel.org/bpf/w6U8Z9fdhjnkSp2UaFaV1fGqJXvfLEtDKEUyGDkwmoruDJ_AgF_c0FFhrkeKW18OqiP-05s9yDKiT6X-Ns-avN_ABf0dcUkXqbSJN1TQSXo=@pm.me/ --- .../testing/selftests/bpf/prog_tests/uprobe_multi_test.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 844f6fc8487b..c1ac813ff9ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -869,21 +869,14 @@ static void consumer_test(struct uprobe_multi_consumers *skel, fmt = "prog 0/1: uprobe"; } else { /* - * uprobe return is tricky ;-) - * * to trigger uretprobe consumer, the uretprobe needs to be installed, * which means one of the 'return' uprobes was alive when probe was hit: * * idxs: 2/3 uprobe return in 'installed' mask - * - * in addition if 'after' state removes everything that was installed in - * 'before' state, then uprobe kernel object goes away and return uprobe - * is not installed and we won't hit it even if it's in 'after' state. */ unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */ - unsigned long probe_preserved = before & after; /* did uprobe go away */ - if (had_uretprobes && probe_preserved && test_bit(idx, after)) + if (had_uretprobes && test_bit(idx, after)) val++; fmt = "idx 2/3: uretprobe"; } -- 2.34.1 xdp-tools-1.5.4/lib/libbpf/ci/diffs/.keep0000644000175100001660000000000014706536574017443 0ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/ci/diffs/0002-xdp-bonding-Fix-feature-flags-when-there-are-no-slav.patch0000644000175100001660000000441514706536574032002 0ustar runnerdockerFrom f267f262815033452195f46c43b572159262f533 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 5 Mar 2024 10:08:28 +0100 Subject: [PATCH 2/2] xdp, bonding: Fix feature flags when there are no slave devs anymore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 9b0ed890ac2a ("bonding: do not report NETDEV_XDP_ACT_XSK_ZEROCOPY") changed the driver from reporting everything as supported before a device was bonded into having the driver report that no XDP feature is supported until a real device is bonded as it seems to be more truthful given eventually real underlying devices decide what XDP features are supported. The change however did not take into account when all slave devices get removed from the bond device. In this case after 9b0ed890ac2a, the driver keeps reporting a feature mask of 0x77, that is, NETDEV_XDP_ACT_MASK & ~NETDEV_XDP_ACT_XSK_ZEROCOPY whereas it should have reported a feature mask of 0. Fix it by resetting XDP feature flags in the same way as if no XDP program is attached to the bond device. This was uncovered by the XDP bond selftest which let BPF CI fail. 
After adjusting the starting masks on the latter to 0 instead of NETDEV_XDP_ACT_MASK the test passes again together with this fix. Fixes: 9b0ed890ac2a ("bonding: do not report NETDEV_XDP_ACT_XSK_ZEROCOPY") Signed-off-by: Daniel Borkmann Cc: Magnus Karlsson Cc: Prashant Batra Cc: Toke Høiland-Jørgensen Cc: Jakub Kicinski Reviewed-by: Toke Høiland-Jørgensen Message-ID: <20240305090829.17131-1-daniel@iogearbox.net> Signed-off-by: Alexei Starovoitov --- drivers/net/bonding/bond_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a11748b8d69b..cd0683bcca03 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev) ASSERT_RTNL(); - if (!bond_xdp_check(bond)) { + if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { xdp_clear_features_flag(bond_dev); return; } -- 2.43.0 xdp-tools-1.5.4/lib/libbpf/ci/diffs/0001-arch-Kconfig-Move-SPECULATION_MITIGATIONS-to-arch-Kc.patch0000644000175100001660000000464314706536574030656 0ustar runnerdockerFrom c71766e8ff7a7f950522d25896fba758585500df Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 22 Apr 2024 21:14:40 -0700 Subject: [PATCH] arch/Kconfig: Move SPECULATION_MITIGATIONS to arch/Kconfig SPECULATION_MITIGATIONS is currently defined only for x86. As a result, IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) is always false for other archs. f337a6a21e2f effectively set "mitigations=off" by default on non-x86 archs, which is not desired behavior. Jakub observed this change when running bpf selftests on s390 and arm64. Fix this by moving SPECULATION_MITIGATIONS to arch/Kconfig so that it is available in all archs and thus can be used safely in kernel/cpu.c Fixes: f337a6a21e2f ("x86/cpu: Actually turn off mitigations by default for SPECULATION_MITIGATIONS=n") Cc: stable@vger.kernel.org Cc: Sean Christopherson Cc: Ingo Molnar Cc: Daniel Sneddon Cc: Jakub Kicinski Signed-off-by: Song Liu --- arch/Kconfig | 10 ++++++++++ arch/x86/Kconfig | 10 ---------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 9f066785bb71..8f4af75005f8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1609,4 +1609,14 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT # strict alignment always, even with -falign-functions. def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG +menuconfig SPECULATION_MITIGATIONS + bool "Mitigations for speculative execution vulnerabilities" + default y + help + Say Y here to enable options which enable mitigations for + speculative execution hardware vulnerabilities. + + If you say N, all mitigations will be disabled. You really + should know what you are doing to say so. + endmenu diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 39886bab943a..50c890fce5e0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2486,16 +2486,6 @@ config PREFIX_SYMBOLS def_bool y depends on CALL_PADDING && !CFI_CLANG -menuconfig SPECULATION_MITIGATIONS - bool "Mitigations for speculative execution vulnerabilities" - default y - help - Say Y here to enable options which enable mitigations for - speculative execution hardware vulnerabilities. - - If you say N, all mitigations will be disabled. You really - should know what you are doing to say so. 
-
 if SPECULATION_MITIGATIONS
 
 config MITIGATION_PAGE_TABLE_ISOLATION
-- 
2.43.0
xdp-tools-1.5.4/lib/libbpf/SYNC.md0000644000175100001660000003166014706536574016136 0ustar runnerdocker
Libbpf sync
===========

Libbpf *authoritative source code* is developed as part of the [bpf-next Linux
source tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next)
under the `tools/lib/bpf` subdirectory and is periodically synced to Github.

Most of the mundane mechanical things, like the bpf and bpf-next tree merge,
Git history transformation, cherry-picking of relevant commits, re-generation
of auto-generated headers, etc., are taken care of by the [sync-kernel.sh
script](https://github.com/libbpf/libbpf/blob/master/scripts/sync-kernel.sh).
But occasionally a human needs to do a few extra things to make everything
work nicely. This document goes over the process of syncing libbpf sources
from the Linux repo to this Github repository. Feel free to contribute fixes
and additions if you run into new problems not outlined here.

Setup expectations
------------------

The sync script has particular expectations of the upstream Linux repo setup.
It expects that the current HEAD of that repo points to bpf-next's master
branch and that there is a separate local branch pointing to bpf tree's master
branch. This is important, as the script will automatically merge their
histories for the purpose of the libbpf sync.

Below, we assume that the Linux repo is located at `~/linux`, its current head
is at the latest `bpf-next/master`, and libbpf's Github repo is located at
`~/libbpf`, checked out to the latest commit on the `master` branch. It
doesn't matter from where you run the `sync-kernel.sh` script, but we'll be
running it from inside `~/libbpf`.

```
$ cd ~/linux && git remote -v | grep -E '^(bpf|bpf-next)'
bpf	https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git (fetch)
bpf	ssh://git@gitolite.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git (push)
bpf-next	https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git (fetch)
bpf-next	ssh://git@gitolite.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git (push)
$ git branch -vv | grep -E '^\*? (master|bpf-master)'
* bpf-master	2d311f480b52 [bpf/master] riscv, bpf: Fix patch_text implicit declaration
  master	c8ee37bde402 [bpf-next/master] libbpf: Fix bpf_xdp_query() in old kernels
$ git checkout bpf-master && git pull && git checkout master && git pull
...
$ git log --oneline -n1
c8ee37bde402 (HEAD -> master, bpf-next/master) libbpf: Fix bpf_xdp_query() in old kernels
$ cd ~/libbpf && git checkout master && git pull
Your branch is up to date with 'libbpf/master'.
Already up to date.
```

Running setup script
--------------------

The first step is to always run the `sync-kernel.sh` script. It expects three
arguments:

```
$ scripts/sync-kernel.sh <libbpf-repo> <kernel-repo> <bpf-branch>
```

Note that we'll store the script's entire output in `/tmp/libbpf-sync.txt` and
put it into the PR summary later on. **Please store the script's output and
include it in the PR summary for others to check for anything unexpected and
suspicious.**

```
$ scripts/sync-kernel.sh ~/libbpf ~/linux bpf-master | tee /tmp/libbpf-sync.txt
Dumping existing libbpf commit signatures...
WORKDIR: /home/andriin/libbpf
LINUX REPO: /home/andriin/linux
LIBBPF REPO: /home/andriin/libbpf
...
```

Most of the time this will go very uneventfully. One expected case in which
the sync script might require user intervention is if the `bpf` tree has some
libbpf fixes, which is nowadays not a very frequent occurrence. But if that
happens, the script will show you a diff between the expected state as of the
latest bpf-next and the synced Github repo state.
It will then ask if these changes look good. Please use your best judgement
to verify that the differences are indeed the expected `bpf` tree fixes. E.g.,
it might look like below:

```
Comparing list of files...
Comparing file contents...
--- /home/andriin/linux/include/uapi/linux/netdev.h	2023-02-27 16:54:42.270583372 -0800
+++ /home/andriin/libbpf/include/uapi/linux/netdev.h	2023-02-27 16:54:34.615530796 -0800
@@ -19,7 +19,7 @@
  * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
  * in zero copy mode.
  * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
- * oflloading.
+ * offloading.
  * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
  * XDP buffer support in the driver napi callback.
  * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
/home/andriin/linux/include/uapi/linux/netdev.h and /home/andriin/libbpf/include/uapi/linux/netdev.h are different!
Unfortunately, there are some inconsistencies, please double check.
Does everything look good? [y/N]:
```

If it looks sensible and expected, type `y` and the script will proceed.

If the sync is successful, your `~/linux` repo will be left in its original
state on the original HEAD commit. The `~/libbpf` repo will now be on a new
branch, named `libbpf-sync-<timestamp>` (e.g.,
`libbpf-sync-2023-02-28T00-53-40.072Z`). Push this branch into your fork of
the `libbpf/libbpf` Github repo and create a PR:

```
$ git push --set-upstream origin libbpf-sync-2023-02-28T00-53-40.072Z
Enumerating objects: 130, done.
Counting objects: 100% (115/115), done.
Delta compression using up to 80 threads
Compressing objects: 100% (28/28), done.
Writing objects: 100% (32/32), 5.57 KiB | 1.86 MiB/s, done.
Total 32 (delta 21), reused 0 (delta 0), pack-reused 0
remote: Resolving deltas: 100% (21/21), completed with 9 local objects.
remote:
remote: Create a pull request for 'libbpf-sync-2023-02-28T00-53-40.072Z' on GitHub by visiting:
remote:      https://github.com/anakryiko/libbpf/pull/new/libbpf-sync-2023-02-28T00-53-40.072Z
remote:
To github.com:anakryiko/libbpf.git
 * [new branch]          libbpf-sync-2023-02-28T00-53-40.072Z -> libbpf-sync-2023-02-28T00-53-40.072Z
Branch 'libbpf-sync-2023-02-28T00-53-40.072Z' set up to track remote branch 'libbpf-sync-2023-02-28T00-53-40.072Z' from 'origin'.
```

**Please adjust the PR name to have a proper-looking timestamp. Libbpf
maintainers will be very thankful for that!** By default Github will turn the
above branch name into a PR with the subject "Libbpf sync 2023 02 28 t00 53
40.072 z". Please fix this into a proper timestamp, e.g.: "Libbpf sync
2023-02-28T00:53:40.072Z". Thank you!

**Please don't forget to paste the contents of /tmp/libbpf-sync.txt into the
PR summary!**

Once the PR is created, libbpf CI will run a bunch of tests to check that
everything is good. In simple cases that would be all you'd need to do. In
more complicated cases some extra adjustments might be necessary.

**Please keep naming and style consistent.** Prefix CI-related fixes with the
`ci: ` prefix. If you had to modify the sync script, use the `sync: ` prefix.
Also make sure that each such commit has a `Signed-off-by: Your Full Name
<your@email.com>` line, just like you'd do for a Linux upstream patch. Libbpf
closely follows kernel conventions and styling, so please help maintain that.
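As a small illustration of the convention (the commit subject below is a
made-up example), passing `-s` to `git commit` makes git append the
`Signed-off-by:` trailer for you:

```
$ git commit -s -m "ci: denylist a test that started failing on old kernels"
```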
Including new sources
---------------------

If entirely new source files (typically `*.c`) were added to the library in
the kernel repository, it may be necessary to add these to the build system
manually (you may notice linker errors otherwise), because the script cannot
handle such changes automatically. To that end, edit `src/Makefile` as
necessary. Commit
[c2495832ced4](https://github.com/libbpf/libbpf/commit/c2495832ced4239bcd376b9954db38a6addd89ca)
is an example of how to go about doing that.

Similarly, if new public API header files were added, the `Makefile` will need
to be adjusted as well.

Updating allow/deny lists
-------------------------

Libbpf CI intentionally runs a subset of the latest BPF selftests on old
kernels (4.9 and 5.5, currently). It happens from time to time that some tests
that previously ran successfully on old kernels no longer do, typically due to
reliance on some freshly added kernel feature. It might look something like
this in [CI logs](https://github.com/libbpf/libbpf/actions/runs/4206303272/jobs/7299609578#step:4:2733):

```
All error logs:
serial_test_xdp_info:FAIL:get_xdp_none errno=2
#283     xdp_info:FAIL
Summary: 49/166 PASSED, 5 SKIPPED, 1 FAILED
```

In such a case we can either work with upstream to make the test compatible
with old kernels, or we'll have to add the test to a denylist (or remove it
from an allowlist, like was
[done](https://github.com/libbpf/libbpf/commit/ea284299025bf85b85b4923191de6463cd43ccd6)
for the case above).

```
$ find . -name '*LIST*'
./ci/vmtest/configs/ALLOWLIST-4.9.0
./ci/vmtest/configs/DENYLIST-5.5.0
./ci/vmtest/configs/DENYLIST-latest.s390x
./ci/vmtest/configs/DENYLIST-latest
./ci/vmtest/configs/ALLOWLIST-5.5.0
```

Please determine which tests need to be added to/removed from which list, and
then add that as a separate commit. **Please keep using the same branch name,
so that the same PR can be updated.** There is no need to open new PRs for
each such fix.

Regenerating vmlinux.h header
-----------------------------

To compile the latest BPF selftests against old kernels, we check in a
pre-generated
[vmlinux.h](https://github.com/libbpf/libbpf/blob/master/.github/actions/build-selftests/vmlinux.h)
header file, located at `.github/actions/build-selftests/vmlinux.h`, which
contains type definitions from the latest upstream kernel. When, after a
libbpf sync, upstream BPF selftests require new kernel types, we need to
regenerate `vmlinux.h` and check it in as well.

This will look something like this in [CI logs](https://github.com/libbpf/libbpf/actions/runs/4198939244/jobs/7283214243#step:4:1903):

```
In file included from progs/test_spin_lock_fail.c:5:
/home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:73:53: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
                                                    ^
/home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:81:35: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
                                  ^
/home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/bpf_experimental.h:90:52: error: declaration of 'struct bpf_rb_root' will not be visible outside of this function [-Werror,-Wvisibility]
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
                                                   ^
3 errors generated.
make: *** [Makefile:572: /home/runner/work/libbpf/libbpf/.kernel/tools/testing/selftests/bpf/test_spin_lock_fail.bpf.o] Error 1
make: *** Waiting for unfinished jobs....
Error: Process completed with exit code 2.
```

You'll need to build the latest upstream kernel from the `bpf-next` tree,
using the BPF selftest configs. Concatenate the arch-agnostic and
arch-specific configs, build the kernel, then use bpftool to dump `vmlinux.h`:

```
$ cd ~/linux
$ cat tools/testing/selftests/bpf/config \
      tools/testing/selftests/bpf/config.x86_64 > .config
$ make -j$(nproc) olddefconfig all
...
$ bpftool btf dump file ~/linux/vmlinux format c > ~/libbpf/.github/actions/build-selftests/vmlinux.h
$ cd ~/libbpf && git add . && git commit -s
```

Check in the generated `vmlinux.h`, don't forget to use the `ci: ` commit
prefix, and add it on top of the sync commits. Push to Github and let libbpf
CI do the checking for you. See
[this commit](https://github.com/libbpf/libbpf/commit/34212c94a64df8eeb1dd5d064630a65e1dfd4c20)
for reference.

Troubleshooting
---------------

If something goes wrong and the sync script exits early or is terminated early
by the user, you might end up with the `~/linux` repo on a temporary
sync-related branch. Don't worry, though: the sync script never destroys repo
state; it follows a "copy-on-write" philosophy and creates new branches where
necessary. So it's very easy to restore the previous state. If anything goes
wrong, it's easy to start fresh:

```
$ git branch | grep -E 'libbpf-.*Z'
  libbpf-baseline-2023-02-28T00-43-35.146Z
  libbpf-bpf-baseline-2023-02-28T00-43-35.146Z
  libbpf-bpf-tip-2023-02-28T00-43-35.146Z
  libbpf-squash-base-2023-02-28T00-43-35.146Z
* libbpf-squash-tip-2023-02-28T00-43-35.146Z
$ git cherry-pick --abort
$ git checkout master && git branch | grep -E 'libbpf-.*Z' | xargs git branch -D
Switched to branch 'master'
Your branch is up to date with 'bpf-next/master'.
Deleted branch libbpf-baseline-2023-02-28T00-43-35.146Z (was 951bce29c898).
Deleted branch libbpf-bpf-baseline-2023-02-28T00-43-35.146Z (was 3a70e0d4c9d7).
Deleted branch libbpf-bpf-tip-2023-02-28T00-43-35.146Z (was 2d311f480b52).
Deleted branch libbpf-squash-base-2023-02-28T00-43-35.146Z (was 957f109ef883).
Deleted branch libbpf-squash-tip-2023-02-28T00-43-35.146Z (was be66130d2339).
Deleted branch libbpf-tip-2023-02-28T00-43-35.146Z (was 2d311f480b52).
```

You might need to do the same for your `~/libbpf` repo sometimes, depending on
the stage at which the sync script was terminated.
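As a final (hypothetical) sanity check after the cleanup, `git status -sb` can
confirm that both repos are back on the branches the setup section expects
(remote names here match the ones shown earlier; yours may differ):

```
$ git -C ~/linux status -sb
## master...bpf-next/master
$ git -C ~/libbpf status -sb
## master...libbpf/master
```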
xdp-tools-1.5.4/lib/libbpf/.github/0000755000175100001660000000000014706536574016402 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/0000755000175100001660000000000014706536574020042 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/0000755000175100001660000000000014706536574023153 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/vmlinux.h0000644000175100001660001052162114706536574025036 0ustar runnerdocker#ifndef __VMLINUX_H__ #define __VMLINUX_H__ #ifndef BPF_NO_PRESERVE_ACCESS_INDEX #pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) #endif #ifndef __ksym #define __ksym __attribute__((section(".ksyms"))) #endif #ifndef __weak #define __weak __attribute__((weak)) #endif enum { ACPI_BUTTON_LID_INIT_IGNORE = 0, ACPI_BUTTON_LID_INIT_OPEN = 1, ACPI_BUTTON_LID_INIT_METHOD = 2, ACPI_BUTTON_LID_INIT_DISABLED = 3, }; enum { ACPI_GENL_ATTR_UNSPEC = 0, ACPI_GENL_ATTR_EVENT = 1, __ACPI_GENL_ATTR_MAX = 2, }; enum { ACPI_GENL_CMD_UNSPEC = 0, ACPI_GENL_CMD_EVENT = 1, __ACPI_GENL_CMD_MAX = 2, }; enum { ACPI_REFCLASS_LOCAL = 0, ACPI_REFCLASS_ARG = 1, ACPI_REFCLASS_REFOF = 2, ACPI_REFCLASS_INDEX = 3, ACPI_REFCLASS_TABLE = 4, ACPI_REFCLASS_NAME = 5, ACPI_REFCLASS_DEBUG = 6, ACPI_REFCLASS_MAX = 6, }; enum { ACPI_RSC_INITGET = 0, ACPI_RSC_INITSET = 1, ACPI_RSC_FLAGINIT = 2, ACPI_RSC_1BITFLAG = 3, ACPI_RSC_2BITFLAG = 4, ACPI_RSC_3BITFLAG = 5, ACPI_RSC_6BITFLAG = 6, ACPI_RSC_ADDRESS = 7, ACPI_RSC_BITMASK = 8, ACPI_RSC_BITMASK16 = 9, ACPI_RSC_COUNT = 10, ACPI_RSC_COUNT16 = 11, ACPI_RSC_COUNT_GPIO_PIN = 12, ACPI_RSC_COUNT_GPIO_RES = 13, ACPI_RSC_COUNT_GPIO_VEN = 14, ACPI_RSC_COUNT_SERIAL_RES = 15, ACPI_RSC_COUNT_SERIAL_VEN = 16, ACPI_RSC_DATA8 = 17, ACPI_RSC_EXIT_EQ = 18, ACPI_RSC_EXIT_LE = 19, ACPI_RSC_EXIT_NE = 20, ACPI_RSC_LENGTH = 21, ACPI_RSC_MOVE_GPIO_PIN = 22, ACPI_RSC_MOVE_GPIO_RES = 23, ACPI_RSC_MOVE_SERIAL_RES = 24, ACPI_RSC_MOVE_SERIAL_VEN = 25, ACPI_RSC_MOVE8 = 26, ACPI_RSC_MOVE16 = 27, ACPI_RSC_MOVE32 = 28, ACPI_RSC_MOVE64 = 29, ACPI_RSC_SET8 = 30, ACPI_RSC_SOURCE = 31, ACPI_RSC_SOURCEX = 32, }; enum { AD_CURRENT_WHILE_TIMER = 0, AD_ACTOR_CHURN_TIMER = 1, AD_PERIODIC_TIMER = 2, AD_PARTNER_CHURN_TIMER = 3, AD_WAIT_WHILE_TIMER = 4, }; enum { AD_MARKER_INFORMATION_SUBTYPE = 1, AD_MARKER_RESPONSE_SUBTYPE = 2, }; enum { AD_TYPE_LACPDU = 1, AD_TYPE_MARKER = 2, }; enum { AFFINITY = 0, AFFINITY_LIST = 1, EFFECTIVE = 2, EFFECTIVE_LIST = 3, }; enum { AML_FIELD_ACCESS_ANY = 0, AML_FIELD_ACCESS_BYTE = 1, AML_FIELD_ACCESS_WORD = 2, AML_FIELD_ACCESS_DWORD = 3, AML_FIELD_ACCESS_QWORD = 4, AML_FIELD_ACCESS_BUFFER = 5, }; enum { AML_FIELD_ATTRIB_QUICK = 2, AML_FIELD_ATTRIB_SEND_RECEIVE = 4, AML_FIELD_ATTRIB_BYTE = 6, AML_FIELD_ATTRIB_WORD = 8, AML_FIELD_ATTRIB_BLOCK = 10, AML_FIELD_ATTRIB_BYTES = 11, AML_FIELD_ATTRIB_PROCESS_CALL = 12, AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 13, AML_FIELD_ATTRIB_RAW_BYTES = 14, AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 15, }; enum { AML_FIELD_UPDATE_PRESERVE = 0, AML_FIELD_UPDATE_WRITE_AS_ONES = 32, AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64, }; enum { ARCH_LBR_BR_TYPE_JCC = 0, ARCH_LBR_BR_TYPE_NEAR_IND_JMP = 1, ARCH_LBR_BR_TYPE_NEAR_REL_JMP = 2, ARCH_LBR_BR_TYPE_NEAR_IND_CALL = 3, ARCH_LBR_BR_TYPE_NEAR_REL_CALL = 4, ARCH_LBR_BR_TYPE_NEAR_RET = 5, ARCH_LBR_BR_TYPE_KNOWN_MAX = 5, ARCH_LBR_BR_TYPE_MAP_MAX = 16, }; enum { ASCII_NULL = 0, ASCII_BELL = 7, ASCII_BACKSPACE = 8, ASCII_IGNORE_FIRST = 8, ASCII_HTAB = 9, ASCII_LINEFEED = 10, ASCII_VTAB = 11, ASCII_FORMFEED = 12, ASCII_CAR_RET = 13, 
ASCII_IGNORE_LAST = 13, ASCII_SHIFTOUT = 14, ASCII_SHIFTIN = 15, ASCII_CANCEL = 24, ASCII_SUBSTITUTE = 26, ASCII_ESCAPE = 27, ASCII_CSI_IGNORE_FIRST = 32, ASCII_CSI_IGNORE_LAST = 63, ASCII_DEL = 127, ASCII_EXT_CSI = 155, }; enum { ATA_MAX_DEVICES = 2, ATA_MAX_PRD = 256, ATA_SECT_SIZE = 512, ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, ATA_MAX_SECTORS_LBA48 = 65535, ATA_MAX_SECTORS_TAPE = 65535, ATA_MAX_TRIM_RNUM = 64, ATA_ID_WORDS = 256, ATA_ID_CONFIG = 0, ATA_ID_CYLS = 1, ATA_ID_HEADS = 3, ATA_ID_SECTORS = 6, ATA_ID_SERNO = 10, ATA_ID_BUF_SIZE = 21, ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, ATA_ID_MAX_MULTSECT = 47, ATA_ID_DWORD_IO = 48, ATA_ID_TRUSTED = 48, ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, ATA_ID_OLD_DMA_MODES = 52, ATA_ID_FIELD_VALID = 53, ATA_ID_CUR_CYLS = 54, ATA_ID_CUR_HEADS = 55, ATA_ID_CUR_SECTORS = 56, ATA_ID_MULTSECT = 59, ATA_ID_LBA_CAPACITY = 60, ATA_ID_SWDMA_MODES = 62, ATA_ID_MWDMA_MODES = 63, ATA_ID_PIO_MODES = 64, ATA_ID_EIDE_DMA_MIN = 65, ATA_ID_EIDE_DMA_TIME = 66, ATA_ID_EIDE_PIO = 67, ATA_ID_EIDE_PIO_IORDY = 68, ATA_ID_ADDITIONAL_SUPP = 69, ATA_ID_QUEUE_DEPTH = 75, ATA_ID_SATA_CAPABILITY = 76, ATA_ID_SATA_CAPABILITY_2 = 77, ATA_ID_FEATURE_SUPP = 78, ATA_ID_MAJOR_VER = 80, ATA_ID_COMMAND_SET_1 = 82, ATA_ID_COMMAND_SET_2 = 83, ATA_ID_CFSSE = 84, ATA_ID_CFS_ENABLE_1 = 85, ATA_ID_CFS_ENABLE_2 = 86, ATA_ID_CSF_DEFAULT = 87, ATA_ID_UDMA_MODES = 88, ATA_ID_HW_CONFIG = 93, ATA_ID_SPG = 98, ATA_ID_LBA_CAPACITY_2 = 100, ATA_ID_SECTOR_SIZE = 106, ATA_ID_WWN = 108, ATA_ID_LOGICAL_SECTOR_SIZE = 117, ATA_ID_COMMAND_SET_3 = 119, ATA_ID_COMMAND_SET_4 = 120, ATA_ID_LAST_LUN = 126, ATA_ID_DLF = 128, ATA_ID_CSFO = 129, ATA_ID_CFA_POWER = 160, ATA_ID_CFA_KEY_MGMT = 162, ATA_ID_CFA_MODES = 163, ATA_ID_DATA_SET_MGMT = 169, ATA_ID_SCT_CMD_XPORT = 206, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = 2, ATA_ID_SERNO_LEN = 20, ATA_ID_FW_REV_LEN = 8, ATA_ID_PROD_LEN = 40, ATA_ID_WWN_LEN = 8, ATA_PCI_CTL_OFS = 2, ATA_PIO0 = 1, ATA_PIO1 = 3, ATA_PIO2 = 7, ATA_PIO3 = 15, ATA_PIO4 = 31, ATA_PIO5 = 63, ATA_PIO6 = 127, ATA_PIO4_ONLY = 16, ATA_SWDMA0 = 1, ATA_SWDMA1 = 3, ATA_SWDMA2 = 7, ATA_SWDMA2_ONLY = 4, ATA_MWDMA0 = 1, ATA_MWDMA1 = 3, ATA_MWDMA2 = 7, ATA_MWDMA3 = 15, ATA_MWDMA4 = 31, ATA_MWDMA12_ONLY = 6, ATA_MWDMA2_ONLY = 4, ATA_UDMA0 = 1, ATA_UDMA1 = 3, ATA_UDMA2 = 7, ATA_UDMA3 = 15, ATA_UDMA4 = 31, ATA_UDMA5 = 63, ATA_UDMA6 = 127, ATA_UDMA7 = 255, ATA_UDMA24_ONLY = 20, ATA_UDMA_MASK_40C = 7, ATA_PRD_SZ = 8, ATA_PRD_TBL_SZ = 2048, ATA_PRD_EOT = -2147483648, ATA_DMA_TABLE_OFS = 4, ATA_DMA_STATUS = 2, ATA_DMA_CMD = 0, ATA_DMA_WR = 8, ATA_DMA_START = 1, ATA_DMA_INTR = 4, ATA_DMA_ERR = 2, ATA_DMA_ACTIVE = 1, ATA_HOB = 128, ATA_NIEN = 2, ATA_LBA = 64, ATA_DEV1 = 16, ATA_DEVICE_OBS = 160, ATA_DEVCTL_OBS = 8, ATA_BUSY = 128, ATA_DRDY = 64, ATA_DF = 32, ATA_DSC = 16, ATA_DRQ = 8, ATA_CORR = 4, ATA_SENSE = 2, ATA_ERR = 1, ATA_SRST = 4, ATA_ICRC = 128, ATA_BBK = 128, ATA_UNC = 64, ATA_MC = 32, ATA_IDNF = 16, ATA_MCR = 8, ATA_ABORTED = 4, ATA_TRK0NF = 2, ATA_AMNF = 1, ATAPI_LFS = 240, ATAPI_EOM = 2, ATAPI_ILI = 1, ATAPI_IO = 2, ATAPI_COD = 1, ATA_REG_DATA = 0, ATA_REG_ERR = 1, ATA_REG_NSECT = 2, ATA_REG_LBAL = 3, ATA_REG_LBAM = 4, ATA_REG_LBAH = 5, ATA_REG_DEVICE = 6, ATA_REG_STATUS = 7, ATA_REG_FEATURE = 1, ATA_REG_CMD = 7, ATA_REG_BYTEL = 4, ATA_REG_BYTEH = 5, ATA_REG_DEVSEL = 6, ATA_REG_IRQ = 2, ATA_CMD_DEV_RESET = 8, ATA_CMD_CHK_POWER = 229, ATA_CMD_STANDBY = 226, ATA_CMD_IDLE = 227, ATA_CMD_EDD = 144, ATA_CMD_DOWNLOAD_MICRO = 146, ATA_CMD_DOWNLOAD_MICRO_DMA = 147, 
ATA_CMD_NOP = 0, ATA_CMD_FLUSH = 231, ATA_CMD_FLUSH_EXT = 234, ATA_CMD_ID_ATA = 236, ATA_CMD_ID_ATAPI = 161, ATA_CMD_SERVICE = 162, ATA_CMD_READ = 200, ATA_CMD_READ_EXT = 37, ATA_CMD_READ_QUEUED = 38, ATA_CMD_READ_STREAM_EXT = 43, ATA_CMD_READ_STREAM_DMA_EXT = 42, ATA_CMD_WRITE = 202, ATA_CMD_WRITE_EXT = 53, ATA_CMD_WRITE_QUEUED = 54, ATA_CMD_WRITE_STREAM_EXT = 59, ATA_CMD_WRITE_STREAM_DMA_EXT = 58, ATA_CMD_WRITE_FUA_EXT = 61, ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, ATA_CMD_FPDMA_READ = 96, ATA_CMD_FPDMA_WRITE = 97, ATA_CMD_NCQ_NON_DATA = 99, ATA_CMD_FPDMA_SEND = 100, ATA_CMD_FPDMA_RECV = 101, ATA_CMD_PIO_READ = 32, ATA_CMD_PIO_READ_EXT = 36, ATA_CMD_PIO_WRITE = 48, ATA_CMD_PIO_WRITE_EXT = 52, ATA_CMD_READ_MULTI = 196, ATA_CMD_READ_MULTI_EXT = 41, ATA_CMD_WRITE_MULTI = 197, ATA_CMD_WRITE_MULTI_EXT = 57, ATA_CMD_WRITE_MULTI_FUA_EXT = 206, ATA_CMD_SET_FEATURES = 239, ATA_CMD_SET_MULTI = 198, ATA_CMD_PACKET = 160, ATA_CMD_VERIFY = 64, ATA_CMD_VERIFY_EXT = 66, ATA_CMD_WRITE_UNCORR_EXT = 69, ATA_CMD_STANDBYNOW1 = 224, ATA_CMD_IDLEIMMEDIATE = 225, ATA_CMD_SLEEP = 230, ATA_CMD_INIT_DEV_PARAMS = 145, ATA_CMD_READ_NATIVE_MAX = 248, ATA_CMD_READ_NATIVE_MAX_EXT = 39, ATA_CMD_SET_MAX = 249, ATA_CMD_SET_MAX_EXT = 55, ATA_CMD_READ_LOG_EXT = 47, ATA_CMD_WRITE_LOG_EXT = 63, ATA_CMD_READ_LOG_DMA_EXT = 71, ATA_CMD_WRITE_LOG_DMA_EXT = 87, ATA_CMD_TRUSTED_NONDATA = 91, ATA_CMD_TRUSTED_RCV = 92, ATA_CMD_TRUSTED_RCV_DMA = 93, ATA_CMD_TRUSTED_SND = 94, ATA_CMD_TRUSTED_SND_DMA = 95, ATA_CMD_PMP_READ = 228, ATA_CMD_PMP_READ_DMA = 233, ATA_CMD_PMP_WRITE = 232, ATA_CMD_PMP_WRITE_DMA = 235, ATA_CMD_CONF_OVERLAY = 177, ATA_CMD_SEC_SET_PASS = 241, ATA_CMD_SEC_UNLOCK = 242, ATA_CMD_SEC_ERASE_PREP = 243, ATA_CMD_SEC_ERASE_UNIT = 244, ATA_CMD_SEC_FREEZE_LOCK = 245, ATA_CMD_SEC_DISABLE_PASS = 246, ATA_CMD_CONFIG_STREAM = 81, ATA_CMD_SMART = 176, ATA_CMD_MEDIA_LOCK = 222, ATA_CMD_MEDIA_UNLOCK = 223, ATA_CMD_DSM = 6, ATA_CMD_CHK_MED_CRD_TYP = 209, ATA_CMD_CFA_REQ_EXT_ERR = 3, ATA_CMD_CFA_WRITE_NE = 56, ATA_CMD_CFA_TRANS_SECT = 135, ATA_CMD_CFA_ERASE = 192, ATA_CMD_CFA_WRITE_MULT_NE = 205, ATA_CMD_REQ_SENSE_DATA = 11, ATA_CMD_SANITIZE_DEVICE = 180, ATA_CMD_ZAC_MGMT_IN = 74, ATA_CMD_ZAC_MGMT_OUT = 159, ATA_CMD_RESTORE = 16, ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, ATA_SUBCMD_FPDMA_SEND_DSM = 0, ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, ATA_LOG_DIRECTORY = 0, ATA_LOG_SATA_NCQ = 16, ATA_LOG_NCQ_NON_DATA = 18, ATA_LOG_NCQ_SEND_RECV = 19, ATA_LOG_CDL = 24, ATA_LOG_CDL_SIZE = 512, ATA_LOG_IDENTIFY_DEVICE = 48, ATA_LOG_SENSE_NCQ = 15, ATA_LOG_SENSE_NCQ_SIZE = 1024, ATA_LOG_CONCURRENT_POSITIONING_RANGES = 71, ATA_LOG_SUPPORTED_CAPABILITIES = 3, ATA_LOG_CURRENT_SETTINGS = 4, ATA_LOG_SECURITY = 6, ATA_LOG_SATA_SETTINGS = 8, ATA_LOG_ZONED_INFORMATION = 9, ATA_LOG_DEVSLP_OFFSET = 48, ATA_LOG_DEVSLP_SIZE = 8, ATA_LOG_DEVSLP_MDAT = 0, ATA_LOG_DEVSLP_MDAT_MASK = 31, ATA_LOG_DEVSLP_DETO = 1, ATA_LOG_DEVSLP_VALID = 7, ATA_LOG_DEVSLP_VALID_MASK = 128, ATA_LOG_NCQ_PRIO_OFFSET = 9, ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 4, ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, 
ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, ATA_LOG_NCQ_SEND_RECV_SIZE = 20, ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, ATA_LOG_NCQ_NON_DATA_SIZE = 64, ATA_CMD_READ_LONG = 34, ATA_CMD_READ_LONG_ONCE = 35, ATA_CMD_WRITE_LONG = 50, ATA_CMD_WRITE_LONG_ONCE = 51, SETFEATURES_XFER = 3, XFER_UDMA_7 = 71, XFER_UDMA_6 = 70, XFER_UDMA_5 = 69, XFER_UDMA_4 = 68, XFER_UDMA_3 = 67, XFER_UDMA_2 = 66, XFER_UDMA_1 = 65, XFER_UDMA_0 = 64, XFER_MW_DMA_4 = 36, XFER_MW_DMA_3 = 35, XFER_MW_DMA_2 = 34, XFER_MW_DMA_1 = 33, XFER_MW_DMA_0 = 32, XFER_SW_DMA_2 = 18, XFER_SW_DMA_1 = 17, XFER_SW_DMA_0 = 16, XFER_PIO_6 = 14, XFER_PIO_5 = 13, XFER_PIO_4 = 12, XFER_PIO_3 = 11, XFER_PIO_2 = 10, XFER_PIO_1 = 9, XFER_PIO_0 = 8, XFER_PIO_SLOW = 0, SETFEATURES_WC_ON = 2, SETFEATURES_WC_OFF = 130, SETFEATURES_RA_ON = 170, SETFEATURES_RA_OFF = 85, SETFEATURES_AAM_ON = 66, SETFEATURES_AAM_OFF = 194, SETFEATURES_SPINUP = 7, SETFEATURES_SPINUP_TIMEOUT = 30000, SETFEATURES_SATA_ENABLE = 16, SETFEATURES_SATA_DISABLE = 144, SETFEATURES_CDL = 13, SATA_FPDMA_OFFSET = 1, SATA_FPDMA_AA = 2, SATA_DIPM = 3, SATA_FPDMA_IN_ORDER = 4, SATA_AN = 5, SATA_SSP = 6, SATA_DEVSLP = 9, SETFEATURE_SENSE_DATA = 195, SETFEATURE_SENSE_DATA_SUCC_NCQ = 196, ATA_SET_MAX_ADDR = 0, ATA_SET_MAX_PASSWD = 1, ATA_SET_MAX_LOCK = 2, ATA_SET_MAX_UNLOCK = 3, ATA_SET_MAX_FREEZE_LOCK = 4, ATA_SET_MAX_PASSWD_DMA = 5, ATA_SET_MAX_UNLOCK_DMA = 6, ATA_DCO_RESTORE = 192, ATA_DCO_FREEZE_LOCK = 193, ATA_DCO_IDENTIFY = 194, ATA_DCO_SET = 195, ATA_SMART_ENABLE = 216, ATA_SMART_READ_VALUES = 208, ATA_SMART_READ_THRESHOLDS = 209, ATA_DSM_TRIM = 1, ATA_SMART_LBAM_PASS = 79, ATA_SMART_LBAH_PASS = 194, ATAPI_PKT_DMA = 1, ATAPI_DMADIR = 4, ATAPI_CDB_LEN = 16, SATA_PMP_MAX_PORTS = 15, SATA_PMP_CTRL_PORT = 15, SATA_PMP_GSCR_DWORDS = 128, SATA_PMP_GSCR_PROD_ID = 0, SATA_PMP_GSCR_REV = 1, SATA_PMP_GSCR_PORT_INFO = 2, SATA_PMP_GSCR_ERROR = 32, SATA_PMP_GSCR_ERROR_EN = 33, SATA_PMP_GSCR_FEAT = 64, SATA_PMP_GSCR_FEAT_EN = 96, SATA_PMP_PSCR_STATUS = 0, SATA_PMP_PSCR_ERROR = 1, SATA_PMP_PSCR_CONTROL = 2, SATA_PMP_FEAT_BIST = 1, SATA_PMP_FEAT_PMREQ = 2, SATA_PMP_FEAT_DYNSSC = 4, SATA_PMP_FEAT_NOTIFY = 8, ATA_CBL_NONE = 0, ATA_CBL_PATA40 = 1, ATA_CBL_PATA80 = 2, ATA_CBL_PATA40_SHORT = 3, ATA_CBL_PATA_UNK = 4, ATA_CBL_PATA_IGN = 5, ATA_CBL_SATA = 6, SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, SCR_ACTIVE = 3, SCR_NOTIFICATION = 4, SERR_DATA_RECOVERED = 1, SERR_COMM_RECOVERED = 2, SERR_DATA = 256, SERR_PERSISTENT = 512, SERR_PROTOCOL = 1024, SERR_INTERNAL = 2048, SERR_PHYRDY_CHG = 65536, SERR_PHY_INT_ERR = 131072, SERR_COMM_WAKE = 262144, SERR_10B_8B_ERR = 524288, SERR_DISPARITY = 1048576, SERR_CRC = 2097152, SERR_HANDSHAKE = 4194304, SERR_LINK_SEQ_ERR = 8388608, SERR_TRANS_ST_ERROR = 16777216, SERR_UNRECOG_FIS = 33554432, SERR_DEV_XCHG = 67108864, }; enum { AT_PKT_END = -1, BEYOND_PKT_END = -2, }; enum { AX25_VALUES_IPDEFMODE = 0, AX25_VALUES_AXDEFMODE = 1, AX25_VALUES_BACKOFF = 2, AX25_VALUES_CONMODE = 3, 
AX25_VALUES_WINDOW = 4, AX25_VALUES_EWINDOW = 5, AX25_VALUES_T1 = 6, AX25_VALUES_T2 = 7, AX25_VALUES_T3 = 8, AX25_VALUES_IDLE = 9, AX25_VALUES_N2 = 10, AX25_VALUES_PACLEN = 11, AX25_VALUES_PROTOCOL = 12, AX25_MAX_VALUES = 13, }; enum { Audit_equal = 0, Audit_not_equal = 1, Audit_bitmask = 2, Audit_bittest = 3, Audit_lt = 4, Audit_gt = 5, Audit_le = 6, Audit_ge = 7, Audit_bad = 8, }; enum { BDX_PCI_UNCORE_HA = 0, BDX_PCI_UNCORE_IMC = 1, BDX_PCI_UNCORE_IRP = 2, BDX_PCI_UNCORE_QPI = 3, BDX_PCI_UNCORE_R2PCIE = 4, BDX_PCI_UNCORE_R3QPI = 5, }; enum { BIAS = 2147483648, }; enum { BIOSET_NEED_BVECS = 1, BIOSET_NEED_RESCUER = 2, BIOSET_PERCPU_CACHE = 4, }; enum { BIO_PAGE_PINNED = 0, BIO_CLONED = 1, BIO_BOUNCED = 2, BIO_QUIET = 3, BIO_CHAIN = 4, BIO_REFFED = 5, BIO_BPS_THROTTLED = 6, BIO_TRACE_COMPLETION = 7, BIO_CGROUP_ACCT = 8, BIO_QOS_THROTTLED = 9, BIO_QOS_MERGED = 10, BIO_REMAPPED = 11, BIO_ZONE_WRITE_PLUGGING = 12, BIO_EMULATES_ZONE_APPEND = 13, BIO_FLAG_LAST = 14, }; enum { BLK_MQ_F_SHOULD_MERGE = 1, BLK_MQ_F_TAG_QUEUE_SHARED = 2, BLK_MQ_F_STACKING = 4, BLK_MQ_F_TAG_HCTX_SHARED = 8, BLK_MQ_F_BLOCKING = 16, BLK_MQ_F_NO_SCHED = 32, BLK_MQ_F_NO_SCHED_BY_DEFAULT = 64, BLK_MQ_F_ALLOC_POLICY_START_BIT = 7, BLK_MQ_F_ALLOC_POLICY_BITS = 1, }; enum { BLK_MQ_NO_TAG = 4294967295, BLK_MQ_TAG_MIN = 1, BLK_MQ_TAG_MAX = 4294967294, }; enum { BLK_MQ_REQ_NOWAIT = 1, BLK_MQ_REQ_RESERVED = 2, BLK_MQ_REQ_PM = 4, }; enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, BLK_MQ_S_INACTIVE = 3, BLK_MQ_S_MAX = 4, }; enum { BLK_MQ_UNIQUE_TAG_BITS = 16, BLK_MQ_UNIQUE_TAG_MASK = 65535, }; enum { BLK_TAG_ALLOC_FIFO = 0, BLK_TAG_ALLOC_RR = 1, BLK_TAG_ALLOC_MAX = 2, }; enum { BLOCK_BITMAP = 0, INODE_BITMAP = 1, INODE_TABLE = 2, GROUP_TABLE_COUNT = 3, }; enum { BOND_3AD_STAT_LACPDU_RX = 0, BOND_3AD_STAT_LACPDU_TX = 1, BOND_3AD_STAT_LACPDU_UNKNOWN_RX = 2, BOND_3AD_STAT_LACPDU_ILLEGAL_RX = 3, BOND_3AD_STAT_MARKER_RX = 4, BOND_3AD_STAT_MARKER_TX = 5, BOND_3AD_STAT_MARKER_RESP_RX = 6, BOND_3AD_STAT_MARKER_RESP_TX = 7, BOND_3AD_STAT_MARKER_UNKNOWN_RX = 8, BOND_3AD_STAT_PAD = 9, __BOND_3AD_STAT_MAX = 10, }; enum { BOND_AD_STABLE = 0, BOND_AD_BANDWIDTH = 1, BOND_AD_COUNT = 2, }; enum { BOND_OPTFLAG_NOSLAVES = 1, BOND_OPTFLAG_IFDOWN = 2, BOND_OPTFLAG_RAWVAL = 4, }; enum { BOND_OPT_MODE = 0, BOND_OPT_PACKETS_PER_SLAVE = 1, BOND_OPT_XMIT_HASH = 2, BOND_OPT_ARP_VALIDATE = 3, BOND_OPT_ARP_ALL_TARGETS = 4, BOND_OPT_FAIL_OVER_MAC = 5, BOND_OPT_ARP_INTERVAL = 6, BOND_OPT_ARP_TARGETS = 7, BOND_OPT_DOWNDELAY = 8, BOND_OPT_UPDELAY = 9, BOND_OPT_LACP_RATE = 10, BOND_OPT_MINLINKS = 11, BOND_OPT_AD_SELECT = 12, BOND_OPT_NUM_PEER_NOTIF = 13, BOND_OPT_MIIMON = 14, BOND_OPT_PRIMARY = 15, BOND_OPT_PRIMARY_RESELECT = 16, BOND_OPT_USE_CARRIER = 17, BOND_OPT_ACTIVE_SLAVE = 18, BOND_OPT_QUEUE_ID = 19, BOND_OPT_ALL_SLAVES_ACTIVE = 20, BOND_OPT_RESEND_IGMP = 21, BOND_OPT_LP_INTERVAL = 22, BOND_OPT_SLAVES = 23, BOND_OPT_TLB_DYNAMIC_LB = 24, BOND_OPT_AD_ACTOR_SYS_PRIO = 25, BOND_OPT_AD_ACTOR_SYSTEM = 26, BOND_OPT_AD_USER_PORT_KEY = 27, BOND_OPT_NUM_PEER_NOTIF_ALIAS = 28, BOND_OPT_PEER_NOTIF_DELAY = 29, BOND_OPT_LACP_ACTIVE = 30, BOND_OPT_MISSED_MAX = 31, BOND_OPT_NS_TARGETS = 32, BOND_OPT_PRIO = 33, BOND_OPT_COUPLED_CONTROL = 34, BOND_OPT_LAST = 35, }; enum { BOND_VALFLAG_DEFAULT = 1, BOND_VALFLAG_MIN = 2, BOND_VALFLAG_MAX = 4, }; enum { BOND_XSTATS_UNSPEC = 0, BOND_XSTATS_3AD = 1, __BOND_XSTATS_MAX = 2, }; enum { BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, }; enum { BPF_ANY = 0, BPF_NOEXIST = 1, 
BPF_EXIST = 2, BPF_F_LOCK = 4, }; enum { BPF_CSUM_LEVEL_QUERY = 0, BPF_CSUM_LEVEL_INC = 1, BPF_CSUM_LEVEL_DEC = 2, BPF_CSUM_LEVEL_RESET = 3, }; enum { BPF_FIB_LKUP_RET_SUCCESS = 0, BPF_FIB_LKUP_RET_BLACKHOLE = 1, BPF_FIB_LKUP_RET_UNREACHABLE = 2, BPF_FIB_LKUP_RET_PROHIBIT = 3, BPF_FIB_LKUP_RET_NOT_FWDED = 4, BPF_FIB_LKUP_RET_FWD_DISABLED = 5, BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, BPF_FIB_LKUP_RET_NO_NEIGH = 7, BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9, }; enum { BPF_FIB_LOOKUP_DIRECT = 1, BPF_FIB_LOOKUP_OUTPUT = 2, BPF_FIB_LOOKUP_SKIP_NEIGH = 4, BPF_FIB_LOOKUP_TBID = 8, BPF_FIB_LOOKUP_SRC = 16, BPF_FIB_LOOKUP_MARK = 32, }; enum { BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, }; enum { BPF_F_ADJ_ROOM_FIXED_GSO = 1, BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128, BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256, }; enum { BPF_F_BPRM_SECUREEXEC = 1, }; enum { BPF_F_BROADCAST = 8, BPF_F_EXCLUDE_INGRESS = 16, }; enum { BPF_F_CURRENT_NETNS = -1, }; enum { BPF_F_GET_BRANCH_RECORDS_SIZE = 1, }; enum { BPF_F_HDR_FIELD_MASK = 15, }; enum { BPF_F_INDEX_MASK = 4294967295ULL, BPF_F_CURRENT_CPU = 4294967295ULL, BPF_F_CTXLEN_MASK = 4503595332403200ULL, }; enum { BPF_F_INGRESS = 1, }; enum { BPF_F_KPROBE_MULTI_RETURN = 1, }; enum { BPF_F_NEIGH = 2, BPF_F_PEER = 4, BPF_F_NEXTHOP = 8, }; enum { BPF_F_NO_PREALLOC = 1, BPF_F_NO_COMMON_LRU = 2, BPF_F_NUMA_NODE = 4, BPF_F_RDONLY = 8, BPF_F_WRONLY = 16, BPF_F_STACK_BUILD_ID = 32, BPF_F_ZERO_SEED = 64, BPF_F_RDONLY_PROG = 128, BPF_F_WRONLY_PROG = 256, BPF_F_CLONE = 512, BPF_F_MMAPABLE = 1024, BPF_F_PRESERVE_ELEMS = 2048, BPF_F_INNER_MAP = 4096, BPF_F_LINK = 8192, BPF_F_PATH_FD = 16384, BPF_F_VTYPE_BTF_OBJ_FD = 32768, BPF_F_TOKEN_FD = 65536, BPF_F_SEGV_ON_FAULT = 131072, BPF_F_NO_USER_CONV = 262144, }; enum { BPF_F_PSEUDO_HDR = 16, BPF_F_MARK_MANGLED_0 = 32, BPF_F_MARK_ENFORCE = 64, }; enum { BPF_F_RECOMPUTE_CSUM = 1, BPF_F_INVALIDATE_HASH = 2, }; enum { BPF_F_SKIP_FIELD_MASK = 255, BPF_F_USER_STACK = 256, BPF_F_FAST_STACK_CMP = 512, BPF_F_REUSE_STACKID = 1024, BPF_F_USER_BUILD_ID = 2048, }; enum { BPF_F_SYSCTL_BASE_NAME = 1, }; enum { BPF_F_TIMER_ABS = 1, BPF_F_TIMER_CPU_PIN = 2, }; enum { BPF_F_TUNINFO_FLAGS = 16, }; enum { BPF_F_TUNINFO_IPV6 = 1, }; enum { BPF_F_UPROBE_MULTI_RETURN = 1, }; enum { BPF_F_ZERO_CSUM_TX = 2, BPF_F_DONT_FRAGMENT = 4, BPF_F_SEQ_NUMBER = 8, BPF_F_NO_TUNNEL_KEY = 16, }; enum { BPF_LOAD_HDR_OPT_TCP_SYN = 1, }; enum { BPF_LOCAL_STORAGE_GET_F_CREATE = 1, BPF_SK_STORAGE_GET_F_CREATE = 1, }; enum { BPF_MAX_LOOPS = 8388608, }; enum { BPF_MAX_TRAMP_LINKS = 38, }; enum { BPF_RB_AVAIL_DATA = 0, BPF_RB_RING_SIZE = 1, BPF_RB_CONS_POS = 2, BPF_RB_PROD_POS = 3, }; enum { BPF_RB_NO_WAKEUP = 1, BPF_RB_FORCE_WAKEUP = 2, }; enum { BPF_REG_0 = 0, BPF_REG_1 = 1, BPF_REG_2 = 2, BPF_REG_3 = 3, BPF_REG_4 = 4, BPF_REG_5 = 5, BPF_REG_6 = 6, BPF_REG_7 = 7, BPF_REG_8 = 8, BPF_REG_9 = 9, BPF_REG_10 = 10, __MAX_BPF_REG = 11, }; enum { BPF_RINGBUF_BUSY_BIT = 2147483648, BPF_RINGBUF_DISCARD_BIT = 1073741824, BPF_RINGBUF_HDR_SZ = 8, }; enum { BPF_SKB_TSTAMP_UNSPEC = 0, BPF_SKB_TSTAMP_DELIVERY_MONO = 1, BPF_SKB_CLOCK_REALTIME = 0, BPF_SKB_CLOCK_MONOTONIC = 1, BPF_SKB_CLOCK_TAI = 2, }; enum { BPF_SKEL_KERNEL = 1, }; enum { BPF_SK_LOOKUP_F_REPLACE = 1, BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, }; enum { 
BPF_SOCK_OPS_RTO_CB_FLAG = 1, BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, BPF_SOCK_OPS_STATE_CB_FLAG = 4, BPF_SOCK_OPS_RTT_CB_FLAG = 8, BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, BPF_SOCK_OPS_ALL_CB_FLAGS = 127, }; enum { BPF_SOCK_OPS_VOID = 0, BPF_SOCK_OPS_TIMEOUT_INIT = 1, BPF_SOCK_OPS_RWND_INIT = 2, BPF_SOCK_OPS_TCP_CONNECT_CB = 3, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, BPF_SOCK_OPS_NEEDS_ECN = 6, BPF_SOCK_OPS_BASE_RTT = 7, BPF_SOCK_OPS_RTO_CB = 8, BPF_SOCK_OPS_RETRANS_CB = 9, BPF_SOCK_OPS_STATE_CB = 10, BPF_SOCK_OPS_TCP_LISTEN_CB = 11, BPF_SOCK_OPS_RTT_CB = 12, BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, }; enum { BPF_TASK_ITER_ALL_PROCS = 0, BPF_TASK_ITER_ALL_THREADS = 1, BPF_TASK_ITER_PROC_THREADS = 2, }; enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT = 2, BPF_TCP_SYN_RECV = 3, BPF_TCP_FIN_WAIT1 = 4, BPF_TCP_FIN_WAIT2 = 5, BPF_TCP_TIME_WAIT = 6, BPF_TCP_CLOSE = 7, BPF_TCP_CLOSE_WAIT = 8, BPF_TCP_LAST_ACK = 9, BPF_TCP_LISTEN = 10, BPF_TCP_CLOSING = 11, BPF_TCP_NEW_SYN_RECV = 12, BPF_TCP_BOUND_INACTIVE = 13, BPF_TCP_MAX_STATES = 14, }; enum { BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, }; enum { BPF_XFRM_STATE_OPTS_SZ = 36, }; enum { BR_MCAST_DIR_RX = 0, BR_MCAST_DIR_TX = 1, BR_MCAST_DIR_SIZE = 2, }; enum { BTF_FIELDS_MAX = 11, }; enum { BTF_FIELD_IGNORE = 0, BTF_FIELD_FOUND = 1, }; enum { BTF_F_COMPACT = 1, BTF_F_NONAME = 2, BTF_F_PTR_RAW = 4, BTF_F_ZERO = 8, }; enum { BTF_KFUNC_SET_MAX_CNT = 256, BTF_DTOR_KFUNC_MAX_CNT = 256, BTF_KFUNC_FILTER_MAX_CNT = 16, }; enum { BTF_KIND_UNKN = 0, BTF_KIND_INT = 1, BTF_KIND_PTR = 2, BTF_KIND_ARRAY = 3, BTF_KIND_STRUCT = 4, BTF_KIND_UNION = 5, BTF_KIND_ENUM = 6, BTF_KIND_FWD = 7, BTF_KIND_TYPEDEF = 8, BTF_KIND_VOLATILE = 9, BTF_KIND_CONST = 10, BTF_KIND_RESTRICT = 11, BTF_KIND_FUNC = 12, BTF_KIND_FUNC_PROTO = 13, BTF_KIND_VAR = 14, BTF_KIND_DATASEC = 15, BTF_KIND_FLOAT = 16, BTF_KIND_DECL_TAG = 17, BTF_KIND_TYPE_TAG = 18, BTF_KIND_ENUM64 = 19, NR_BTF_KINDS = 20, BTF_KIND_MAX = 19, }; enum { BTF_MODULE_F_LIVE = 1, }; enum { BTF_SOCK_TYPE_INET = 0, BTF_SOCK_TYPE_INET_CONN = 1, BTF_SOCK_TYPE_INET_REQ = 2, BTF_SOCK_TYPE_INET_TW = 3, BTF_SOCK_TYPE_REQ = 4, BTF_SOCK_TYPE_SOCK = 5, BTF_SOCK_TYPE_SOCK_COMMON = 6, BTF_SOCK_TYPE_TCP = 7, BTF_SOCK_TYPE_TCP_REQ = 8, BTF_SOCK_TYPE_TCP_TW = 9, BTF_SOCK_TYPE_TCP6 = 10, BTF_SOCK_TYPE_UDP = 11, BTF_SOCK_TYPE_UDP6 = 12, BTF_SOCK_TYPE_UNIX = 13, BTF_SOCK_TYPE_MPTCP = 14, BTF_SOCK_TYPE_SOCKET = 15, MAX_BTF_SOCK_TYPE = 16, }; enum { BTF_TRACING_TYPE_TASK = 0, BTF_TRACING_TYPE_FILE = 1, BTF_TRACING_TYPE_VMA = 2, MAX_BTF_TRACING_TYPE = 3, }; enum { BTF_VAR_STATIC = 0, BTF_VAR_GLOBAL_ALLOCATED = 1, BTF_VAR_GLOBAL_EXTERN = 2, }; enum { BTS_STATE_STOPPED = 0, BTS_STATE_INACTIVE = 1, BTS_STATE_ACTIVE = 2, }; enum { Blktrace_setup = 1, Blktrace_running = 2, Blktrace_stopped = 3, }; enum { CFTYPE_ONLY_ON_ROOT = 1, CFTYPE_NOT_ON_ROOT = 2, CFTYPE_NS_DELEGATABLE = 4, CFTYPE_NO_PREFIX = 8, CFTYPE_WORLD_WRITABLE = 16, CFTYPE_DEBUG = 32, __CFTYPE_ONLY_ON_DFL = 65536, __CFTYPE_NOT_ON_DFL = 131072, __CFTYPE_ADDED = 262144, }; enum { CGROUPSTATS_CMD_ATTR_UNSPEC = 0, CGROUPSTATS_CMD_ATTR_FD = 1, __CGROUPSTATS_CMD_ATTR_MAX = 2, }; enum { CGROUPSTATS_CMD_UNSPEC = 3, CGROUPSTATS_CMD_GET = 4, CGROUPSTATS_CMD_NEW = 5, __CGROUPSTATS_CMD_MAX = 6, }; enum { CGROUPSTATS_TYPE_UNSPEC = 0, CGROUPSTATS_TYPE_CGROUP_STATS = 1, 
__CGROUPSTATS_TYPE_MAX = 2, }; enum { CGRP_NOTIFY_ON_RELEASE = 0, CGRP_CPUSET_CLONE_CHILDREN = 1, CGRP_FREEZE = 2, CGRP_FROZEN = 3, CGRP_KILL = 4, }; enum { CGRP_ROOT_NOPREFIX = 2, CGRP_ROOT_XATTR = 4, CGRP_ROOT_NS_DELEGATE = 8, CGRP_ROOT_FAVOR_DYNMODS = 16, CGRP_ROOT_CPUSET_V2_MODE = 65536, CGRP_ROOT_MEMORY_LOCAL_EVENTS = 131072, CGRP_ROOT_MEMORY_RECURSIVE_PROT = 262144, CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = 524288, CGRP_ROOT_PIDS_LOCAL_EVENTS = 1048576, }; enum { CMIS_MODULE_LOW_PWR = 1, CMIS_MODULE_READY = 3, }; enum { CRNG_EMPTY = 0, CRNG_EARLY = 1, CRNG_READY = 2, }; enum { CRNG_RESEED_START_INTERVAL = 1000, CRNG_RESEED_INTERVAL = 60000, }; enum { CRYPTOA_UNSPEC = 0, CRYPTOA_ALG = 1, CRYPTOA_TYPE = 2, __CRYPTOA_MAX = 3, }; enum { CRYPTO_AUTHENC_KEYA_UNSPEC = 0, CRYPTO_AUTHENC_KEYA_PARAM = 1, }; enum { CRYPTO_KPP_SECRET_TYPE_UNKNOWN = 0, CRYPTO_KPP_SECRET_TYPE_DH = 1, CRYPTO_KPP_SECRET_TYPE_ECDH = 2, }; enum { CRYPTO_MSG_ALG_REQUEST = 0, CRYPTO_MSG_ALG_REGISTER = 1, CRYPTO_MSG_ALG_LOADED = 2, }; enum { CSD_FLAG_LOCK = 1, IRQ_WORK_PENDING = 1, IRQ_WORK_BUSY = 2, IRQ_WORK_LAZY = 4, IRQ_WORK_HARD_IRQ = 8, IRQ_WORK_CLAIMED = 3, CSD_TYPE_ASYNC = 0, CSD_TYPE_SYNC = 16, CSD_TYPE_IRQ_WORK = 32, CSD_TYPE_TTWU = 48, CSD_FLAG_TYPE_MASK = 240, }; enum { CSI_DEC_hl_CURSOR_KEYS = 1, CSI_DEC_hl_132_COLUMNS = 3, CSI_DEC_hl_REVERSE_VIDEO = 5, CSI_DEC_hl_ORIGIN_MODE = 6, CSI_DEC_hl_AUTOWRAP = 7, CSI_DEC_hl_AUTOREPEAT = 8, CSI_DEC_hl_MOUSE_X10 = 9, CSI_DEC_hl_SHOW_CURSOR = 25, CSI_DEC_hl_MOUSE_VT200 = 1000, }; enum { CSI_K_CURSOR_TO_LINEEND = 0, CSI_K_LINESTART_TO_CURSOR = 1, CSI_K_LINE = 2, }; enum { CSI_hl_DISPLAY_CTRL = 3, CSI_hl_INSERT = 4, CSI_hl_AUTO_NL = 20, }; enum { CSI_m_DEFAULT = 0, CSI_m_BOLD = 1, CSI_m_HALF_BRIGHT = 2, CSI_m_ITALIC = 3, CSI_m_UNDERLINE = 4, CSI_m_BLINK = 5, CSI_m_REVERSE = 7, CSI_m_PRI_FONT = 10, CSI_m_ALT_FONT1 = 11, CSI_m_ALT_FONT2 = 12, CSI_m_DOUBLE_UNDERLINE = 21, CSI_m_NORMAL_INTENSITY = 22, CSI_m_NO_ITALIC = 23, CSI_m_NO_UNDERLINE = 24, CSI_m_NO_BLINK = 25, CSI_m_NO_REVERSE = 27, CSI_m_FG_COLOR_BEG = 30, CSI_m_FG_COLOR_END = 37, CSI_m_FG_COLOR = 38, CSI_m_DEFAULT_FG_COLOR = 39, CSI_m_BG_COLOR_BEG = 40, CSI_m_BG_COLOR_END = 47, CSI_m_BG_COLOR = 48, CSI_m_DEFAULT_BG_COLOR = 49, CSI_m_BRIGHT_FG_COLOR_BEG = 90, CSI_m_BRIGHT_FG_COLOR_END = 97, CSI_m_BRIGHT_FG_COLOR_OFF = 60, CSI_m_BRIGHT_BG_COLOR_BEG = 100, CSI_m_BRIGHT_BG_COLOR_END = 107, CSI_m_BRIGHT_BG_COLOR_OFF = 60, }; enum { CSS_NO_REF = 1, CSS_ONLINE = 2, CSS_RELEASED = 4, CSS_VISIBLE = 8, CSS_DYING = 16, }; enum { CSS_TASK_ITER_PROCS = 1, CSS_TASK_ITER_THREADED = 2, CSS_TASK_ITER_SKIPPED = 65536, }; enum { CTRL_ATTR_MCAST_GRP_UNSPEC = 0, CTRL_ATTR_MCAST_GRP_NAME = 1, CTRL_ATTR_MCAST_GRP_ID = 2, __CTRL_ATTR_MCAST_GRP_MAX = 3, }; enum { CTRL_ATTR_OP_UNSPEC = 0, CTRL_ATTR_OP_ID = 1, CTRL_ATTR_OP_FLAGS = 2, __CTRL_ATTR_OP_MAX = 3, }; enum { CTRL_ATTR_POLICY_UNSPEC = 0, CTRL_ATTR_POLICY_DO = 1, CTRL_ATTR_POLICY_DUMP = 2, __CTRL_ATTR_POLICY_DUMP_MAX = 3, CTRL_ATTR_POLICY_DUMP_MAX = 2, }; enum { CTRL_ATTR_UNSPEC = 0, CTRL_ATTR_FAMILY_ID = 1, CTRL_ATTR_FAMILY_NAME = 2, CTRL_ATTR_VERSION = 3, CTRL_ATTR_HDRSIZE = 4, CTRL_ATTR_MAXATTR = 5, CTRL_ATTR_OPS = 6, CTRL_ATTR_MCAST_GROUPS = 7, CTRL_ATTR_POLICY = 8, CTRL_ATTR_OP_POLICY = 9, CTRL_ATTR_OP = 10, __CTRL_ATTR_MAX = 11, }; enum { CTRL_CMD_UNSPEC = 0, CTRL_CMD_NEWFAMILY = 1, CTRL_CMD_DELFAMILY = 2, CTRL_CMD_GETFAMILY = 3, CTRL_CMD_NEWOPS = 4, CTRL_CMD_DELOPS = 5, CTRL_CMD_GETOPS = 6, CTRL_CMD_NEWMCAST_GRP = 7, CTRL_CMD_DELMCAST_GRP = 8, CTRL_CMD_GETMCAST_GRP = 9, 
CTRL_CMD_GETPOLICY = 10, __CTRL_CMD_MAX = 11, }; enum { DAD_PROCESS = 0, DAD_BEGIN = 1, DAD_ABORT = 2, }; enum { DCCPO_PADDING = 0, DCCPO_MANDATORY = 1, DCCPO_MIN_RESERVED = 3, DCCPO_MAX_RESERVED = 31, DCCPO_CHANGE_L = 32, DCCPO_CONFIRM_L = 33, DCCPO_CHANGE_R = 34, DCCPO_CONFIRM_R = 35, DCCPO_NDP_COUNT = 37, DCCPO_ACK_VECTOR_0 = 38, DCCPO_ACK_VECTOR_1 = 39, DCCPO_TIMESTAMP = 41, DCCPO_TIMESTAMP_ECHO = 42, DCCPO_ELAPSED_TIME = 43, DCCPO_MAX = 45, DCCPO_MIN_RX_CCID_SPECIFIC = 128, DCCPO_MAX_RX_CCID_SPECIFIC = 191, DCCPO_MIN_TX_CCID_SPECIFIC = 192, DCCPO_MAX_TX_CCID_SPECIFIC = 255, }; enum { DD_DIR_COUNT = 2, }; enum { DD_PRIO_COUNT = 3, }; enum { DESC_TSS = 9, DESC_LDT = 2, DESCTYPE_S = 16, }; enum { DEVCONF_FORWARDING = 0, DEVCONF_HOPLIMIT = 1, DEVCONF_MTU6 = 2, DEVCONF_ACCEPT_RA = 3, DEVCONF_ACCEPT_REDIRECTS = 4, DEVCONF_AUTOCONF = 5, DEVCONF_DAD_TRANSMITS = 6, DEVCONF_RTR_SOLICITS = 7, DEVCONF_RTR_SOLICIT_INTERVAL = 8, DEVCONF_RTR_SOLICIT_DELAY = 9, DEVCONF_USE_TEMPADDR = 10, DEVCONF_TEMP_VALID_LFT = 11, DEVCONF_TEMP_PREFERED_LFT = 12, DEVCONF_REGEN_MAX_RETRY = 13, DEVCONF_MAX_DESYNC_FACTOR = 14, DEVCONF_MAX_ADDRESSES = 15, DEVCONF_FORCE_MLD_VERSION = 16, DEVCONF_ACCEPT_RA_DEFRTR = 17, DEVCONF_ACCEPT_RA_PINFO = 18, DEVCONF_ACCEPT_RA_RTR_PREF = 19, DEVCONF_RTR_PROBE_INTERVAL = 20, DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, DEVCONF_PROXY_NDP = 22, DEVCONF_OPTIMISTIC_DAD = 23, DEVCONF_ACCEPT_SOURCE_ROUTE = 24, DEVCONF_MC_FORWARDING = 25, DEVCONF_DISABLE_IPV6 = 26, DEVCONF_ACCEPT_DAD = 27, DEVCONF_FORCE_TLLAO = 28, DEVCONF_NDISC_NOTIFY = 29, DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, DEVCONF_SUPPRESS_FRAG_NDISC = 32, DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, DEVCONF_USE_OPTIMISTIC = 34, DEVCONF_ACCEPT_RA_MTU = 35, DEVCONF_STABLE_SECRET = 36, DEVCONF_USE_OIF_ADDRS_ONLY = 37, DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, DEVCONF_DROP_UNSOLICITED_NA = 41, DEVCONF_KEEP_ADDR_ON_DOWN = 42, DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, DEVCONF_SEG6_ENABLED = 44, DEVCONF_SEG6_REQUIRE_HMAC = 45, DEVCONF_ENHANCED_DAD = 46, DEVCONF_ADDR_GEN_MODE = 47, DEVCONF_DISABLE_POLICY = 48, DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, DEVCONF_NDISC_TCLASS = 50, DEVCONF_RPL_SEG_ENABLED = 51, DEVCONF_RA_DEFRTR_METRIC = 52, DEVCONF_IOAM6_ENABLED = 53, DEVCONF_IOAM6_ID = 54, DEVCONF_IOAM6_ID_WIDE = 55, DEVCONF_NDISC_EVICT_NOCARRIER = 56, DEVCONF_ACCEPT_UNTRACKED_NA = 57, DEVCONF_ACCEPT_RA_MIN_LFT = 58, DEVCONF_MAX = 59, }; enum { DEVLINK_ATTR_STATS_RX_PACKETS = 0, DEVLINK_ATTR_STATS_RX_BYTES = 1, DEVLINK_ATTR_STATS_RX_DROPPED = 2, __DEVLINK_ATTR_STATS_MAX = 3, DEVLINK_ATTR_STATS_MAX = 2, }; enum { DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT = 0, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE = 1, }; enum { DIO_SHOULD_DIRTY = 1, DIO_IS_SYNC = 2, }; enum { DIR_OFFSET_MIN = 2, }; enum { DISCOVERED = 16, EXPLORED = 32, FALLTHROUGH = 1, BRANCH = 2, }; enum { DISK_EVENT_FLAG_POLL = 1, DISK_EVENT_FLAG_UEVENT = 2, DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 4, }; enum { DISK_EVENT_MEDIA_CHANGE = 1, DISK_EVENT_EJECT_REQUEST = 2, }; enum { DMA_DSCR_HOST = 0, DMA_DSCR_DEVICE = 1, DMA_DSCR_CTRL = 2, DMA_DSCR_NUM = 3, }; enum { DONE_EXPLORING = 0, KEEP_EXPLORING = 1, }; enum { DQF_ROOT_SQUASH_B = 0, DQF_SYS_FILE_B = 16, DQF_PRIVATE = 17, }; enum { DQST_LOOKUPS = 0, DQST_DROPS = 1, DQST_READS = 2, DQST_WRITES = 3, DQST_CACHE_HITS = 4, DQST_ALLOC_DQUOTS = 5, DQST_FREE_DQUOTS = 6, DQST_SYNCS = 7, _DQST_DQSTAT_LAST = 8, }; enum { 
DUMP_PREFIX_NONE = 0, DUMP_PREFIX_ADDRESS = 1, DUMP_PREFIX_OFFSET = 2, }; enum { D_SHIFT_LEFT = 0, D_SHIFT_RIGHT = 1, }; enum { EC_FLAGS_QUERY_ENABLED = 0, EC_FLAGS_EVENT_HANDLER_INSTALLED = 1, EC_FLAGS_EC_HANDLER_INSTALLED = 2, EC_FLAGS_EC_REG_CALLED = 3, EC_FLAGS_QUERY_METHODS_INSTALLED = 4, EC_FLAGS_STARTED = 5, EC_FLAGS_STOPPED = 6, EC_FLAGS_EVENTS_MASKED = 7, }; enum { EI_ETYPE_NULL = 0, EI_ETYPE_ERRNO = 1, EI_ETYPE_ERRNO_NULL = 2, EI_ETYPE_TRUE = 3, }; enum { EMULATE = 0, XONLY = 1, NONE = 2, }; enum { EPecma = 0, EPdec = 1, EPeq = 2, EPgt = 3, EPlt = 4, }; enum { ERASE = 0, WERASE = 1, KILL = 2, }; enum { ES_WRITTEN_B = 0, ES_UNWRITTEN_B = 1, ES_DELAYED_B = 2, ES_HOLE_B = 3, ES_REFERENCED_B = 4, ES_FLAGS = 5, }; enum { ETHTOOL_A_BITSET_BITS_UNSPEC = 0, ETHTOOL_A_BITSET_BITS_BIT = 1, __ETHTOOL_A_BITSET_BITS_CNT = 2, ETHTOOL_A_BITSET_BITS_MAX = 1, }; enum { ETHTOOL_A_BITSET_BIT_UNSPEC = 0, ETHTOOL_A_BITSET_BIT_INDEX = 1, ETHTOOL_A_BITSET_BIT_NAME = 2, ETHTOOL_A_BITSET_BIT_VALUE = 3, __ETHTOOL_A_BITSET_BIT_CNT = 4, ETHTOOL_A_BITSET_BIT_MAX = 3, }; enum { ETHTOOL_A_BITSET_UNSPEC = 0, ETHTOOL_A_BITSET_NOMASK = 1, ETHTOOL_A_BITSET_SIZE = 2, ETHTOOL_A_BITSET_BITS = 3, ETHTOOL_A_BITSET_VALUE = 4, ETHTOOL_A_BITSET_MASK = 5, __ETHTOOL_A_BITSET_CNT = 6, ETHTOOL_A_BITSET_MAX = 5, }; enum { ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC = 0, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN = 1, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX = 2, }; enum { ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, }; enum { ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, ETHTOOL_A_CABLE_FAULT_LENGTH_SRC = 3, __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 4, ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 3, }; enum { ETHTOOL_A_CABLE_INF_SRC_UNSPEC = 0, ETHTOOL_A_CABLE_INF_SRC_TDR = 1, ETHTOOL_A_CABLE_INF_SRC_ALCD = 2, }; enum { ETHTOOL_A_CABLE_NEST_UNSPEC = 0, ETHTOOL_A_CABLE_NEST_RESULT = 1, ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, __ETHTOOL_A_CABLE_NEST_CNT = 3, ETHTOOL_A_CABLE_NEST_MAX = 2, }; enum { ETHTOOL_A_CABLE_PAIR_A = 0, ETHTOOL_A_CABLE_PAIR_B = 1, ETHTOOL_A_CABLE_PAIR_C = 2, ETHTOOL_A_CABLE_PAIR_D = 3, }; enum { ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, ETHTOOL_A_CABLE_PULSE_mV = 1, __ETHTOOL_A_CABLE_PULSE_CNT = 2, ETHTOOL_A_CABLE_PULSE_MAX = 1, }; enum { ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, ETHTOOL_A_CABLE_RESULT_PAIR = 1, ETHTOOL_A_CABLE_RESULT_CODE = 2, ETHTOOL_A_CABLE_RESULT_SRC = 3, __ETHTOOL_A_CABLE_RESULT_CNT = 4, ETHTOOL_A_CABLE_RESULT_MAX = 3, }; enum { ETHTOOL_A_CABLE_STEP_UNSPEC = 0, ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, __ETHTOOL_A_CABLE_STEP_CNT = 4, ETHTOOL_A_CABLE_STEP_MAX = 3, }; enum { ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, }; enum { ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, }; enum { ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, }; enum { ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, 
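/* ETHTOOL_A_CABLE_TEST_TDR_CFG_*: FIRST/LAST/STEP bound the distance sweep
 * of a TDR cable test and PAIR selects the wire pair; see
 * Documentation/networking/ethtool-netlink.rst for the units. */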
ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, }; enum { ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, }; enum { ETHTOOL_A_CABLE_TEST_UNSPEC = 0, ETHTOOL_A_CABLE_TEST_HEADER = 1, __ETHTOOL_A_CABLE_TEST_CNT = 2, ETHTOOL_A_CABLE_TEST_MAX = 1, }; enum { ETHTOOL_A_CHANNELS_UNSPEC = 0, ETHTOOL_A_CHANNELS_HEADER = 1, ETHTOOL_A_CHANNELS_RX_MAX = 2, ETHTOOL_A_CHANNELS_TX_MAX = 3, ETHTOOL_A_CHANNELS_OTHER_MAX = 4, ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, ETHTOOL_A_CHANNELS_RX_COUNT = 6, ETHTOOL_A_CHANNELS_TX_COUNT = 7, ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, __ETHTOOL_A_CHANNELS_CNT = 10, ETHTOOL_A_CHANNELS_MAX = 9, }; enum { ETHTOOL_A_COALESCE_UNSPEC = 0, ETHTOOL_A_COALESCE_HEADER = 1, ETHTOOL_A_COALESCE_RX_USECS = 2, ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, ETHTOOL_A_COALESCE_TX_USECS = 6, ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 24, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 25, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES = 26, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES = 27, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS = 28, ETHTOOL_A_COALESCE_RX_PROFILE = 29, ETHTOOL_A_COALESCE_TX_PROFILE = 30, __ETHTOOL_A_COALESCE_CNT = 31, ETHTOOL_A_COALESCE_MAX = 30, }; enum { ETHTOOL_A_DEBUG_UNSPEC = 0, ETHTOOL_A_DEBUG_HEADER = 1, ETHTOOL_A_DEBUG_MSGMASK = 2, __ETHTOOL_A_DEBUG_CNT = 3, ETHTOOL_A_DEBUG_MAX = 2, }; enum { ETHTOOL_A_EEE_UNSPEC = 0, ETHTOOL_A_EEE_HEADER = 1, ETHTOOL_A_EEE_MODES_OURS = 2, ETHTOOL_A_EEE_MODES_PEER = 3, ETHTOOL_A_EEE_ACTIVE = 4, ETHTOOL_A_EEE_ENABLED = 5, ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, ETHTOOL_A_EEE_TX_LPI_TIMER = 7, __ETHTOOL_A_EEE_CNT = 8, ETHTOOL_A_EEE_MAX = 7, }; enum { ETHTOOL_A_FEATURES_UNSPEC = 0, ETHTOOL_A_FEATURES_HEADER = 1, ETHTOOL_A_FEATURES_HW = 2, ETHTOOL_A_FEATURES_WANTED = 3, ETHTOOL_A_FEATURES_ACTIVE = 4, ETHTOOL_A_FEATURES_NOCHANGE = 5, __ETHTOOL_A_FEATURES_CNT = 6, ETHTOOL_A_FEATURES_MAX = 5, }; enum { ETHTOOL_A_FEC_STAT_UNSPEC = 0, ETHTOOL_A_FEC_STAT_PAD = 1, ETHTOOL_A_FEC_STAT_CORRECTED = 2, ETHTOOL_A_FEC_STAT_UNCORR = 3, ETHTOOL_A_FEC_STAT_CORR_BITS = 4, __ETHTOOL_A_FEC_STAT_CNT = 5, ETHTOOL_A_FEC_STAT_MAX = 4, }; enum { ETHTOOL_A_FEC_UNSPEC = 0, ETHTOOL_A_FEC_HEADER = 1, ETHTOOL_A_FEC_MODES = 2, ETHTOOL_A_FEC_AUTO = 3, ETHTOOL_A_FEC_ACTIVE = 4, ETHTOOL_A_FEC_STATS = 5, __ETHTOOL_A_FEC_CNT = 6, ETHTOOL_A_FEC_MAX = 5, }; enum { ETHTOOL_A_HEADER_UNSPEC = 0, ETHTOOL_A_HEADER_DEV_INDEX = 1, ETHTOOL_A_HEADER_DEV_NAME = 2, ETHTOOL_A_HEADER_FLAGS = 3, ETHTOOL_A_HEADER_PHY_INDEX = 4, __ETHTOOL_A_HEADER_CNT = 5, ETHTOOL_A_HEADER_MAX 
= 4, }; enum { ETHTOOL_A_IRQ_MODERATION_UNSPEC = 0, ETHTOOL_A_IRQ_MODERATION_USEC = 1, ETHTOOL_A_IRQ_MODERATION_PKTS = 2, ETHTOOL_A_IRQ_MODERATION_COMPS = 3, __ETHTOOL_A_IRQ_MODERATION_CNT = 4, ETHTOOL_A_IRQ_MODERATION_MAX = 3, }; enum { ETHTOOL_A_LINKINFO_UNSPEC = 0, ETHTOOL_A_LINKINFO_HEADER = 1, ETHTOOL_A_LINKINFO_PORT = 2, ETHTOOL_A_LINKINFO_PHYADDR = 3, ETHTOOL_A_LINKINFO_TP_MDIX = 4, ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, __ETHTOOL_A_LINKINFO_CNT = 7, ETHTOOL_A_LINKINFO_MAX = 6, }; enum { ETHTOOL_A_LINKMODES_UNSPEC = 0, ETHTOOL_A_LINKMODES_HEADER = 1, ETHTOOL_A_LINKMODES_AUTONEG = 2, ETHTOOL_A_LINKMODES_OURS = 3, ETHTOOL_A_LINKMODES_PEER = 4, ETHTOOL_A_LINKMODES_SPEED = 5, ETHTOOL_A_LINKMODES_DUPLEX = 6, ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, ETHTOOL_A_LINKMODES_LANES = 9, ETHTOOL_A_LINKMODES_RATE_MATCHING = 10, __ETHTOOL_A_LINKMODES_CNT = 11, ETHTOOL_A_LINKMODES_MAX = 10, }; enum { ETHTOOL_A_LINKSTATE_UNSPEC = 0, ETHTOOL_A_LINKSTATE_HEADER = 1, ETHTOOL_A_LINKSTATE_LINK = 2, ETHTOOL_A_LINKSTATE_SQI = 3, ETHTOOL_A_LINKSTATE_SQI_MAX = 4, ETHTOOL_A_LINKSTATE_EXT_STATE = 5, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 7, __ETHTOOL_A_LINKSTATE_CNT = 8, ETHTOOL_A_LINKSTATE_MAX = 7, }; enum { ETHTOOL_A_MM_STAT_UNSPEC = 0, ETHTOOL_A_MM_STAT_PAD = 1, ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS = 2, ETHTOOL_A_MM_STAT_SMD_ERRORS = 3, ETHTOOL_A_MM_STAT_REASSEMBLY_OK = 4, ETHTOOL_A_MM_STAT_RX_FRAG_COUNT = 5, ETHTOOL_A_MM_STAT_TX_FRAG_COUNT = 6, ETHTOOL_A_MM_STAT_HOLD_COUNT = 7, __ETHTOOL_A_MM_STAT_CNT = 8, ETHTOOL_A_MM_STAT_MAX = 7, }; enum { ETHTOOL_A_MM_UNSPEC = 0, ETHTOOL_A_MM_HEADER = 1, ETHTOOL_A_MM_PMAC_ENABLED = 2, ETHTOOL_A_MM_TX_ENABLED = 3, ETHTOOL_A_MM_TX_ACTIVE = 4, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE = 5, ETHTOOL_A_MM_RX_MIN_FRAG_SIZE = 6, ETHTOOL_A_MM_VERIFY_ENABLED = 7, ETHTOOL_A_MM_VERIFY_STATUS = 8, ETHTOOL_A_MM_VERIFY_TIME = 9, ETHTOOL_A_MM_MAX_VERIFY_TIME = 10, ETHTOOL_A_MM_STATS = 11, __ETHTOOL_A_MM_CNT = 12, ETHTOOL_A_MM_MAX = 11, }; enum { ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, ETHTOOL_A_MODULE_EEPROM_HEADER = 1, ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, ETHTOOL_A_MODULE_EEPROM_PAGE = 4, ETHTOOL_A_MODULE_EEPROM_BANK = 5, ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, ETHTOOL_A_MODULE_EEPROM_DATA = 7, __ETHTOOL_A_MODULE_EEPROM_CNT = 8, ETHTOOL_A_MODULE_EEPROM_MAX = 7, }; enum { ETHTOOL_A_MODULE_FW_FLASH_UNSPEC = 0, ETHTOOL_A_MODULE_FW_FLASH_HEADER = 1, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME = 2, ETHTOOL_A_MODULE_FW_FLASH_PASSWORD = 3, ETHTOOL_A_MODULE_FW_FLASH_STATUS = 4, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG = 5, ETHTOOL_A_MODULE_FW_FLASH_DONE = 6, ETHTOOL_A_MODULE_FW_FLASH_TOTAL = 7, __ETHTOOL_A_MODULE_FW_FLASH_CNT = 8, ETHTOOL_A_MODULE_FW_FLASH_MAX = 7, }; enum { ETHTOOL_A_MODULE_UNSPEC = 0, ETHTOOL_A_MODULE_HEADER = 1, ETHTOOL_A_MODULE_POWER_MODE_POLICY = 2, ETHTOOL_A_MODULE_POWER_MODE = 3, __ETHTOOL_A_MODULE_CNT = 4, ETHTOOL_A_MODULE_MAX = 3, }; enum { ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, ETHTOOL_A_PAUSE_STAT_PAD = 1, ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, __ETHTOOL_A_PAUSE_STAT_CNT = 4, ETHTOOL_A_PAUSE_STAT_MAX = 3, }; enum { ETHTOOL_A_PAUSE_UNSPEC = 0, ETHTOOL_A_PAUSE_HEADER = 1, ETHTOOL_A_PAUSE_AUTONEG = 2, ETHTOOL_A_PAUSE_RX = 3, ETHTOOL_A_PAUSE_TX = 4, ETHTOOL_A_PAUSE_STATS = 5, ETHTOOL_A_PAUSE_STATS_SRC = 6, __ETHTOOL_A_PAUSE_CNT = 7, ETHTOOL_A_PAUSE_MAX = 6, }; enum { ETHTOOL_A_PHC_VCLOCKS_UNSPEC = 0, 
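/* Most ETHTOOL_A_* attribute sets share this shape: an UNSPEC zero value,
 * a HEADER nest, the payload attributes, then __..._CNT and ..._MAX
 * sentinels (MAX == CNT - 1). */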
ETHTOOL_A_PHC_VCLOCKS_HEADER = 1, ETHTOOL_A_PHC_VCLOCKS_NUM = 2, ETHTOOL_A_PHC_VCLOCKS_INDEX = 3, __ETHTOOL_A_PHC_VCLOCKS_CNT = 4, ETHTOOL_A_PHC_VCLOCKS_MAX = 3, }; enum { ETHTOOL_A_PHY_UNSPEC = 0, ETHTOOL_A_PHY_HEADER = 1, ETHTOOL_A_PHY_INDEX = 2, ETHTOOL_A_PHY_DRVNAME = 3, ETHTOOL_A_PHY_NAME = 4, ETHTOOL_A_PHY_UPSTREAM_TYPE = 5, ETHTOOL_A_PHY_UPSTREAM_INDEX = 6, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME = 7, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME = 8, __ETHTOOL_A_PHY_CNT = 9, ETHTOOL_A_PHY_MAX = 8, }; enum { ETHTOOL_A_PLCA_UNSPEC = 0, ETHTOOL_A_PLCA_HEADER = 1, ETHTOOL_A_PLCA_VERSION = 2, ETHTOOL_A_PLCA_ENABLED = 3, ETHTOOL_A_PLCA_STATUS = 4, ETHTOOL_A_PLCA_NODE_CNT = 5, ETHTOOL_A_PLCA_NODE_ID = 6, ETHTOOL_A_PLCA_TO_TMR = 7, ETHTOOL_A_PLCA_BURST_CNT = 8, ETHTOOL_A_PLCA_BURST_TMR = 9, __ETHTOOL_A_PLCA_CNT = 10, ETHTOOL_A_PLCA_MAX = 9, }; enum { ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, ETHTOOL_A_PRIVFLAGS_HEADER = 1, ETHTOOL_A_PRIVFLAGS_FLAGS = 2, __ETHTOOL_A_PRIVFLAGS_CNT = 3, ETHTOOL_A_PRIVFLAGS_MAX = 2, }; enum { ETHTOOL_A_PROFILE_UNSPEC = 0, ETHTOOL_A_PROFILE_IRQ_MODERATION = 1, __ETHTOOL_A_PROFILE_CNT = 2, ETHTOOL_A_PROFILE_MAX = 1, }; enum { ETHTOOL_A_PSE_UNSPEC = 0, ETHTOOL_A_PSE_HEADER = 1, ETHTOOL_A_PODL_PSE_ADMIN_STATE = 2, ETHTOOL_A_PODL_PSE_ADMIN_CONTROL = 3, ETHTOOL_A_PODL_PSE_PW_D_STATUS = 4, ETHTOOL_A_C33_PSE_ADMIN_STATE = 5, ETHTOOL_A_C33_PSE_ADMIN_CONTROL = 6, ETHTOOL_A_C33_PSE_PW_D_STATUS = 7, ETHTOOL_A_C33_PSE_PW_CLASS = 8, ETHTOOL_A_C33_PSE_ACTUAL_PW = 9, ETHTOOL_A_C33_PSE_EXT_STATE = 10, ETHTOOL_A_C33_PSE_EXT_SUBSTATE = 11, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT = 12, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES = 13, __ETHTOOL_A_PSE_CNT = 14, ETHTOOL_A_PSE_MAX = 13, }; enum { ETHTOOL_A_RINGS_UNSPEC = 0, ETHTOOL_A_RINGS_HEADER = 1, ETHTOOL_A_RINGS_RX_MAX = 2, ETHTOOL_A_RINGS_RX_MINI_MAX = 3, ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, ETHTOOL_A_RINGS_TX_MAX = 5, ETHTOOL_A_RINGS_RX = 6, ETHTOOL_A_RINGS_RX_MINI = 7, ETHTOOL_A_RINGS_RX_JUMBO = 8, ETHTOOL_A_RINGS_TX = 9, ETHTOOL_A_RINGS_RX_BUF_LEN = 10, ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 11, ETHTOOL_A_RINGS_CQE_SIZE = 12, ETHTOOL_A_RINGS_TX_PUSH = 13, ETHTOOL_A_RINGS_RX_PUSH = 14, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 15, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 16, __ETHTOOL_A_RINGS_CNT = 17, ETHTOOL_A_RINGS_MAX = 16, }; enum { ETHTOOL_A_RSS_UNSPEC = 0, ETHTOOL_A_RSS_HEADER = 1, ETHTOOL_A_RSS_CONTEXT = 2, ETHTOOL_A_RSS_HFUNC = 3, ETHTOOL_A_RSS_INDIR = 4, ETHTOOL_A_RSS_HKEY = 5, ETHTOOL_A_RSS_INPUT_XFRM = 6, ETHTOOL_A_RSS_START_CONTEXT = 7, __ETHTOOL_A_RSS_CNT = 8, ETHTOOL_A_RSS_MAX = 7, }; enum { ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, }; enum { ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, 
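/* The numeric infix in ETHTOOL_A_STATS_ETH_MAC_* (2_TX_PKT, 23_IR_LEN_ERR,
 * ...) appears to follow the numbering of the corresponding IEEE 802.3
 * clause 30 managed objects, not the netlink attribute values. */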
ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, ETHTOOL_A_STATS_ETH_MAC_MAX = 21, }; enum { ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, ETHTOOL_A_STATS_ETH_PHY_MAX = 0, }; enum { ETHTOOL_A_STATS_GRP_UNSPEC = 0, ETHTOOL_A_STATS_GRP_PAD = 1, ETHTOOL_A_STATS_GRP_ID = 2, ETHTOOL_A_STATS_GRP_SS_ID = 3, ETHTOOL_A_STATS_GRP_STAT = 4, ETHTOOL_A_STATS_GRP_HIST_RX = 5, ETHTOOL_A_STATS_GRP_HIST_TX = 6, ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, ETHTOOL_A_STATS_GRP_HIST_VAL = 9, __ETHTOOL_A_STATS_GRP_CNT = 10, ETHTOOL_A_STATS_GRP_MAX = 9, }; enum { ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, ETHTOOL_A_STATS_RMON_OVERSIZE = 1, ETHTOOL_A_STATS_RMON_FRAG = 2, ETHTOOL_A_STATS_RMON_JABBER = 3, __ETHTOOL_A_STATS_RMON_CNT = 4, ETHTOOL_A_STATS_RMON_MAX = 3, }; enum { ETHTOOL_A_STATS_UNSPEC = 0, ETHTOOL_A_STATS_PAD = 1, ETHTOOL_A_STATS_HEADER = 2, ETHTOOL_A_STATS_GROUPS = 3, ETHTOOL_A_STATS_GRP = 4, ETHTOOL_A_STATS_SRC = 5, __ETHTOOL_A_STATS_CNT = 6, ETHTOOL_A_STATS_MAX = 5, }; enum { ETHTOOL_A_STRINGSETS_UNSPEC = 0, ETHTOOL_A_STRINGSETS_STRINGSET = 1, __ETHTOOL_A_STRINGSETS_CNT = 2, ETHTOOL_A_STRINGSETS_MAX = 1, }; enum { ETHTOOL_A_STRINGSET_UNSPEC = 0, ETHTOOL_A_STRINGSET_ID = 1, ETHTOOL_A_STRINGSET_COUNT = 2, ETHTOOL_A_STRINGSET_STRINGS = 3, __ETHTOOL_A_STRINGSET_CNT = 4, ETHTOOL_A_STRINGSET_MAX = 3, }; enum { ETHTOOL_A_STRINGS_UNSPEC = 0, ETHTOOL_A_STRINGS_STRING = 1, __ETHTOOL_A_STRINGS_CNT = 2, ETHTOOL_A_STRINGS_MAX = 1, }; enum { ETHTOOL_A_STRING_UNSPEC = 0, ETHTOOL_A_STRING_INDEX = 1, ETHTOOL_A_STRING_VALUE = 2, __ETHTOOL_A_STRING_CNT = 3, ETHTOOL_A_STRING_MAX = 2, }; enum { ETHTOOL_A_STRSET_UNSPEC = 0, ETHTOOL_A_STRSET_HEADER = 1, ETHTOOL_A_STRSET_STRINGSETS = 2, ETHTOOL_A_STRSET_COUNTS_ONLY = 3, __ETHTOOL_A_STRSET_CNT = 4, ETHTOOL_A_STRSET_MAX = 3, }; enum { ETHTOOL_A_TSINFO_UNSPEC = 0, ETHTOOL_A_TSINFO_HEADER = 1, ETHTOOL_A_TSINFO_TIMESTAMPING = 2, ETHTOOL_A_TSINFO_TX_TYPES = 3, ETHTOOL_A_TSINFO_RX_FILTERS = 4, ETHTOOL_A_TSINFO_PHC_INDEX = 5, ETHTOOL_A_TSINFO_STATS = 6, __ETHTOOL_A_TSINFO_CNT = 7, ETHTOOL_A_TSINFO_MAX = 6, }; enum { ETHTOOL_A_TS_STAT_UNSPEC = 0, ETHTOOL_A_TS_STAT_TX_PKTS = 1, ETHTOOL_A_TS_STAT_TX_LOST = 2, ETHTOOL_A_TS_STAT_TX_ERR = 3, __ETHTOOL_A_TS_STAT_CNT = 4, ETHTOOL_A_TS_STAT_MAX = 3, }; enum { ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, ETHTOOL_A_TUNNEL_INFO_HEADER = 1, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, __ETHTOOL_A_TUNNEL_INFO_CNT = 3, ETHTOOL_A_TUNNEL_INFO_MAX = 2, }; enum { ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, }; enum { ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, }; enum { ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, ETHTOOL_A_TUNNEL_UDP_TABLE = 1, __ETHTOOL_A_TUNNEL_UDP_CNT = 2, ETHTOOL_A_TUNNEL_UDP_MAX = 1, }; enum { ETHTOOL_A_WOL_UNSPEC = 0, ETHTOOL_A_WOL_HEADER = 1, ETHTOOL_A_WOL_MODES = 2, ETHTOOL_A_WOL_SOPASS = 3, __ETHTOOL_A_WOL_CNT = 4, ETHTOOL_A_WOL_MAX = 3, }; enum { ETHTOOL_MSG_KERNEL_NONE = 0, ETHTOOL_MSG_STRSET_GET_REPLY = 1, ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, ETHTOOL_MSG_LINKINFO_NTF = 3, ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, ETHTOOL_MSG_LINKMODES_NTF = 5, 
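/* ETHTOOL_MSG_*_REPLY and *_NTF are kernel-to-userspace message types; the
 * matching userspace-to-kernel requests follow in the ETHTOOL_MSG_USER
 * enum further down. */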
ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, ETHTOOL_MSG_DEBUG_GET_REPLY = 7, ETHTOOL_MSG_DEBUG_NTF = 8, ETHTOOL_MSG_WOL_GET_REPLY = 9, ETHTOOL_MSG_WOL_NTF = 10, ETHTOOL_MSG_FEATURES_GET_REPLY = 11, ETHTOOL_MSG_FEATURES_SET_REPLY = 12, ETHTOOL_MSG_FEATURES_NTF = 13, ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, ETHTOOL_MSG_PRIVFLAGS_NTF = 15, ETHTOOL_MSG_RINGS_GET_REPLY = 16, ETHTOOL_MSG_RINGS_NTF = 17, ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, ETHTOOL_MSG_CHANNELS_NTF = 19, ETHTOOL_MSG_COALESCE_GET_REPLY = 20, ETHTOOL_MSG_COALESCE_NTF = 21, ETHTOOL_MSG_PAUSE_GET_REPLY = 22, ETHTOOL_MSG_PAUSE_NTF = 23, ETHTOOL_MSG_EEE_GET_REPLY = 24, ETHTOOL_MSG_EEE_NTF = 25, ETHTOOL_MSG_TSINFO_GET_REPLY = 26, ETHTOOL_MSG_CABLE_TEST_NTF = 27, ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, ETHTOOL_MSG_FEC_GET_REPLY = 30, ETHTOOL_MSG_FEC_NTF = 31, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, ETHTOOL_MSG_STATS_GET_REPLY = 33, ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 34, ETHTOOL_MSG_MODULE_GET_REPLY = 35, ETHTOOL_MSG_MODULE_NTF = 36, ETHTOOL_MSG_PSE_GET_REPLY = 37, ETHTOOL_MSG_RSS_GET_REPLY = 38, ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 39, ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 40, ETHTOOL_MSG_PLCA_NTF = 41, ETHTOOL_MSG_MM_GET_REPLY = 42, ETHTOOL_MSG_MM_NTF = 43, ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 44, ETHTOOL_MSG_PHY_GET_REPLY = 45, ETHTOOL_MSG_PHY_NTF = 46, __ETHTOOL_MSG_KERNEL_CNT = 47, ETHTOOL_MSG_KERNEL_MAX = 46, }; enum { ETHTOOL_MSG_USER_NONE = 0, ETHTOOL_MSG_STRSET_GET = 1, ETHTOOL_MSG_LINKINFO_GET = 2, ETHTOOL_MSG_LINKINFO_SET = 3, ETHTOOL_MSG_LINKMODES_GET = 4, ETHTOOL_MSG_LINKMODES_SET = 5, ETHTOOL_MSG_LINKSTATE_GET = 6, ETHTOOL_MSG_DEBUG_GET = 7, ETHTOOL_MSG_DEBUG_SET = 8, ETHTOOL_MSG_WOL_GET = 9, ETHTOOL_MSG_WOL_SET = 10, ETHTOOL_MSG_FEATURES_GET = 11, ETHTOOL_MSG_FEATURES_SET = 12, ETHTOOL_MSG_PRIVFLAGS_GET = 13, ETHTOOL_MSG_PRIVFLAGS_SET = 14, ETHTOOL_MSG_RINGS_GET = 15, ETHTOOL_MSG_RINGS_SET = 16, ETHTOOL_MSG_CHANNELS_GET = 17, ETHTOOL_MSG_CHANNELS_SET = 18, ETHTOOL_MSG_COALESCE_GET = 19, ETHTOOL_MSG_COALESCE_SET = 20, ETHTOOL_MSG_PAUSE_GET = 21, ETHTOOL_MSG_PAUSE_SET = 22, ETHTOOL_MSG_EEE_GET = 23, ETHTOOL_MSG_EEE_SET = 24, ETHTOOL_MSG_TSINFO_GET = 25, ETHTOOL_MSG_CABLE_TEST_ACT = 26, ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, ETHTOOL_MSG_TUNNEL_INFO_GET = 28, ETHTOOL_MSG_FEC_GET = 29, ETHTOOL_MSG_FEC_SET = 30, ETHTOOL_MSG_MODULE_EEPROM_GET = 31, ETHTOOL_MSG_STATS_GET = 32, ETHTOOL_MSG_PHC_VCLOCKS_GET = 33, ETHTOOL_MSG_MODULE_GET = 34, ETHTOOL_MSG_MODULE_SET = 35, ETHTOOL_MSG_PSE_GET = 36, ETHTOOL_MSG_PSE_SET = 37, ETHTOOL_MSG_RSS_GET = 38, ETHTOOL_MSG_PLCA_GET_CFG = 39, ETHTOOL_MSG_PLCA_SET_CFG = 40, ETHTOOL_MSG_PLCA_GET_STATUS = 41, ETHTOOL_MSG_MM_GET = 42, ETHTOOL_MSG_MM_SET = 43, ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 44, ETHTOOL_MSG_PHY_GET = 45, __ETHTOOL_MSG_USER_CNT = 46, ETHTOOL_MSG_USER_MAX = 45, }; enum { ETHTOOL_STATS_ETH_PHY = 0, ETHTOOL_STATS_ETH_MAC = 1, ETHTOOL_STATS_ETH_CTRL = 2, ETHTOOL_STATS_RMON = 3, __ETHTOOL_STATS_CNT = 4, }; enum { ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, ETHTOOL_TCP_DATA_SPLIT_DISABLED = 1, ETHTOOL_TCP_DATA_SPLIT_ENABLED = 2, }; enum { ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, }; enum { ETH_RSS_HASH_TOP_BIT = 0, ETH_RSS_HASH_XOR_BIT = 1, ETH_RSS_HASH_CRC32_BIT = 2, ETH_RSS_HASH_FUNCS_COUNT = 3, }; enum { EVENTFS_SAVE_MODE = 65536, EVENTFS_SAVE_UID = 131072, EVENTFS_SAVE_GID = 262144, }; enum { EVENT_FILE_FL_ENABLED = 1, EVENT_FILE_FL_RECORDED_CMD = 2, EVENT_FILE_FL_RECORDED_TGID 
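/* EVENT_FILE_FL_*: flag values; the EVENT_FILE_FL_*_BIT enum that follows
 * gives the corresponding bit numbers, i.e. flag == 1 << bit. */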
= 4, EVENT_FILE_FL_FILTERED = 8, EVENT_FILE_FL_NO_SET_FILTER = 16, EVENT_FILE_FL_SOFT_MODE = 32, EVENT_FILE_FL_SOFT_DISABLED = 64, EVENT_FILE_FL_TRIGGER_MODE = 128, EVENT_FILE_FL_TRIGGER_COND = 256, EVENT_FILE_FL_PID_FILTER = 512, EVENT_FILE_FL_WAS_ENABLED = 1024, EVENT_FILE_FL_FREED = 2048, }; enum { EVENT_FILE_FL_ENABLED_BIT = 0, EVENT_FILE_FL_RECORDED_CMD_BIT = 1, EVENT_FILE_FL_RECORDED_TGID_BIT = 2, EVENT_FILE_FL_FILTERED_BIT = 3, EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, EVENT_FILE_FL_SOFT_MODE_BIT = 5, EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, EVENT_FILE_FL_TRIGGER_COND_BIT = 8, EVENT_FILE_FL_PID_FILTER_BIT = 9, EVENT_FILE_FL_WAS_ENABLED_BIT = 10, EVENT_FILE_FL_FREED_BIT = 11, }; enum { EVENT_TRIGGER_FL_PROBE = 1, }; enum { EXT4_FC_REASON_XATTR = 0, EXT4_FC_REASON_CROSS_RENAME = 1, EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, EXT4_FC_REASON_NOMEM = 3, EXT4_FC_REASON_SWAP_BOOT = 4, EXT4_FC_REASON_RESIZE = 5, EXT4_FC_REASON_RENAME_DIR = 6, EXT4_FC_REASON_FALLOC_RANGE = 7, EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, EXT4_FC_REASON_ENCRYPTED_FILENAME = 9, EXT4_FC_REASON_MAX = 10, }; enum { EXT4_FC_STATUS_OK = 0, EXT4_FC_STATUS_INELIGIBLE = 1, EXT4_FC_STATUS_SKIPPED = 2, EXT4_FC_STATUS_FAILED = 3, }; enum { EXT4_INODE_SECRM = 0, EXT4_INODE_UNRM = 1, EXT4_INODE_COMPR = 2, EXT4_INODE_SYNC = 3, EXT4_INODE_IMMUTABLE = 4, EXT4_INODE_APPEND = 5, EXT4_INODE_NODUMP = 6, EXT4_INODE_NOATIME = 7, EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, EXT4_INODE_NOCOMPR = 10, EXT4_INODE_ENCRYPT = 11, EXT4_INODE_INDEX = 12, EXT4_INODE_IMAGIC = 13, EXT4_INODE_JOURNAL_DATA = 14, EXT4_INODE_NOTAIL = 15, EXT4_INODE_DIRSYNC = 16, EXT4_INODE_TOPDIR = 17, EXT4_INODE_HUGE_FILE = 18, EXT4_INODE_EXTENTS = 19, EXT4_INODE_VERITY = 20, EXT4_INODE_EA_INODE = 21, EXT4_INODE_DAX = 25, EXT4_INODE_INLINE_DATA = 28, EXT4_INODE_PROJINHERIT = 29, EXT4_INODE_CASEFOLD = 30, EXT4_INODE_RESERVED = 31, }; enum { EXT4_MF_MNTDIR_SAMPLED = 0, EXT4_MF_FC_INELIGIBLE = 1, }; enum { EXT4_STATE_NEW = 0, EXT4_STATE_XATTR = 1, EXT4_STATE_NO_EXPAND = 2, EXT4_STATE_DA_ALLOC_CLOSE = 3, EXT4_STATE_EXT_MIGRATE = 4, EXT4_STATE_NEWENTRY = 5, EXT4_STATE_MAY_INLINE_DATA = 6, EXT4_STATE_EXT_PRECACHED = 7, EXT4_STATE_LUSTRE_EA_INODE = 8, EXT4_STATE_VERITY_IN_PROGRESS = 9, EXT4_STATE_FC_COMMITTING = 10, EXT4_STATE_ORPHAN_FILE = 11, }; enum { EXTRA_REG_NHMEX_M_FILTER = 0, EXTRA_REG_NHMEX_M_DSP = 1, EXTRA_REG_NHMEX_M_ISS = 2, EXTRA_REG_NHMEX_M_MAP = 3, EXTRA_REG_NHMEX_M_MSC_THR = 4, EXTRA_REG_NHMEX_M_PGT = 5, EXTRA_REG_NHMEX_M_PLD = 6, EXTRA_REG_NHMEX_M_ZDP_CTL_FVC = 7, }; enum { Enabled = 0, Magic = 1, }; enum { FBCON_LOGO_CANSHOW = -1, FBCON_LOGO_DRAW = -2, FBCON_LOGO_DONTSHOW = -3, }; enum { FB_BLANK_UNBLANK = 0, FB_BLANK_NORMAL = 1, FB_BLANK_VSYNC_SUSPEND = 2, FB_BLANK_HSYNC_SUSPEND = 3, FB_BLANK_POWERDOWN = 4, }; enum { FGRAPH_TYPE_RESERVED = 0, FGRAPH_TYPE_BITMAP = 1, FGRAPH_TYPE_DATA = 2, }; enum { FIB6_NO_SERNUM_CHANGE = 0, }; enum { FILTER_OTHER = 0, FILTER_STATIC_STRING = 1, FILTER_DYN_STRING = 2, FILTER_RDYN_STRING = 3, FILTER_PTR_STRING = 4, FILTER_TRACE_FN = 5, FILTER_CPUMASK = 6, FILTER_COMM = 7, FILTER_CPU = 8, FILTER_STACKTRACE = 9, }; enum { FILT_ERR_NONE = 0, FILT_ERR_INVALID_OP = 1, FILT_ERR_TOO_MANY_OPEN = 2, FILT_ERR_TOO_MANY_CLOSE = 3, FILT_ERR_MISSING_QUOTE = 4, FILT_ERR_MISSING_BRACE_OPEN = 5, FILT_ERR_MISSING_BRACE_CLOSE = 6, FILT_ERR_OPERAND_TOO_LONG = 7, FILT_ERR_EXPECT_STRING = 8, FILT_ERR_EXPECT_DIGIT = 9, FILT_ERR_ILLEGAL_FIELD_OP = 10, FILT_ERR_FIELD_NOT_FOUND = 11, FILT_ERR_ILLEGAL_INTVAL = 12, 
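/* FILT_ERR_*: parse and setup errors reported by the trace-event filter
 * predicate code. */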
FILT_ERR_BAD_SUBSYS_FILTER = 13, FILT_ERR_TOO_MANY_PREDS = 14, FILT_ERR_INVALID_FILTER = 15, FILT_ERR_INVALID_CPULIST = 16, FILT_ERR_IP_FIELD_ONLY = 17, FILT_ERR_INVALID_VALUE = 18, FILT_ERR_NO_FUNCTION = 19, FILT_ERR_ERRNO = 20, FILT_ERR_NO_FILTER = 21, }; enum { FLAGS_FILL_FULL = 268435456, FLAGS_FILL_START = 536870912, FLAGS_FILL_END = 805306368, }; enum { FOLL_TOUCH = 65536, FOLL_TRIED = 131072, FOLL_REMOTE = 262144, FOLL_PIN = 524288, FOLL_FAST_ONLY = 1048576, FOLL_UNLOCKABLE = 2097152, FOLL_MADV_POPULATE = 4194304, }; enum { FOLL_WRITE = 1, FOLL_GET = 2, FOLL_DUMP = 4, FOLL_FORCE = 8, FOLL_NOWAIT = 16, FOLL_NOFAULT = 32, FOLL_HWPOISON = 64, FOLL_ANON = 128, FOLL_LONGTERM = 256, FOLL_SPLIT_PMD = 512, FOLL_PCI_P2PDMA = 1024, FOLL_INTERRUPTIBLE = 2048, FOLL_HONOR_NUMA_FAULT = 4096, }; enum { FORMAT_HEADER = 1, FORMAT_FIELD_SEPERATOR = 2, FORMAT_PRINTFMT = 3, }; enum { FOU_ATTR_UNSPEC = 0, FOU_ATTR_PORT = 1, FOU_ATTR_AF = 2, FOU_ATTR_IPPROTO = 3, FOU_ATTR_TYPE = 4, FOU_ATTR_REMCSUM_NOPARTIAL = 5, FOU_ATTR_LOCAL_V4 = 6, FOU_ATTR_LOCAL_V6 = 7, FOU_ATTR_PEER_V4 = 8, FOU_ATTR_PEER_V6 = 9, FOU_ATTR_PEER_PORT = 10, FOU_ATTR_IFINDEX = 11, __FOU_ATTR_MAX = 12, }; enum { FOU_CMD_UNSPEC = 0, FOU_CMD_ADD = 1, FOU_CMD_DEL = 2, FOU_CMD_GET = 3, __FOU_CMD_MAX = 4, }; enum { FOU_ENCAP_UNSPEC = 0, FOU_ENCAP_DIRECT = 1, FOU_ENCAP_GUE = 2, }; enum { FRA_UNSPEC = 0, FRA_DST = 1, FRA_SRC = 2, FRA_IIFNAME = 3, FRA_GOTO = 4, FRA_UNUSED2 = 5, FRA_PRIORITY = 6, FRA_UNUSED3 = 7, FRA_UNUSED4 = 8, FRA_UNUSED5 = 9, FRA_FWMARK = 10, FRA_FLOW = 11, FRA_TUN_ID = 12, FRA_SUPPRESS_IFGROUP = 13, FRA_SUPPRESS_PREFIXLEN = 14, FRA_TABLE = 15, FRA_FWMASK = 16, FRA_OIFNAME = 17, FRA_PAD = 18, FRA_L3MDEV = 19, FRA_UID_RANGE = 20, FRA_PROTOCOL = 21, FRA_IP_PROTO = 22, FRA_SPORT_RANGE = 23, FRA_DPORT_RANGE = 24, FRA_DSCP = 25, __FRA_MAX = 26, }; enum { FR_ACT_UNSPEC = 0, FR_ACT_TO_TBL = 1, FR_ACT_GOTO = 2, FR_ACT_NOP = 3, FR_ACT_RES3 = 4, FR_ACT_RES4 = 5, FR_ACT_BLACKHOLE = 6, FR_ACT_UNREACHABLE = 7, FR_ACT_PROHIBIT = 8, __FR_ACT_MAX = 9, }; enum { FTRACE_FL_ENABLED = 2147483648, FTRACE_FL_REGS = 1073741824, FTRACE_FL_REGS_EN = 536870912, FTRACE_FL_TRAMP = 268435456, FTRACE_FL_TRAMP_EN = 134217728, FTRACE_FL_IPMODIFY = 67108864, FTRACE_FL_DISABLED = 33554432, FTRACE_FL_DIRECT = 16777216, FTRACE_FL_DIRECT_EN = 8388608, FTRACE_FL_CALL_OPS = 4194304, FTRACE_FL_CALL_OPS_EN = 2097152, FTRACE_FL_TOUCHED = 1048576, FTRACE_FL_MODIFIED = 524288, }; enum { FTRACE_HASH_FL_MOD = 1, }; enum { FTRACE_ITER_FILTER = 1, FTRACE_ITER_NOTRACE = 2, FTRACE_ITER_PRINTALL = 4, FTRACE_ITER_DO_PROBES = 8, FTRACE_ITER_PROBE = 16, FTRACE_ITER_MOD = 32, FTRACE_ITER_ENABLED = 64, FTRACE_ITER_TOUCHED = 128, FTRACE_ITER_ADDRS = 256, }; enum { FTRACE_MODIFY_ENABLE_FL = 1, FTRACE_MODIFY_MAY_SLEEP_FL = 2, }; enum { FTRACE_OPS_FL_ENABLED = 1, FTRACE_OPS_FL_DYNAMIC = 2, FTRACE_OPS_FL_SAVE_REGS = 4, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, FTRACE_OPS_FL_RECURSION = 16, FTRACE_OPS_FL_STUB = 32, FTRACE_OPS_FL_INITIALIZED = 64, FTRACE_OPS_FL_DELETED = 128, FTRACE_OPS_FL_ADDING = 256, FTRACE_OPS_FL_REMOVING = 512, FTRACE_OPS_FL_MODIFYING = 1024, FTRACE_OPS_FL_ALLOC_TRAMP = 2048, FTRACE_OPS_FL_IPMODIFY = 4096, FTRACE_OPS_FL_PID = 8192, FTRACE_OPS_FL_RCU = 16384, FTRACE_OPS_FL_TRACE_ARRAY = 32768, FTRACE_OPS_FL_PERMANENT = 65536, FTRACE_OPS_FL_DIRECT = 131072, FTRACE_OPS_FL_SUBOP = 262144, }; enum { FTRACE_UPDATE_CALLS = 1, FTRACE_DISABLE_CALLS = 2, FTRACE_UPDATE_TRACE_FUNC = 4, FTRACE_START_FUNC_RET = 8, FTRACE_STOP_FUNC_RET = 16, FTRACE_MAY_SLEEP = 32, }; enum { 
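/* Likely the verdicts ftrace uses when deciding how to patch a call site:
 * leave it alone, turn it into a call, modify the call, or nop it out. */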
FTRACE_UPDATE_IGNORE = 0, FTRACE_UPDATE_MAKE_CALL = 1, FTRACE_UPDATE_MODIFY_CALL = 2, FTRACE_UPDATE_MAKE_NOP = 3, }; enum { FUTEX_STATE_OK = 0, FUTEX_STATE_EXITING = 1, FUTEX_STATE_DEAD = 2, }; enum { GATE_INTERRUPT = 14, GATE_TRAP = 15, GATE_CALL = 12, GATE_TASK = 5, }; enum { GENHD_FL_REMOVABLE = 1, GENHD_FL_HIDDEN = 2, GENHD_FL_NO_PART = 4, }; enum { GP_IDLE = 0, GP_ENTER = 1, GP_PASSED = 2, GP_EXIT = 3, GP_REPLAY = 4, }; enum { HASH_SIZE = 128, }; enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, NET_TX_SOFTIRQ = 2, NET_RX_SOFTIRQ = 3, BLOCK_SOFTIRQ = 4, IRQ_POLL_SOFTIRQ = 5, TASKLET_SOFTIRQ = 6, SCHED_SOFTIRQ = 7, HRTIMER_SOFTIRQ = 8, RCU_SOFTIRQ = 9, NR_SOFTIRQS = 10, }; enum { HP_THREAD_NONE = 0, HP_THREAD_ACTIVE = 1, HP_THREAD_PARKED = 2, }; enum { HSWEP_PCI_UNCORE_HA = 0, HSWEP_PCI_UNCORE_IMC = 1, HSWEP_PCI_UNCORE_IRP = 2, HSWEP_PCI_UNCORE_QPI = 3, HSWEP_PCI_UNCORE_R2PCIE = 4, HSWEP_PCI_UNCORE_R3QPI = 5, }; enum { HUGETLB_SHMFS_INODE = 1, HUGETLB_ANONHUGE_INODE = 2, }; enum { HW_BREAKPOINT_EMPTY = 0, HW_BREAKPOINT_R = 1, HW_BREAKPOINT_W = 2, HW_BREAKPOINT_RW = 3, HW_BREAKPOINT_X = 4, HW_BREAKPOINT_INVALID = 7, }; enum { HW_BREAKPOINT_LEN_1 = 1, HW_BREAKPOINT_LEN_2 = 2, HW_BREAKPOINT_LEN_3 = 3, HW_BREAKPOINT_LEN_4 = 4, HW_BREAKPOINT_LEN_5 = 5, HW_BREAKPOINT_LEN_6 = 6, HW_BREAKPOINT_LEN_7 = 7, HW_BREAKPOINT_LEN_8 = 8, }; enum { ICMP6_MIB_NUM = 0, ICMP6_MIB_INMSGS = 1, ICMP6_MIB_INERRORS = 2, ICMP6_MIB_OUTMSGS = 3, ICMP6_MIB_OUTERRORS = 4, ICMP6_MIB_CSUMERRORS = 5, ICMP6_MIB_RATELIMITHOST = 6, __ICMP6_MIB_MAX = 7, }; enum { ICMP_MIB_NUM = 0, ICMP_MIB_INMSGS = 1, ICMP_MIB_INERRORS = 2, ICMP_MIB_INDESTUNREACHS = 3, ICMP_MIB_INTIMEEXCDS = 4, ICMP_MIB_INPARMPROBS = 5, ICMP_MIB_INSRCQUENCHS = 6, ICMP_MIB_INREDIRECTS = 7, ICMP_MIB_INECHOS = 8, ICMP_MIB_INECHOREPS = 9, ICMP_MIB_INTIMESTAMPS = 10, ICMP_MIB_INTIMESTAMPREPS = 11, ICMP_MIB_INADDRMASKS = 12, ICMP_MIB_INADDRMASKREPS = 13, ICMP_MIB_OUTMSGS = 14, ICMP_MIB_OUTERRORS = 15, ICMP_MIB_OUTDESTUNREACHS = 16, ICMP_MIB_OUTTIMEEXCDS = 17, ICMP_MIB_OUTPARMPROBS = 18, ICMP_MIB_OUTSRCQUENCHS = 19, ICMP_MIB_OUTREDIRECTS = 20, ICMP_MIB_OUTECHOS = 21, ICMP_MIB_OUTECHOREPS = 22, ICMP_MIB_OUTTIMESTAMPS = 23, ICMP_MIB_OUTTIMESTAMPREPS = 24, ICMP_MIB_OUTADDRMASKS = 25, ICMP_MIB_OUTADDRMASKREPS = 26, ICMP_MIB_CSUMERRORS = 27, ICMP_MIB_RATELIMITGLOBAL = 28, ICMP_MIB_RATELIMITHOST = 29, __ICMP_MIB_MAX = 30, }; enum { ICX_PCIE1_PMON_ID = 0, ICX_PCIE2_PMON_ID = 1, ICX_PCIE3_PMON_ID = 2, ICX_PCIE4_PMON_ID = 3, ICX_PCIE5_PMON_ID = 4, ICX_CBDMA_DMI_PMON_ID = 5, }; enum { ICX_PCI_UNCORE_M2M = 0, ICX_PCI_UNCORE_UPI = 1, ICX_PCI_UNCORE_M3UPI = 2, }; enum { IDX_MODULE_ID = 0, IDX_ST_OPS_COMMON_VALUE_ID = 1, }; enum { IFAL_ADDRESS = 1, IFAL_LABEL = 2, __IFAL_MAX = 3, }; enum { IFA_UNSPEC = 0, IFA_ADDRESS = 1, IFA_LOCAL = 2, IFA_LABEL = 3, IFA_BROADCAST = 4, IFA_ANYCAST = 5, IFA_CACHEINFO = 6, IFA_MULTICAST = 7, IFA_FLAGS = 8, IFA_RT_PRIORITY = 9, IFA_TARGET_NETNSID = 10, IFA_PROTO = 11, __IFA_MAX = 12, }; enum { IFLA_BOND_AD_INFO_UNSPEC = 0, IFLA_BOND_AD_INFO_AGGREGATOR = 1, IFLA_BOND_AD_INFO_NUM_PORTS = 2, IFLA_BOND_AD_INFO_ACTOR_KEY = 3, IFLA_BOND_AD_INFO_PARTNER_KEY = 4, IFLA_BOND_AD_INFO_PARTNER_MAC = 5, __IFLA_BOND_AD_INFO_MAX = 6, }; enum { IFLA_BOND_SLAVE_UNSPEC = 0, IFLA_BOND_SLAVE_STATE = 1, IFLA_BOND_SLAVE_MII_STATUS = 2, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT = 3, IFLA_BOND_SLAVE_PERM_HWADDR = 4, IFLA_BOND_SLAVE_QUEUE_ID = 5, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 6, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 7, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 8, 
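/* IFLA_BOND_SLAVE_*: per-slave bonding state, carried nested under
 * IFLA_INFO_SLAVE_DATA in rtnetlink link messages. */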
IFLA_BOND_SLAVE_PRIO = 9, __IFLA_BOND_SLAVE_MAX = 10, }; enum { IFLA_BOND_UNSPEC = 0, IFLA_BOND_MODE = 1, IFLA_BOND_ACTIVE_SLAVE = 2, IFLA_BOND_MIIMON = 3, IFLA_BOND_UPDELAY = 4, IFLA_BOND_DOWNDELAY = 5, IFLA_BOND_USE_CARRIER = 6, IFLA_BOND_ARP_INTERVAL = 7, IFLA_BOND_ARP_IP_TARGET = 8, IFLA_BOND_ARP_VALIDATE = 9, IFLA_BOND_ARP_ALL_TARGETS = 10, IFLA_BOND_PRIMARY = 11, IFLA_BOND_PRIMARY_RESELECT = 12, IFLA_BOND_FAIL_OVER_MAC = 13, IFLA_BOND_XMIT_HASH_POLICY = 14, IFLA_BOND_RESEND_IGMP = 15, IFLA_BOND_NUM_PEER_NOTIF = 16, IFLA_BOND_ALL_SLAVES_ACTIVE = 17, IFLA_BOND_MIN_LINKS = 18, IFLA_BOND_LP_INTERVAL = 19, IFLA_BOND_PACKETS_PER_SLAVE = 20, IFLA_BOND_AD_LACP_RATE = 21, IFLA_BOND_AD_SELECT = 22, IFLA_BOND_AD_INFO = 23, IFLA_BOND_AD_ACTOR_SYS_PRIO = 24, IFLA_BOND_AD_USER_PORT_KEY = 25, IFLA_BOND_AD_ACTOR_SYSTEM = 26, IFLA_BOND_TLB_DYNAMIC_LB = 27, IFLA_BOND_PEER_NOTIF_DELAY = 28, IFLA_BOND_AD_LACP_ACTIVE = 29, IFLA_BOND_MISSED_MAX = 30, IFLA_BOND_NS_IP6_TARGET = 31, IFLA_BOND_COUPLED_CONTROL = 32, __IFLA_BOND_MAX = 33, }; enum { IFLA_BRIDGE_FLAGS = 0, IFLA_BRIDGE_MODE = 1, IFLA_BRIDGE_VLAN_INFO = 2, IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, IFLA_BRIDGE_MRP = 4, IFLA_BRIDGE_CFM = 5, IFLA_BRIDGE_MST = 6, __IFLA_BRIDGE_MAX = 7, }; enum { IFLA_BRPORT_UNSPEC = 0, IFLA_BRPORT_STATE = 1, IFLA_BRPORT_PRIORITY = 2, IFLA_BRPORT_COST = 3, IFLA_BRPORT_MODE = 4, IFLA_BRPORT_GUARD = 5, IFLA_BRPORT_PROTECT = 6, IFLA_BRPORT_FAST_LEAVE = 7, IFLA_BRPORT_LEARNING = 8, IFLA_BRPORT_UNICAST_FLOOD = 9, IFLA_BRPORT_PROXYARP = 10, IFLA_BRPORT_LEARNING_SYNC = 11, IFLA_BRPORT_PROXYARP_WIFI = 12, IFLA_BRPORT_ROOT_ID = 13, IFLA_BRPORT_BRIDGE_ID = 14, IFLA_BRPORT_DESIGNATED_PORT = 15, IFLA_BRPORT_DESIGNATED_COST = 16, IFLA_BRPORT_ID = 17, IFLA_BRPORT_NO = 18, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, IFLA_BRPORT_CONFIG_PENDING = 20, IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, IFLA_BRPORT_HOLD_TIMER = 23, IFLA_BRPORT_FLUSH = 24, IFLA_BRPORT_MULTICAST_ROUTER = 25, IFLA_BRPORT_PAD = 26, IFLA_BRPORT_MCAST_FLOOD = 27, IFLA_BRPORT_MCAST_TO_UCAST = 28, IFLA_BRPORT_VLAN_TUNNEL = 29, IFLA_BRPORT_BCAST_FLOOD = 30, IFLA_BRPORT_GROUP_FWD_MASK = 31, IFLA_BRPORT_NEIGH_SUPPRESS = 32, IFLA_BRPORT_ISOLATED = 33, IFLA_BRPORT_BACKUP_PORT = 34, IFLA_BRPORT_MRP_RING_OPEN = 35, IFLA_BRPORT_MRP_IN_OPEN = 36, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, IFLA_BRPORT_LOCKED = 39, IFLA_BRPORT_MAB = 40, IFLA_BRPORT_MCAST_N_GROUPS = 41, IFLA_BRPORT_MCAST_MAX_GROUPS = 42, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 43, IFLA_BRPORT_BACKUP_NHID = 44, __IFLA_BRPORT_MAX = 45, }; enum { IFLA_EVENT_NONE = 0, IFLA_EVENT_REBOOT = 1, IFLA_EVENT_FEATURES = 2, IFLA_EVENT_BONDING_FAILOVER = 3, IFLA_EVENT_NOTIFY_PEERS = 4, IFLA_EVENT_IGMP_RESEND = 5, IFLA_EVENT_BONDING_OPTIONS = 6, }; enum { IFLA_GENEVE_UNSPEC = 0, IFLA_GENEVE_ID = 1, IFLA_GENEVE_REMOTE = 2, IFLA_GENEVE_TTL = 3, IFLA_GENEVE_TOS = 4, IFLA_GENEVE_PORT = 5, IFLA_GENEVE_COLLECT_METADATA = 6, IFLA_GENEVE_REMOTE6 = 7, IFLA_GENEVE_UDP_CSUM = 8, IFLA_GENEVE_UDP_ZERO_CSUM6_TX = 9, IFLA_GENEVE_UDP_ZERO_CSUM6_RX = 10, IFLA_GENEVE_LABEL = 11, IFLA_GENEVE_TTL_INHERIT = 12, IFLA_GENEVE_DF = 13, IFLA_GENEVE_INNER_PROTO_INHERIT = 14, __IFLA_GENEVE_MAX = 15, }; enum { IFLA_GRE_UNSPEC = 0, IFLA_GRE_LINK = 1, IFLA_GRE_IFLAGS = 2, IFLA_GRE_OFLAGS = 3, IFLA_GRE_IKEY = 4, IFLA_GRE_OKEY = 5, IFLA_GRE_LOCAL = 6, IFLA_GRE_REMOTE = 7, IFLA_GRE_TTL = 8, IFLA_GRE_TOS = 9, IFLA_GRE_PMTUDISC = 10, IFLA_GRE_ENCAP_LIMIT = 11, IFLA_GRE_FLOWINFO = 12, IFLA_GRE_FLAGS = 13, 
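/* IFLA_GRE_*: tunnel parameters for the gre/gretap (and ip6gre/erspan)
 * link kinds, including the UDP encapsulation and ERSPAN fields below. */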
IFLA_GRE_ENCAP_TYPE = 14, IFLA_GRE_ENCAP_FLAGS = 15, IFLA_GRE_ENCAP_SPORT = 16, IFLA_GRE_ENCAP_DPORT = 17, IFLA_GRE_COLLECT_METADATA = 18, IFLA_GRE_IGNORE_DF = 19, IFLA_GRE_FWMARK = 20, IFLA_GRE_ERSPAN_INDEX = 21, IFLA_GRE_ERSPAN_VER = 22, IFLA_GRE_ERSPAN_DIR = 23, IFLA_GRE_ERSPAN_HWID = 24, __IFLA_GRE_MAX = 25, }; enum { IFLA_INET6_UNSPEC = 0, IFLA_INET6_FLAGS = 1, IFLA_INET6_CONF = 2, IFLA_INET6_STATS = 3, IFLA_INET6_MCAST = 4, IFLA_INET6_CACHEINFO = 5, IFLA_INET6_ICMP6STATS = 6, IFLA_INET6_TOKEN = 7, IFLA_INET6_ADDR_GEN_MODE = 8, IFLA_INET6_RA_MTU = 9, __IFLA_INET6_MAX = 10, }; enum { IFLA_INET_UNSPEC = 0, IFLA_INET_CONF = 1, __IFLA_INET_MAX = 2, }; enum { IFLA_INFO_UNSPEC = 0, IFLA_INFO_KIND = 1, IFLA_INFO_DATA = 2, IFLA_INFO_XSTATS = 3, IFLA_INFO_SLAVE_KIND = 4, IFLA_INFO_SLAVE_DATA = 5, __IFLA_INFO_MAX = 6, }; enum { IFLA_IPTUN_UNSPEC = 0, IFLA_IPTUN_LINK = 1, IFLA_IPTUN_LOCAL = 2, IFLA_IPTUN_REMOTE = 3, IFLA_IPTUN_TTL = 4, IFLA_IPTUN_TOS = 5, IFLA_IPTUN_ENCAP_LIMIT = 6, IFLA_IPTUN_FLOWINFO = 7, IFLA_IPTUN_FLAGS = 8, IFLA_IPTUN_PROTO = 9, IFLA_IPTUN_PMTUDISC = 10, IFLA_IPTUN_6RD_PREFIX = 11, IFLA_IPTUN_6RD_RELAY_PREFIX = 12, IFLA_IPTUN_6RD_PREFIXLEN = 13, IFLA_IPTUN_6RD_RELAY_PREFIXLEN = 14, IFLA_IPTUN_ENCAP_TYPE = 15, IFLA_IPTUN_ENCAP_FLAGS = 16, IFLA_IPTUN_ENCAP_SPORT = 17, IFLA_IPTUN_ENCAP_DPORT = 18, IFLA_IPTUN_COLLECT_METADATA = 19, IFLA_IPTUN_FWMARK = 20, __IFLA_IPTUN_MAX = 21, }; enum { IFLA_NETKIT_UNSPEC = 0, IFLA_NETKIT_PEER_INFO = 1, IFLA_NETKIT_PRIMARY = 2, IFLA_NETKIT_POLICY = 3, IFLA_NETKIT_PEER_POLICY = 4, IFLA_NETKIT_MODE = 5, __IFLA_NETKIT_MAX = 6, }; enum { IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 1, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 2, __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX = 3, }; enum { IFLA_OFFLOAD_XSTATS_UNSPEC = 0, IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, IFLA_OFFLOAD_XSTATS_HW_S_INFO = 2, IFLA_OFFLOAD_XSTATS_L3_STATS = 3, __IFLA_OFFLOAD_XSTATS_MAX = 4, }; enum { IFLA_PORT_UNSPEC = 0, IFLA_PORT_VF = 1, IFLA_PORT_PROFILE = 2, IFLA_PORT_VSI_TYPE = 3, IFLA_PORT_INSTANCE_UUID = 4, IFLA_PORT_HOST_UUID = 5, IFLA_PORT_REQUEST = 6, IFLA_PORT_RESPONSE = 7, __IFLA_PORT_MAX = 8, }; enum { IFLA_PROTO_DOWN_REASON_UNSPEC = 0, IFLA_PROTO_DOWN_REASON_MASK = 1, IFLA_PROTO_DOWN_REASON_VALUE = 2, __IFLA_PROTO_DOWN_REASON_CNT = 3, IFLA_PROTO_DOWN_REASON_MAX = 2, }; enum { IFLA_STATS_GETSET_UNSPEC = 0, IFLA_STATS_GET_FILTERS = 1, IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 2, __IFLA_STATS_GETSET_MAX = 3, }; enum { IFLA_STATS_UNSPEC = 0, IFLA_STATS_LINK_64 = 1, IFLA_STATS_LINK_XSTATS = 2, IFLA_STATS_LINK_XSTATS_SLAVE = 3, IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, IFLA_STATS_AF_SPEC = 5, __IFLA_STATS_MAX = 6, }; enum { IFLA_TUN_UNSPEC = 0, IFLA_TUN_OWNER = 1, IFLA_TUN_GROUP = 2, IFLA_TUN_TYPE = 3, IFLA_TUN_PI = 4, IFLA_TUN_VNET_HDR = 5, IFLA_TUN_PERSIST = 6, IFLA_TUN_MULTI_QUEUE = 7, IFLA_TUN_NUM_QUEUES = 8, IFLA_TUN_NUM_DISABLED_QUEUES = 9, __IFLA_TUN_MAX = 10, }; enum { IFLA_UNSPEC = 0, IFLA_ADDRESS = 1, IFLA_BROADCAST = 2, IFLA_IFNAME = 3, IFLA_MTU = 4, IFLA_LINK = 5, IFLA_QDISC = 6, IFLA_STATS = 7, IFLA_COST = 8, IFLA_PRIORITY = 9, IFLA_MASTER = 10, IFLA_WIRELESS = 11, IFLA_PROTINFO = 12, IFLA_TXQLEN = 13, IFLA_MAP = 14, IFLA_WEIGHT = 15, IFLA_OPERSTATE = 16, IFLA_LINKMODE = 17, IFLA_LINKINFO = 18, IFLA_NET_NS_PID = 19, IFLA_IFALIAS = 20, IFLA_NUM_VF = 21, IFLA_VFINFO_LIST = 22, IFLA_STATS64 = 23, IFLA_VF_PORTS = 24, IFLA_PORT_SELF = 25, IFLA_AF_SPEC = 26, IFLA_GROUP = 27, IFLA_NET_NS_FD = 28, IFLA_EXT_MASK = 29, IFLA_PROMISCUITY = 30, IFLA_NUM_TX_QUEUES = 
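/* IFLA_*: the top-level rtnetlink link attributes. IFLA_XDP (43) is the
 * nest carrying the IFLA_XDP_* attributes defined further down, which is
 * how libxdp and iproute2 attach XDP programs to an interface.
 *
 * Sketch only (libnl-style helpers; not part of this dump): attaching a
 * program by fd looks roughly like
 *
 *   struct nlattr *opts = nla_nest_start(msg, IFLA_XDP);
 *   nla_put_s32(msg, IFLA_XDP_FD, prog_fd);
 *   nla_put_u32(msg, IFLA_XDP_FLAGS, xdp_flags);
 *   nla_nest_end(msg, opts);
 */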
31, IFLA_NUM_RX_QUEUES = 32, IFLA_CARRIER = 33, IFLA_PHYS_PORT_ID = 34, IFLA_CARRIER_CHANGES = 35, IFLA_PHYS_SWITCH_ID = 36, IFLA_LINK_NETNSID = 37, IFLA_PHYS_PORT_NAME = 38, IFLA_PROTO_DOWN = 39, IFLA_GSO_MAX_SEGS = 40, IFLA_GSO_MAX_SIZE = 41, IFLA_PAD = 42, IFLA_XDP = 43, IFLA_EVENT = 44, IFLA_NEW_NETNSID = 45, IFLA_IF_NETNSID = 46, IFLA_TARGET_NETNSID = 46, IFLA_CARRIER_UP_COUNT = 47, IFLA_CARRIER_DOWN_COUNT = 48, IFLA_NEW_IFINDEX = 49, IFLA_MIN_MTU = 50, IFLA_MAX_MTU = 51, IFLA_PROP_LIST = 52, IFLA_ALT_IFNAME = 53, IFLA_PERM_ADDRESS = 54, IFLA_PROTO_DOWN_REASON = 55, IFLA_PARENT_DEV_NAME = 56, IFLA_PARENT_DEV_BUS_NAME = 57, IFLA_GRO_MAX_SIZE = 58, IFLA_TSO_MAX_SIZE = 59, IFLA_TSO_MAX_SEGS = 60, IFLA_ALLMULTI = 61, IFLA_DEVLINK_PORT = 62, IFLA_GSO_IPV4_MAX_SIZE = 63, IFLA_GRO_IPV4_MAX_SIZE = 64, IFLA_DPLL_PIN = 65, __IFLA_MAX = 66, }; enum { IFLA_VF_INFO_UNSPEC = 0, IFLA_VF_INFO = 1, __IFLA_VF_INFO_MAX = 2, }; enum { IFLA_VF_LINK_STATE_AUTO = 0, IFLA_VF_LINK_STATE_ENABLE = 1, IFLA_VF_LINK_STATE_DISABLE = 2, __IFLA_VF_LINK_STATE_MAX = 3, }; enum { IFLA_VF_PORT_UNSPEC = 0, IFLA_VF_PORT = 1, __IFLA_VF_PORT_MAX = 2, }; enum { IFLA_VF_STATS_RX_PACKETS = 0, IFLA_VF_STATS_TX_PACKETS = 1, IFLA_VF_STATS_RX_BYTES = 2, IFLA_VF_STATS_TX_BYTES = 3, IFLA_VF_STATS_BROADCAST = 4, IFLA_VF_STATS_MULTICAST = 5, IFLA_VF_STATS_PAD = 6, IFLA_VF_STATS_RX_DROPPED = 7, IFLA_VF_STATS_TX_DROPPED = 8, __IFLA_VF_STATS_MAX = 9, }; enum { IFLA_VF_UNSPEC = 0, IFLA_VF_MAC = 1, IFLA_VF_VLAN = 2, IFLA_VF_TX_RATE = 3, IFLA_VF_SPOOFCHK = 4, IFLA_VF_LINK_STATE = 5, IFLA_VF_RATE = 6, IFLA_VF_RSS_QUERY_EN = 7, IFLA_VF_STATS = 8, IFLA_VF_TRUST = 9, IFLA_VF_IB_NODE_GUID = 10, IFLA_VF_IB_PORT_GUID = 11, IFLA_VF_VLAN_LIST = 12, IFLA_VF_BROADCAST = 13, __IFLA_VF_MAX = 14, }; enum { IFLA_VF_VLAN_INFO_UNSPEC = 0, IFLA_VF_VLAN_INFO = 1, __IFLA_VF_VLAN_INFO_MAX = 2, }; enum { IFLA_VLAN_QOS_UNSPEC = 0, IFLA_VLAN_QOS_MAPPING = 1, __IFLA_VLAN_QOS_MAX = 2, }; enum { IFLA_VLAN_UNSPEC = 0, IFLA_VLAN_ID = 1, IFLA_VLAN_FLAGS = 2, IFLA_VLAN_EGRESS_QOS = 3, IFLA_VLAN_INGRESS_QOS = 4, IFLA_VLAN_PROTOCOL = 5, __IFLA_VLAN_MAX = 6, }; enum { IFLA_VRF_PORT_UNSPEC = 0, IFLA_VRF_PORT_TABLE = 1, __IFLA_VRF_PORT_MAX = 2, }; enum { IFLA_VRF_UNSPEC = 0, IFLA_VRF_TABLE = 1, __IFLA_VRF_MAX = 2, }; enum { IFLA_VXLAN_UNSPEC = 0, IFLA_VXLAN_ID = 1, IFLA_VXLAN_GROUP = 2, IFLA_VXLAN_LINK = 3, IFLA_VXLAN_LOCAL = 4, IFLA_VXLAN_TTL = 5, IFLA_VXLAN_TOS = 6, IFLA_VXLAN_LEARNING = 7, IFLA_VXLAN_AGEING = 8, IFLA_VXLAN_LIMIT = 9, IFLA_VXLAN_PORT_RANGE = 10, IFLA_VXLAN_PROXY = 11, IFLA_VXLAN_RSC = 12, IFLA_VXLAN_L2MISS = 13, IFLA_VXLAN_L3MISS = 14, IFLA_VXLAN_PORT = 15, IFLA_VXLAN_GROUP6 = 16, IFLA_VXLAN_LOCAL6 = 17, IFLA_VXLAN_UDP_CSUM = 18, IFLA_VXLAN_UDP_ZERO_CSUM6_TX = 19, IFLA_VXLAN_UDP_ZERO_CSUM6_RX = 20, IFLA_VXLAN_REMCSUM_TX = 21, IFLA_VXLAN_REMCSUM_RX = 22, IFLA_VXLAN_GBP = 23, IFLA_VXLAN_REMCSUM_NOPARTIAL = 24, IFLA_VXLAN_COLLECT_METADATA = 25, IFLA_VXLAN_LABEL = 26, IFLA_VXLAN_GPE = 27, IFLA_VXLAN_TTL_INHERIT = 28, IFLA_VXLAN_DF = 29, IFLA_VXLAN_VNIFILTER = 30, IFLA_VXLAN_LOCALBYPASS = 31, IFLA_VXLAN_LABEL_POLICY = 32, __IFLA_VXLAN_MAX = 33, }; enum { IFLA_XDP_UNSPEC = 0, IFLA_XDP_FD = 1, IFLA_XDP_ATTACHED = 2, IFLA_XDP_FLAGS = 3, IFLA_XDP_PROG_ID = 4, IFLA_XDP_DRV_PROG_ID = 5, IFLA_XDP_SKB_PROG_ID = 6, IFLA_XDP_HW_PROG_ID = 7, IFLA_XDP_EXPECTED_FD = 8, __IFLA_XDP_MAX = 9, }; enum { IFLA_XFRM_UNSPEC = 0, IFLA_XFRM_LINK = 1, IFLA_XFRM_IF_ID = 2, IFLA_XFRM_COLLECT_METADATA = 3, __IFLA_XFRM_MAX = 4, }; enum { IF_ACT_NONE = -1, IF_ACT_FILTER = 0, IF_ACT_START = 1, 
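/* IF_ACT_* and IF_SRC_*: token states used by the perf_event
 * address-filter string parser. */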
IF_ACT_STOP = 2, IF_SRC_FILE = 3, IF_SRC_KERNEL = 4, IF_SRC_FILEADDR = 5, IF_SRC_KERNELADDR = 6, }; enum { IF_LINK_MODE_DEFAULT = 0, IF_LINK_MODE_DORMANT = 1, IF_LINK_MODE_TESTING = 2, }; enum { IF_OPER_UNKNOWN = 0, IF_OPER_NOTPRESENT = 1, IF_OPER_DOWN = 2, IF_OPER_LOWERLAYERDOWN = 3, IF_OPER_TESTING = 4, IF_OPER_DORMANT = 5, IF_OPER_UP = 6, }; enum { IF_STATE_ACTION = 0, IF_STATE_SOURCE = 1, IF_STATE_END = 2, }; enum { IIO_TOPOLOGY_TYPE = 0, UPI_TOPOLOGY_TYPE = 1, TOPOLOGY_MAX = 2, }; enum { INET6_IFADDR_STATE_PREDAD = 0, INET6_IFADDR_STATE_DAD = 1, INET6_IFADDR_STATE_POSTDAD = 2, INET6_IFADDR_STATE_ERRDAD = 3, INET6_IFADDR_STATE_DEAD = 4, }; enum { INET_DIAG_BC_NOP = 0, INET_DIAG_BC_JMP = 1, INET_DIAG_BC_S_GE = 2, INET_DIAG_BC_S_LE = 3, INET_DIAG_BC_D_GE = 4, INET_DIAG_BC_D_LE = 5, INET_DIAG_BC_AUTO = 6, INET_DIAG_BC_S_COND = 7, INET_DIAG_BC_D_COND = 8, INET_DIAG_BC_DEV_COND = 9, INET_DIAG_BC_MARK_COND = 10, INET_DIAG_BC_S_EQ = 11, INET_DIAG_BC_D_EQ = 12, INET_DIAG_BC_CGROUP_COND = 13, }; enum { INET_DIAG_NONE = 0, INET_DIAG_MEMINFO = 1, INET_DIAG_INFO = 2, INET_DIAG_VEGASINFO = 3, INET_DIAG_CONG = 4, INET_DIAG_TOS = 5, INET_DIAG_TCLASS = 6, INET_DIAG_SKMEMINFO = 7, INET_DIAG_SHUTDOWN = 8, INET_DIAG_DCTCPINFO = 9, INET_DIAG_PROTOCOL = 10, INET_DIAG_SKV6ONLY = 11, INET_DIAG_LOCALS = 12, INET_DIAG_PEERS = 13, INET_DIAG_PAD = 14, INET_DIAG_MARK = 15, INET_DIAG_BBRINFO = 16, INET_DIAG_CLASS_ID = 17, INET_DIAG_MD5SIG = 18, INET_DIAG_ULP_INFO = 19, INET_DIAG_SK_BPF_STORAGES = 20, INET_DIAG_CGROUP_ID = 21, INET_DIAG_SOCKOPT = 22, __INET_DIAG_MAX = 23, }; enum { INET_DIAG_REQ_NONE = 0, INET_DIAG_REQ_BYTECODE = 1, INET_DIAG_REQ_SK_BPF_STORAGES = 2, INET_DIAG_REQ_PROTOCOL = 3, __INET_DIAG_REQ_MAX = 4, }; enum { INET_ECN_NOT_ECT = 0, INET_ECN_ECT_1 = 1, INET_ECN_ECT_0 = 2, INET_ECN_CE = 3, INET_ECN_MASK = 3, }; enum { INET_FLAGS_PKTINFO = 0, INET_FLAGS_TTL = 1, INET_FLAGS_TOS = 2, INET_FLAGS_RECVOPTS = 3, INET_FLAGS_RETOPTS = 4, INET_FLAGS_PASSSEC = 5, INET_FLAGS_ORIGDSTADDR = 6, INET_FLAGS_CHECKSUM = 7, INET_FLAGS_RECVFRAGSIZE = 8, INET_FLAGS_RECVERR = 9, INET_FLAGS_RECVERR_RFC4884 = 10, INET_FLAGS_FREEBIND = 11, INET_FLAGS_HDRINCL = 12, INET_FLAGS_MC_LOOP = 13, INET_FLAGS_MC_ALL = 14, INET_FLAGS_TRANSPARENT = 15, INET_FLAGS_IS_ICSK = 16, INET_FLAGS_NODEFRAG = 17, INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, INET_FLAGS_DEFER_CONNECT = 19, INET_FLAGS_MC6_LOOP = 20, INET_FLAGS_RECVERR6_RFC4884 = 21, INET_FLAGS_MC6_ALL = 22, INET_FLAGS_AUTOFLOWLABEL_SET = 23, INET_FLAGS_AUTOFLOWLABEL = 24, INET_FLAGS_DONTFRAG = 25, INET_FLAGS_RECVERR6 = 26, INET_FLAGS_REPFLOW = 27, INET_FLAGS_RTALERT_ISOLATE = 28, INET_FLAGS_SNDFLOW = 29, INET_FLAGS_RTALERT = 30, }; enum { INET_FRAG_FIRST_IN = 1, INET_FRAG_LAST_IN = 2, INET_FRAG_COMPLETE = 4, INET_FRAG_HASH_DEAD = 8, INET_FRAG_DROP = 16, }; enum { INET_ULP_INFO_UNSPEC = 0, INET_ULP_INFO_NAME = 1, INET_ULP_INFO_TLS = 2, INET_ULP_INFO_MPTCP = 3, __INET_ULP_INFO_MAX = 4, }; enum { INSN_F_FRAMENO_MASK = 7, INSN_F_SPI_MASK = 63, INSN_F_SPI_SHIFT = 3, INSN_F_STACK_ACCESS = 512, }; enum { INVERT = 1, PROCESS_AND = 2, PROCESS_OR = 4, }; enum { IOAM6_ATTR_UNSPEC = 0, IOAM6_ATTR_NS_ID = 1, IOAM6_ATTR_NS_DATA = 2, IOAM6_ATTR_NS_DATA_WIDE = 3, IOAM6_ATTR_SC_ID = 4, IOAM6_ATTR_SC_DATA = 5, IOAM6_ATTR_SC_NONE = 6, IOAM6_ATTR_PAD = 7, __IOAM6_ATTR_MAX = 8, }; enum { IOAM6_CMD_UNSPEC = 0, IOAM6_CMD_ADD_NAMESPACE = 1, IOAM6_CMD_DEL_NAMESPACE = 2, IOAM6_CMD_DUMP_NAMESPACES = 3, IOAM6_CMD_ADD_SCHEMA = 4, IOAM6_CMD_DEL_SCHEMA = 5, IOAM6_CMD_DUMP_SCHEMAS = 6, IOAM6_CMD_NS_SET_SCHEMA = 7, 
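/* The double-underscore __..._MAX entries throughout this dump are the
 * usual netlink sentinels: the largest valid value is __X_MAX - 1. */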
__IOAM6_CMD_MAX = 8, }; enum { IOBL_BUF_RING = 1, IOBL_MMAP = 2, IOBL_INC = 4, }; enum { IOCB_CMD_PREAD = 0, IOCB_CMD_PWRITE = 1, IOCB_CMD_FSYNC = 2, IOCB_CMD_FDSYNC = 3, IOCB_CMD_POLL = 5, IOCB_CMD_NOOP = 6, IOCB_CMD_PREADV = 7, IOCB_CMD_PWRITEV = 8, }; enum { IOMMU_SET_DOMAIN_MUST_SUCCEED = 1, }; enum { IOPRIO_CLASS_NONE = 0, IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3, IOPRIO_CLASS_INVALID = 7, }; enum { IOPRIO_HINT_NONE = 0, IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, }; enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP = 2, IOPRIO_WHO_USER = 3, }; enum { IORES_DESC_NONE = 0, IORES_DESC_CRASH_KERNEL = 1, IORES_DESC_ACPI_TABLES = 2, IORES_DESC_ACPI_NV_STORAGE = 3, IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, IORES_DESC_RESERVED = 7, IORES_DESC_SOFT_RESERVED = 8, IORES_DESC_CXL = 9, }; enum { IORES_MAP_SYSTEM_RAM = 1, IORES_MAP_ENCRYPTED = 2, }; enum { IORING_REGISTER_SRC_REGISTERED = 1, }; enum { IORING_RSRC_FILE = 0, IORING_RSRC_BUFFER = 1, }; enum { IOU_F_TWQ_LAZY_WAKE = 1, }; enum { IOU_OK = 0, IOU_ISSUE_SKIP_COMPLETE = -529, IOU_REQUEUE = -3072, IOU_STOP_MULTISHOT = -125, }; enum { IOU_POLL_DONE = 0, IOU_POLL_NO_ACTION = 1, IOU_POLL_REMOVE_POLL_USE_RES = 2, IOU_POLL_REISSUE = 3, IOU_POLL_REQUEUE = 4, }; enum { IO_ACCT_STALLED_BIT = 0, }; enum { IO_APOLL_OK = 0, IO_APOLL_ABORTED = 1, IO_APOLL_READY = 2, }; enum { IO_CHECK_CQ_OVERFLOW_BIT = 0, IO_CHECK_CQ_DROPPED_BIT = 1, }; enum { IO_EVENTFD_OP_SIGNAL_BIT = 0, }; enum { IO_SQ_THREAD_SHOULD_STOP = 0, IO_SQ_THREAD_SHOULD_PARK = 1, }; enum { IO_WORKER_F_UP = 0, IO_WORKER_F_RUNNING = 1, IO_WORKER_F_FREE = 2, IO_WORKER_F_BOUND = 3, }; enum { IO_WQ_ACCT_BOUND = 0, IO_WQ_ACCT_UNBOUND = 1, IO_WQ_ACCT_NR = 2, }; enum { IO_WQ_BIT_EXIT = 0, }; enum { IO_WQ_WORK_CANCEL = 1, IO_WQ_WORK_HASHED = 2, IO_WQ_WORK_UNBOUND = 4, IO_WQ_WORK_CONCURRENT = 16, IO_WQ_HASH_SHIFT = 24, }; enum { IP6_FH_F_FRAG = 1, IP6_FH_F_AUTH = 2, IP6_FH_F_SKIP_RH = 4, }; enum { IPMRA_CREPORT_UNSPEC = 0, IPMRA_CREPORT_MSGTYPE = 1, IPMRA_CREPORT_VIF_ID = 2, IPMRA_CREPORT_SRC_ADDR = 3, IPMRA_CREPORT_DST_ADDR = 4, IPMRA_CREPORT_PKT = 5, IPMRA_CREPORT_TABLE = 6, __IPMRA_CREPORT_MAX = 7, }; enum { IPMRA_TABLE_UNSPEC = 0, IPMRA_TABLE_ID = 1, IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2, IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3, IPMRA_TABLE_MROUTE_DO_ASSERT = 4, IPMRA_TABLE_MROUTE_DO_PIM = 5, IPMRA_TABLE_VIFS = 6, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7, __IPMRA_TABLE_MAX = 8, }; enum { IPMRA_VIFA_UNSPEC = 0, IPMRA_VIFA_IFINDEX = 1, IPMRA_VIFA_VIF_ID = 2, IPMRA_VIFA_FLAGS = 3, IPMRA_VIFA_BYTES_IN = 4, IPMRA_VIFA_BYTES_OUT = 5, IPMRA_VIFA_PACKETS_IN = 6, IPMRA_VIFA_PACKETS_OUT = 7, IPMRA_VIFA_LOCAL_ADDR = 8, IPMRA_VIFA_REMOTE_ADDR = 9, IPMRA_VIFA_PAD = 10, __IPMRA_VIFA_MAX = 11, }; enum { IPMRA_VIF_UNSPEC = 0, IPMRA_VIF = 1, __IPMRA_VIF_MAX = 2, }; enum { IPPROTO_IP = 0, IPPROTO_ICMP = 1, IPPROTO_IGMP = 2, IPPROTO_IPIP = 4, IPPROTO_TCP = 6, IPPROTO_EGP = 8, IPPROTO_PUP = 12, IPPROTO_UDP = 17, IPPROTO_IDP = 22, IPPROTO_TP = 29, IPPROTO_DCCP = 33, IPPROTO_IPV6 = 41, IPPROTO_RSVP = 46, IPPROTO_GRE = 47, IPPROTO_ESP = 50, IPPROTO_AH = 51, IPPROTO_MTP = 92, IPPROTO_BEETPH = 94, IPPROTO_ENCAP = 98, IPPROTO_PIM = 103, IPPROTO_COMP = 108, IPPROTO_L2TP = 115, IPPROTO_SCTP = 132, IPPROTO_UDPLITE = 136, 
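/* IPPROTO_*: IANA IP protocol numbers. Values above 255 (IPPROTO_SMC,
 * IPPROTO_MPTCP) never appear on the wire; they only select a socket(2)
 * protocol. */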
IPPROTO_MPLS = 137, IPPROTO_ETHERNET = 143, IPPROTO_RAW = 255, IPPROTO_SMC = 256, IPPROTO_MPTCP = 262, IPPROTO_MAX = 263, }; enum { IPSTATS_MIB_NUM = 0, IPSTATS_MIB_INPKTS = 1, IPSTATS_MIB_INOCTETS = 2, IPSTATS_MIB_INDELIVERS = 3, IPSTATS_MIB_OUTFORWDATAGRAMS = 4, IPSTATS_MIB_OUTREQUESTS = 5, IPSTATS_MIB_OUTOCTETS = 6, IPSTATS_MIB_INHDRERRORS = 7, IPSTATS_MIB_INTOOBIGERRORS = 8, IPSTATS_MIB_INNOROUTES = 9, IPSTATS_MIB_INADDRERRORS = 10, IPSTATS_MIB_INUNKNOWNPROTOS = 11, IPSTATS_MIB_INTRUNCATEDPKTS = 12, IPSTATS_MIB_INDISCARDS = 13, IPSTATS_MIB_OUTDISCARDS = 14, IPSTATS_MIB_OUTNOROUTES = 15, IPSTATS_MIB_REASMTIMEOUT = 16, IPSTATS_MIB_REASMREQDS = 17, IPSTATS_MIB_REASMOKS = 18, IPSTATS_MIB_REASMFAILS = 19, IPSTATS_MIB_FRAGOKS = 20, IPSTATS_MIB_FRAGFAILS = 21, IPSTATS_MIB_FRAGCREATES = 22, IPSTATS_MIB_INMCASTPKTS = 23, IPSTATS_MIB_OUTMCASTPKTS = 24, IPSTATS_MIB_INBCASTPKTS = 25, IPSTATS_MIB_OUTBCASTPKTS = 26, IPSTATS_MIB_INMCASTOCTETS = 27, IPSTATS_MIB_OUTMCASTOCTETS = 28, IPSTATS_MIB_INBCASTOCTETS = 29, IPSTATS_MIB_OUTBCASTOCTETS = 30, IPSTATS_MIB_CSUMERRORS = 31, IPSTATS_MIB_NOECTPKTS = 32, IPSTATS_MIB_ECT1PKTS = 33, IPSTATS_MIB_ECT0PKTS = 34, IPSTATS_MIB_CEPKTS = 35, IPSTATS_MIB_REASM_OVERLAPS = 36, IPSTATS_MIB_OUTPKTS = 37, __IPSTATS_MIB_MAX = 38, }; enum { IPV4_DEVCONF_FORWARDING = 1, IPV4_DEVCONF_MC_FORWARDING = 2, IPV4_DEVCONF_PROXY_ARP = 3, IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, IPV4_DEVCONF_SECURE_REDIRECTS = 5, IPV4_DEVCONF_SEND_REDIRECTS = 6, IPV4_DEVCONF_SHARED_MEDIA = 7, IPV4_DEVCONF_RP_FILTER = 8, IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, IPV4_DEVCONF_BOOTP_RELAY = 10, IPV4_DEVCONF_LOG_MARTIANS = 11, IPV4_DEVCONF_TAG = 12, IPV4_DEVCONF_ARPFILTER = 13, IPV4_DEVCONF_MEDIUM_ID = 14, IPV4_DEVCONF_NOXFRM = 15, IPV4_DEVCONF_NOPOLICY = 16, IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, IPV4_DEVCONF_ARP_ANNOUNCE = 18, IPV4_DEVCONF_ARP_IGNORE = 19, IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, IPV4_DEVCONF_ARP_ACCEPT = 21, IPV4_DEVCONF_ARP_NOTIFY = 22, IPV4_DEVCONF_ACCEPT_LOCAL = 23, IPV4_DEVCONF_SRC_VMARK = 24, IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, IPV4_DEVCONF_ROUTE_LOCALNET = 26, IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, IPV4_DEVCONF_BC_FORWARDING = 32, IPV4_DEVCONF_ARP_EVICT_NOCARRIER = 33, __IPV4_DEVCONF_MAX = 34, }; enum { IPV6_SADDR_RULE_INIT = 0, IPV6_SADDR_RULE_LOCAL = 1, IPV6_SADDR_RULE_SCOPE = 2, IPV6_SADDR_RULE_PREFERRED = 3, IPV6_SADDR_RULE_HOA = 4, IPV6_SADDR_RULE_OIF = 5, IPV6_SADDR_RULE_LABEL = 6, IPV6_SADDR_RULE_PRIVACY = 7, IPV6_SADDR_RULE_ORCHID = 8, IPV6_SADDR_RULE_PREFIX = 9, IPV6_SADDR_RULE_MAX = 10, }; enum { IP_TUNNEL_CSUM_BIT = 0, IP_TUNNEL_ROUTING_BIT = 1, IP_TUNNEL_KEY_BIT = 2, IP_TUNNEL_SEQ_BIT = 3, IP_TUNNEL_STRICT_BIT = 4, IP_TUNNEL_REC_BIT = 5, IP_TUNNEL_VERSION_BIT = 6, IP_TUNNEL_NO_KEY_BIT = 7, IP_TUNNEL_DONT_FRAGMENT_BIT = 8, IP_TUNNEL_OAM_BIT = 9, IP_TUNNEL_CRIT_OPT_BIT = 10, IP_TUNNEL_GENEVE_OPT_BIT = 11, IP_TUNNEL_VXLAN_OPT_BIT = 12, IP_TUNNEL_NOCACHE_BIT = 13, IP_TUNNEL_ERSPAN_OPT_BIT = 14, IP_TUNNEL_GTP_OPT_BIT = 15, IP_TUNNEL_VTI_BIT = 16, IP_TUNNEL_SIT_ISATAP_BIT = 16, IP_TUNNEL_PFCP_OPT_BIT = 17, __IP_TUNNEL_FLAG_NUM = 18, }; enum { IRQCHIP_FWNODE_REAL = 0, IRQCHIP_FWNODE_NAMED = 1, IRQCHIP_FWNODE_NAMED_ID = 2, }; enum { IRQCHIP_SET_TYPE_MASKED = 1, IRQCHIP_EOI_IF_HANDLED = 2, IRQCHIP_MASK_ON_SUSPEND = 4, IRQCHIP_ONOFFLINE_ENABLED = 8, IRQCHIP_SKIP_SET_WAKE = 16, 
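/* IRQCHIP_*: capability/behavior flag bits advertised in
 * struct irq_chip::flags. */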
IRQCHIP_ONESHOT_SAFE = 32, IRQCHIP_EOI_THREADED = 64, IRQCHIP_SUPPORTS_LEVEL_MSI = 128, IRQCHIP_SUPPORTS_NMI = 256, IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, IRQCHIP_AFFINITY_PRE_STARTUP = 1024, IRQCHIP_IMMUTABLE = 2048, }; enum { IRQC_IS_HARDIRQ = 0, IRQC_IS_NESTED = 1, }; enum { IRQD_TRIGGER_MASK = 15, IRQD_SETAFFINITY_PENDING = 256, IRQD_ACTIVATED = 512, IRQD_NO_BALANCING = 1024, IRQD_PER_CPU = 2048, IRQD_AFFINITY_SET = 4096, IRQD_LEVEL = 8192, IRQD_WAKEUP_STATE = 16384, IRQD_MOVE_PCNTXT = 32768, IRQD_IRQ_DISABLED = 65536, IRQD_IRQ_MASKED = 131072, IRQD_IRQ_INPROGRESS = 262144, IRQD_WAKEUP_ARMED = 524288, IRQD_FORWARDED_TO_VCPU = 1048576, IRQD_AFFINITY_MANAGED = 2097152, IRQD_IRQ_STARTED = 4194304, IRQD_MANAGED_SHUTDOWN = 8388608, IRQD_SINGLE_TARGET = 16777216, IRQD_DEFAULT_TRIGGER_SET = 33554432, IRQD_CAN_RESERVE = 67108864, IRQD_HANDLE_ENFORCE_IRQCTX = 134217728, IRQD_AFFINITY_ON_ACTIVATE = 268435456, IRQD_IRQ_ENABLED_ON_SUSPEND = 536870912, IRQD_RESEND_WHEN_IN_PROGRESS = 1073741824, }; enum { IRQS_AUTODETECT = 1, IRQS_SPURIOUS_DISABLED = 2, IRQS_POLL_INPROGRESS = 8, IRQS_ONESHOT = 32, IRQS_REPLAY = 64, IRQS_WAITING = 128, IRQS_PENDING = 512, IRQS_SUSPENDED = 2048, IRQS_TIMINGS = 4096, IRQS_NMI = 8192, IRQS_SYSFS = 16384, }; enum { IRQTF_RUNTHREAD = 0, IRQTF_WARNED = 1, IRQTF_AFFINITY = 2, IRQTF_FORCED_THREAD = 3, IRQTF_READY = 4, }; enum { IRQ_DOMAIN_FLAG_HIERARCHY = 1, IRQ_DOMAIN_NAME_ALLOCATED = 2, IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, IRQ_DOMAIN_FLAG_MSI = 16, IRQ_DOMAIN_FLAG_ISOLATED_MSI = 32, IRQ_DOMAIN_FLAG_NO_MAP = 64, IRQ_DOMAIN_FLAG_MSI_PARENT = 256, IRQ_DOMAIN_FLAG_MSI_DEVICE = 512, IRQ_DOMAIN_FLAG_DESTROY_GC = 1024, IRQ_DOMAIN_FLAG_NONCORE = 65536, }; enum { IRQ_POLL_F_SCHED = 0, IRQ_POLL_F_DISABLE = 1, }; enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY = 1, IRQ_SET_MASK_OK_DONE = 2, }; enum { IRQ_STARTUP_NORMAL = 0, IRQ_STARTUP_MANAGED = 1, IRQ_STARTUP_ABORT = 2, }; enum { IRQ_TYPE_NONE = 0, IRQ_TYPE_EDGE_RISING = 1, IRQ_TYPE_EDGE_FALLING = 2, IRQ_TYPE_EDGE_BOTH = 3, IRQ_TYPE_LEVEL_HIGH = 4, IRQ_TYPE_LEVEL_LOW = 8, IRQ_TYPE_LEVEL_MASK = 12, IRQ_TYPE_SENSE_MASK = 15, IRQ_TYPE_DEFAULT = 15, IRQ_TYPE_PROBE = 16, IRQ_LEVEL = 256, IRQ_PER_CPU = 512, IRQ_NOPROBE = 1024, IRQ_NOREQUEST = 2048, IRQ_NOAUTOEN = 4096, IRQ_NO_BALANCING = 8192, IRQ_MOVE_PCNTXT = 16384, IRQ_NESTED_THREAD = 32768, IRQ_NOTHREAD = 65536, IRQ_PER_CPU_DEVID = 131072, IRQ_IS_POLLED = 262144, IRQ_DISABLE_UNLAZY = 524288, IRQ_HIDDEN = 1048576, IRQ_NO_DEBUG = 2097152, }; enum { IVBEP_PCI_UNCORE_HA = 0, IVBEP_PCI_UNCORE_IMC = 1, IVBEP_PCI_UNCORE_IRP = 2, IVBEP_PCI_UNCORE_QPI = 3, IVBEP_PCI_UNCORE_R2PCIE = 4, IVBEP_PCI_UNCORE_R3QPI = 5, }; enum { I_DATA_SEM_NORMAL = 0, I_DATA_SEM_OTHER = 1, I_DATA_SEM_QUOTA = 2, I_DATA_SEM_EA = 3, }; enum { KBUF_MODE_EXPAND = 1, KBUF_MODE_FREE = 2, }; enum { KERNEL_PARAM_FL_UNSAFE = 1, KERNEL_PARAM_FL_HWPARAM = 2, }; enum { KERNEL_PARAM_OPS_FL_NOARG = 1, }; enum { KF_ARG_DYNPTR_ID = 0, KF_ARG_LIST_HEAD_ID = 1, KF_ARG_LIST_NODE_ID = 2, KF_ARG_RB_ROOT_ID = 3, KF_ARG_RB_NODE_ID = 4, KF_ARG_WORKQUEUE_ID = 5, }; enum { KNL_PCI_UNCORE_MC_UCLK = 0, KNL_PCI_UNCORE_MC_DCLK = 1, KNL_PCI_UNCORE_EDC_UCLK = 2, KNL_PCI_UNCORE_EDC_ECLK = 3, KNL_PCI_UNCORE_M2PCIE = 4, KNL_PCI_UNCORE_IRP = 5, }; enum { KTW_FREEZABLE = 1, }; enum { KYBER_ASYNC_PERCENT = 75, }; enum { KYBER_LATENCY_SHIFT = 2, KYBER_GOOD_BUCKETS = 4, KYBER_LATENCY_BUCKETS = 8, }; enum { KYBER_READ = 0, KYBER_WRITE = 1, KYBER_DISCARD = 2, KYBER_OTHER = 3, KYBER_NUM_DOMAINS = 4, }; enum { KYBER_TOTAL_LATENCY 
= 0, KYBER_IO_LATENCY = 1, }; enum { LAST_NORM = 0, LAST_ROOT = 1, LAST_DOT = 2, LAST_DOTDOT = 3, }; enum { LBR_FORMAT_32 = 0, LBR_FORMAT_LIP = 1, LBR_FORMAT_EIP = 2, LBR_FORMAT_EIP_FLAGS = 3, LBR_FORMAT_EIP_FLAGS2 = 4, LBR_FORMAT_INFO = 5, LBR_FORMAT_TIME = 6, LBR_FORMAT_INFO2 = 7, LBR_FORMAT_MAX_KNOWN = 7, }; enum { LBR_NONE = 0, LBR_VALID = 1, }; enum { LDISC_SEM_NORMAL = 0, LDISC_SEM_OTHER = 1, }; enum { LIBATA_MAX_PRD = 128, LIBATA_DUMB_MAX_PRD = 64, ATA_DEF_QUEUE = 1, ATA_MAX_QUEUE = 32, ATA_TAG_INTERNAL = 32, ATA_SHORT_PAUSE = 16, ATAPI_MAX_DRAIN = 16384, ATA_ALL_DEVICES = 3, ATA_SHT_EMULATED = 1, ATA_SHT_THIS_ID = -1, ATA_TFLAG_LBA48 = 1, ATA_TFLAG_ISADDR = 2, ATA_TFLAG_DEVICE = 4, ATA_TFLAG_WRITE = 8, ATA_TFLAG_LBA = 16, ATA_TFLAG_FUA = 32, ATA_TFLAG_POLLING = 64, ATA_DFLAG_LBA = 1, ATA_DFLAG_LBA48 = 2, ATA_DFLAG_CDB_INTR = 4, ATA_DFLAG_NCQ = 8, ATA_DFLAG_FLUSH_EXT = 16, ATA_DFLAG_ACPI_PENDING = 32, ATA_DFLAG_ACPI_FAILED = 64, ATA_DFLAG_AN = 128, ATA_DFLAG_TRUSTED = 256, ATA_DFLAG_FUA = 512, ATA_DFLAG_DMADIR = 1024, ATA_DFLAG_NCQ_SEND_RECV = 2048, ATA_DFLAG_NCQ_PRIO = 4096, ATA_DFLAG_CDL = 8192, ATA_DFLAG_CFG_MASK = 16383, ATA_DFLAG_PIO = 16384, ATA_DFLAG_NCQ_OFF = 32768, ATA_DFLAG_SLEEPING = 65536, ATA_DFLAG_DUBIOUS_XFER = 131072, ATA_DFLAG_NO_UNLOAD = 262144, ATA_DFLAG_UNLOCK_HPA = 524288, ATA_DFLAG_INIT_MASK = 1048575, ATA_DFLAG_NCQ_PRIO_ENABLED = 1048576, ATA_DFLAG_CDL_ENABLED = 2097152, ATA_DFLAG_RESUMING = 4194304, ATA_DFLAG_DETACH = 16777216, ATA_DFLAG_DETACHED = 33554432, ATA_DFLAG_DA = 67108864, ATA_DFLAG_DEVSLP = 134217728, ATA_DFLAG_ACPI_DISABLED = 268435456, ATA_DFLAG_D_SENSE = 536870912, ATA_DFLAG_ZAC = 1073741824, ATA_DFLAG_FEATURES_MASK = 201341696, ATA_DEV_UNKNOWN = 0, ATA_DEV_ATA = 1, ATA_DEV_ATA_UNSUP = 2, ATA_DEV_ATAPI = 3, ATA_DEV_ATAPI_UNSUP = 4, ATA_DEV_PMP = 5, ATA_DEV_PMP_UNSUP = 6, ATA_DEV_SEMB = 7, ATA_DEV_SEMB_UNSUP = 8, ATA_DEV_ZAC = 9, ATA_DEV_ZAC_UNSUP = 10, ATA_DEV_NONE = 11, ATA_LFLAG_NO_HRST = 2, ATA_LFLAG_NO_SRST = 4, ATA_LFLAG_ASSUME_ATA = 8, ATA_LFLAG_ASSUME_SEMB = 16, ATA_LFLAG_ASSUME_CLASS = 24, ATA_LFLAG_NO_RETRY = 32, ATA_LFLAG_DISABLED = 64, ATA_LFLAG_SW_ACTIVITY = 128, ATA_LFLAG_NO_LPM = 256, ATA_LFLAG_RST_ONCE = 512, ATA_LFLAG_CHANGED = 1024, ATA_LFLAG_NO_DEBOUNCE_DELAY = 2048, ATA_FLAG_SLAVE_POSS = 1, ATA_FLAG_SATA = 2, ATA_FLAG_NO_LPM = 4, ATA_FLAG_NO_LOG_PAGE = 32, ATA_FLAG_NO_ATAPI = 64, ATA_FLAG_PIO_DMA = 128, ATA_FLAG_PIO_LBA48 = 256, ATA_FLAG_PIO_POLLING = 512, ATA_FLAG_NCQ = 1024, ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, ATA_FLAG_DEBUGMSG = 8192, ATA_FLAG_FPDMA_AA = 16384, ATA_FLAG_IGN_SIMPLEX = 32768, ATA_FLAG_NO_IORDY = 65536, ATA_FLAG_ACPI_SATA = 131072, ATA_FLAG_AN = 262144, ATA_FLAG_PMP = 524288, ATA_FLAG_FPDMA_AUX = 1048576, ATA_FLAG_EM = 2097152, ATA_FLAG_SW_ACTIVITY = 4194304, ATA_FLAG_NO_DIPM = 8388608, ATA_FLAG_SAS_HOST = 16777216, ATA_PFLAG_EH_PENDING = 1, ATA_PFLAG_EH_IN_PROGRESS = 2, ATA_PFLAG_FROZEN = 4, ATA_PFLAG_RECOVERED = 8, ATA_PFLAG_LOADING = 16, ATA_PFLAG_SCSI_HOTPLUG = 64, ATA_PFLAG_INITIALIZING = 128, ATA_PFLAG_RESETTING = 256, ATA_PFLAG_UNLOADING = 512, ATA_PFLAG_UNLOADED = 1024, ATA_PFLAG_RESUMING = 65536, ATA_PFLAG_SUSPENDED = 131072, ATA_PFLAG_PM_PENDING = 262144, ATA_PFLAG_INIT_GTM_VALID = 524288, ATA_PFLAG_PIO32 = 1048576, ATA_PFLAG_PIO32CHANGE = 2097152, ATA_PFLAG_EXTERNAL = 4194304, ATA_QCFLAG_ACTIVE = 1, ATA_QCFLAG_DMAMAP = 2, ATA_QCFLAG_RTF_FILLED = 4, ATA_QCFLAG_IO = 8, ATA_QCFLAG_RESULT_TF = 16, ATA_QCFLAG_CLEAR_EXCL = 32, ATA_QCFLAG_QUIET = 64, 
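/* ATA_QCFLAG_*: per-queued-command state bits; this single flattened enum
 * packs most of libata's constants (flags, timeouts, quirks). */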
ATA_QCFLAG_RETRY = 128, ATA_QCFLAG_HAS_CDL = 256, ATA_QCFLAG_EH = 65536, ATA_QCFLAG_SENSE_VALID = 131072, ATA_QCFLAG_EH_SCHEDULED = 262144, ATA_QCFLAG_EH_SUCCESS_CMD = 524288, ATA_HOST_SIMPLEX = 1, ATA_HOST_STARTED = 2, ATA_HOST_PARALLEL_SCAN = 4, ATA_HOST_IGNORE_ATA = 8, ATA_HOST_NO_PART = 16, ATA_HOST_NO_SSC = 32, ATA_HOST_NO_DEVSLP = 64, ATA_TMOUT_BOOT = 30000, ATA_TMOUT_BOOT_QUICK = 7000, ATA_TMOUT_INTERNAL_QUICK = 5000, ATA_TMOUT_MAX_PARK = 30000, ATA_TMOUT_FF_WAIT_LONG = 2000, ATA_TMOUT_FF_WAIT = 800, ATA_WAIT_AFTER_RESET = 150, ATA_TMOUT_PMP_SRST_WAIT = 10000, ATA_TMOUT_SPURIOUS_PHY = 10000, BUS_UNKNOWN = 0, BUS_DMA = 1, BUS_IDLE = 2, BUS_NOINTR = 3, BUS_NODATA = 4, BUS_TIMER = 5, BUS_PIO = 6, BUS_EDD = 7, BUS_IDENTIFY = 8, BUS_PACKET = 9, PORT_UNKNOWN = 0, PORT_ENABLED = 1, PORT_DISABLED = 2, ATA_NR_PIO_MODES = 7, ATA_NR_MWDMA_MODES = 5, ATA_NR_UDMA_MODES = 8, ATA_SHIFT_PIO = 0, ATA_SHIFT_MWDMA = 7, ATA_SHIFT_UDMA = 12, ATA_SHIFT_PRIO = 6, ATA_PRIO_HIGH = 2, ATA_DMA_PAD_SZ = 4, ATA_ERING_SIZE = 32, ATA_DEFER_LINK = 1, ATA_DEFER_PORT = 2, ATA_EH_DESC_LEN = 80, ATA_EH_REVALIDATE = 1, ATA_EH_SOFTRESET = 2, ATA_EH_HARDRESET = 4, ATA_EH_RESET = 6, ATA_EH_ENABLE_LINK = 8, ATA_EH_PARK = 32, ATA_EH_GET_SUCCESS_SENSE = 64, ATA_EH_SET_ACTIVE = 128, ATA_EH_PERDEV_MASK = 225, ATA_EH_ALL_ACTIONS = 15, ATA_EHI_HOTPLUGGED = 1, ATA_EHI_NO_AUTOPSY = 4, ATA_EHI_QUIET = 8, ATA_EHI_NO_RECOVERY = 16, ATA_EHI_DID_SOFTRESET = 65536, ATA_EHI_DID_HARDRESET = 131072, ATA_EHI_PRINTINFO = 262144, ATA_EHI_SETMODE = 524288, ATA_EHI_POST_SETMODE = 1048576, ATA_EHI_DID_PRINT_QUIRKS = 2097152, ATA_EHI_DID_RESET = 196608, ATA_EHI_TO_SLAVE_MASK = 12, ATA_EH_MAX_TRIES = 5, ATA_LINK_RESUME_TRIES = 5, ATA_EH_DEV_TRIES = 3, ATA_EH_PMP_TRIES = 5, ATA_EH_PMP_LINK_TRIES = 3, SATA_PMP_RW_TIMEOUT = 3000, ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, ATA_QUIRK_DIAGNOSTIC = 1, ATA_QUIRK_NODMA = 2, ATA_QUIRK_NONCQ = 4, ATA_QUIRK_MAX_SEC_128 = 8, ATA_QUIRK_BROKEN_HPA = 16, ATA_QUIRK_DISABLE = 32, ATA_QUIRK_HPA_SIZE = 64, ATA_QUIRK_IVB = 128, ATA_QUIRK_STUCK_ERR = 256, ATA_QUIRK_BRIDGE_OK = 512, ATA_QUIRK_ATAPI_MOD16_DMA = 1024, ATA_QUIRK_FIRMWARE_WARN = 2048, ATA_QUIRK_1_5_GBPS = 4096, ATA_QUIRK_NOSETXFER = 8192, ATA_QUIRK_BROKEN_FPDMA_AA = 16384, ATA_QUIRK_DUMP_ID = 32768, ATA_QUIRK_MAX_SEC_LBA48 = 65536, ATA_QUIRK_ATAPI_DMADIR = 131072, ATA_QUIRK_NO_NCQ_TRIM = 262144, ATA_QUIRK_NOLPM = 524288, ATA_QUIRK_WD_BROKEN_LPM = 1048576, ATA_QUIRK_ZERO_AFTER_TRIM = 2097152, ATA_QUIRK_NO_DMA_LOG = 4194304, ATA_QUIRK_NOTRIM = 8388608, ATA_QUIRK_MAX_SEC_1024 = 16777216, ATA_QUIRK_MAX_TRIM_128M = 33554432, ATA_QUIRK_NO_NCQ_ON_ATI = 67108864, ATA_QUIRK_NO_ID_DEV_LOG = 134217728, ATA_QUIRK_NO_LOG_DIR = 268435456, ATA_QUIRK_NO_FUA = 536870912, ATA_DMA_MASK_ATA = 1, ATA_DMA_MASK_ATAPI = 2, ATA_DMA_MASK_CFA = 4, ATAPI_READ = 0, ATAPI_WRITE = 1, ATAPI_READ_CD = 2, ATAPI_PASS_THRU = 3, ATAPI_MISC = 4, ATA_TIMING_SETUP = 1, ATA_TIMING_ACT8B = 2, ATA_TIMING_REC8B = 4, ATA_TIMING_CYC8B = 8, ATA_TIMING_8BIT = 14, ATA_TIMING_ACTIVE = 16, ATA_TIMING_RECOVER = 32, ATA_TIMING_DMACK_HOLD = 64, ATA_TIMING_CYCLE = 128, ATA_TIMING_UDMA = 256, ATA_TIMING_ALL = 511, ATA_ACPI_FILTER_SETXFER = 1, ATA_ACPI_FILTER_LOCK = 2, ATA_ACPI_FILTER_DIPM = 4, ATA_ACPI_FILTER_FPDMA_OFFSET = 8, ATA_ACPI_FILTER_FPDMA_AA = 16, ATA_ACPI_FILTER_DEFAULT = 7, }; enum { LINK_XSTATS_TYPE_UNSPEC = 0, LINK_XSTATS_TYPE_BRIDGE = 1, LINK_XSTATS_TYPE_BOND = 2, __LINK_XSTATS_TYPE_MAX = 3, }; enum { LINUX_MIB_NUM = 0, LINUX_MIB_SYNCOOKIESSENT = 1, LINUX_MIB_SYNCOOKIESRECV = 2, LINUX_MIB_SYNCOOKIESFAILED 
= 3, LINUX_MIB_EMBRYONICRSTS = 4, LINUX_MIB_PRUNECALLED = 5, LINUX_MIB_RCVPRUNED = 6, LINUX_MIB_OFOPRUNED = 7, LINUX_MIB_OUTOFWINDOWICMPS = 8, LINUX_MIB_LOCKDROPPEDICMPS = 9, LINUX_MIB_ARPFILTER = 10, LINUX_MIB_TIMEWAITED = 11, LINUX_MIB_TIMEWAITRECYCLED = 12, LINUX_MIB_TIMEWAITKILLED = 13, LINUX_MIB_PAWSACTIVEREJECTED = 14, LINUX_MIB_PAWSESTABREJECTED = 15, LINUX_MIB_DELAYEDACKS = 16, LINUX_MIB_DELAYEDACKLOCKED = 17, LINUX_MIB_DELAYEDACKLOST = 18, LINUX_MIB_LISTENOVERFLOWS = 19, LINUX_MIB_LISTENDROPS = 20, LINUX_MIB_TCPHPHITS = 21, LINUX_MIB_TCPPUREACKS = 22, LINUX_MIB_TCPHPACKS = 23, LINUX_MIB_TCPRENORECOVERY = 24, LINUX_MIB_TCPSACKRECOVERY = 25, LINUX_MIB_TCPSACKRENEGING = 26, LINUX_MIB_TCPSACKREORDER = 27, LINUX_MIB_TCPRENOREORDER = 28, LINUX_MIB_TCPTSREORDER = 29, LINUX_MIB_TCPFULLUNDO = 30, LINUX_MIB_TCPPARTIALUNDO = 31, LINUX_MIB_TCPDSACKUNDO = 32, LINUX_MIB_TCPLOSSUNDO = 33, LINUX_MIB_TCPLOSTRETRANSMIT = 34, LINUX_MIB_TCPRENOFAILURES = 35, LINUX_MIB_TCPSACKFAILURES = 36, LINUX_MIB_TCPLOSSFAILURES = 37, LINUX_MIB_TCPFASTRETRANS = 38, LINUX_MIB_TCPSLOWSTARTRETRANS = 39, LINUX_MIB_TCPTIMEOUTS = 40, LINUX_MIB_TCPLOSSPROBES = 41, LINUX_MIB_TCPLOSSPROBERECOVERY = 42, LINUX_MIB_TCPRENORECOVERYFAIL = 43, LINUX_MIB_TCPSACKRECOVERYFAIL = 44, LINUX_MIB_TCPRCVCOLLAPSED = 45, LINUX_MIB_TCPDSACKOLDSENT = 46, LINUX_MIB_TCPDSACKOFOSENT = 47, LINUX_MIB_TCPDSACKRECV = 48, LINUX_MIB_TCPDSACKOFORECV = 49, LINUX_MIB_TCPABORTONDATA = 50, LINUX_MIB_TCPABORTONCLOSE = 51, LINUX_MIB_TCPABORTONMEMORY = 52, LINUX_MIB_TCPABORTONTIMEOUT = 53, LINUX_MIB_TCPABORTONLINGER = 54, LINUX_MIB_TCPABORTFAILED = 55, LINUX_MIB_TCPMEMORYPRESSURES = 56, LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, LINUX_MIB_TCPSACKDISCARD = 58, LINUX_MIB_TCPDSACKIGNOREDOLD = 59, LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, LINUX_MIB_TCPSPURIOUSRTOS = 61, LINUX_MIB_TCPMD5NOTFOUND = 62, LINUX_MIB_TCPMD5UNEXPECTED = 63, LINUX_MIB_TCPMD5FAILURE = 64, LINUX_MIB_SACKSHIFTED = 65, LINUX_MIB_SACKMERGED = 66, LINUX_MIB_SACKSHIFTFALLBACK = 67, LINUX_MIB_TCPBACKLOGDROP = 68, LINUX_MIB_PFMEMALLOCDROP = 69, LINUX_MIB_TCPMINTTLDROP = 70, LINUX_MIB_TCPDEFERACCEPTDROP = 71, LINUX_MIB_IPRPFILTER = 72, LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, LINUX_MIB_TCPREQQFULLDROP = 75, LINUX_MIB_TCPRETRANSFAIL = 76, LINUX_MIB_TCPRCVCOALESCE = 77, LINUX_MIB_TCPBACKLOGCOALESCE = 78, LINUX_MIB_TCPOFOQUEUE = 79, LINUX_MIB_TCPOFODROP = 80, LINUX_MIB_TCPOFOMERGE = 81, LINUX_MIB_TCPCHALLENGEACK = 82, LINUX_MIB_TCPSYNCHALLENGE = 83, LINUX_MIB_TCPFASTOPENACTIVE = 84, LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, LINUX_MIB_TCPFASTOPENPASSIVE = 86, LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, LINUX_MIB_BUSYPOLLRXPACKETS = 92, LINUX_MIB_TCPAUTOCORKING = 93, LINUX_MIB_TCPFROMZEROWINDOWADV = 94, LINUX_MIB_TCPTOZEROWINDOWADV = 95, LINUX_MIB_TCPWANTZEROWINDOWADV = 96, LINUX_MIB_TCPSYNRETRANS = 97, LINUX_MIB_TCPORIGDATASENT = 98, LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, LINUX_MIB_TCPHYSTARTTRAINCWND = 100, LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, LINUX_MIB_TCPHYSTARTDELAYCWND = 102, LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, LINUX_MIB_TCPACKSKIPPEDPAWS = 104, LINUX_MIB_TCPACKSKIPPEDSEQ = 105, LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, LINUX_MIB_TCPWINPROBE = 109, LINUX_MIB_TCPKEEPALIVE = 110, LINUX_MIB_TCPMTUPFAIL = 111, LINUX_MIB_TCPMTUPSUCCESS = 112, 
LINUX_MIB_TCPDELIVERED = 113, LINUX_MIB_TCPDELIVEREDCE = 114, LINUX_MIB_TCPACKCOMPRESSED = 115, LINUX_MIB_TCPZEROWINDOWDROP = 116, LINUX_MIB_TCPRCVQDROP = 117, LINUX_MIB_TCPWQUEUETOOBIG = 118, LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, LINUX_MIB_TCPTIMEOUTREHASH = 120, LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, LINUX_MIB_TCPDSACKRECVSEGS = 122, LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, LINUX_MIB_TCPMIGRATEREQSUCCESS = 124, LINUX_MIB_TCPMIGRATEREQFAILURE = 125, LINUX_MIB_TCPPLBREHASH = 126, LINUX_MIB_TCPAOREQUIRED = 127, LINUX_MIB_TCPAOBAD = 128, LINUX_MIB_TCPAOKEYNOTFOUND = 129, LINUX_MIB_TCPAOGOOD = 130, LINUX_MIB_TCPAODROPPEDICMPS = 131, __LINUX_MIB_MAX = 132, }; enum { LINUX_MIB_TLSNUM = 0, LINUX_MIB_TLSCURRTXSW = 1, LINUX_MIB_TLSCURRRXSW = 2, LINUX_MIB_TLSCURRTXDEVICE = 3, LINUX_MIB_TLSCURRRXDEVICE = 4, LINUX_MIB_TLSTXSW = 5, LINUX_MIB_TLSRXSW = 6, LINUX_MIB_TLSTXDEVICE = 7, LINUX_MIB_TLSRXDEVICE = 8, LINUX_MIB_TLSDECRYPTERROR = 9, LINUX_MIB_TLSRXDEVICERESYNC = 10, LINUX_MIB_TLSDECRYPTRETRY = 11, LINUX_MIB_TLSRXNOPADVIOL = 12, __LINUX_MIB_TLSMAX = 13, }; enum { LINUX_MIB_XFRMNUM = 0, LINUX_MIB_XFRMINERROR = 1, LINUX_MIB_XFRMINBUFFERERROR = 2, LINUX_MIB_XFRMINHDRERROR = 3, LINUX_MIB_XFRMINNOSTATES = 4, LINUX_MIB_XFRMINSTATEPROTOERROR = 5, LINUX_MIB_XFRMINSTATEMODEERROR = 6, LINUX_MIB_XFRMINSTATESEQERROR = 7, LINUX_MIB_XFRMINSTATEEXPIRED = 8, LINUX_MIB_XFRMINSTATEMISMATCH = 9, LINUX_MIB_XFRMINSTATEINVALID = 10, LINUX_MIB_XFRMINTMPLMISMATCH = 11, LINUX_MIB_XFRMINNOPOLS = 12, LINUX_MIB_XFRMINPOLBLOCK = 13, LINUX_MIB_XFRMINPOLERROR = 14, LINUX_MIB_XFRMOUTERROR = 15, LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, LINUX_MIB_XFRMOUTNOSTATES = 18, LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, LINUX_MIB_XFRMOUTSTATESEQERROR = 21, LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, LINUX_MIB_XFRMOUTPOLBLOCK = 23, LINUX_MIB_XFRMOUTPOLDEAD = 24, LINUX_MIB_XFRMOUTPOLERROR = 25, LINUX_MIB_XFRMFWDHDRERROR = 26, LINUX_MIB_XFRMOUTSTATEINVALID = 27, LINUX_MIB_XFRMACQUIREERROR = 28, LINUX_MIB_XFRMOUTSTATEDIRERROR = 29, LINUX_MIB_XFRMINSTATEDIRERROR = 30, __LINUX_MIB_XFRMMAX = 31, }; enum { LINUX_RAID_PARTITION = 253, }; enum { LOCKF_USED_IN_HARDIRQ = 1, LOCKF_USED_IN_HARDIRQ_READ = 2, LOCKF_ENABLED_HARDIRQ = 4, LOCKF_ENABLED_HARDIRQ_READ = 8, LOCKF_USED_IN_SOFTIRQ = 16, LOCKF_USED_IN_SOFTIRQ_READ = 32, LOCKF_ENABLED_SOFTIRQ = 64, LOCKF_ENABLED_SOFTIRQ_READ = 128, LOCKF_USED = 256, LOCKF_USED_READ = 512, }; enum { LOGIC_PIO_INDIRECT = 0, LOGIC_PIO_CPU_MMIO = 1, }; enum { LO_FLAGS_READ_ONLY = 1, LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_PARTSCAN = 8, LO_FLAGS_DIRECT_IO = 16, }; enum { LWTUNNEL_IP_OPTS_UNSPEC = 0, LWTUNNEL_IP_OPTS_GENEVE = 1, LWTUNNEL_IP_OPTS_VXLAN = 2, LWTUNNEL_IP_OPTS_ERSPAN = 3, __LWTUNNEL_IP_OPTS_MAX = 4, }; enum { LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, LWTUNNEL_IP_OPT_ERSPAN_VER = 1, LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, }; enum { LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, LWTUNNEL_IP_OPT_GENEVE_DATA = 3, __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, }; enum { LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, LWTUNNEL_IP_OPT_VXLAN_GBP = 1, __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, }; enum { LWTUNNEL_XMIT_DONE = 0, LWTUNNEL_XMIT_CONTINUE = 256, }; enum { LWT_BPF_PROG_UNSPEC = 0, LWT_BPF_PROG_FD = 1, LWT_BPF_PROG_NAME = 2, __LWT_BPF_PROG_MAX = 3, }; enum { LWT_BPF_UNSPEC = 0, LWT_BPF_IN = 1, LWT_BPF_OUT = 2, LWT_BPF_XMIT = 3, LWT_BPF_XMIT_HEADROOM 
= 4, __LWT_BPF_MAX = 5, }; enum { LWT_XFRM_UNSPEC = 0, LWT_XFRM_IF_ID = 1, LWT_XFRM_LINK = 2, __LWT_XFRM_MAX = 3, }; enum { Lo_unbound = 0, Lo_bound = 1, Lo_rundown = 2, Lo_deleting = 3, }; enum { MAGNITUDE_STRONG = 2, MAGNITUDE_WEAK = 3, MAGNITUDE_NUM = 4, }; enum { MATCH_MTR = 0, MATCH_MEQ = 1, MATCH_MLE = 2, MATCH_MLT = 3, MATCH_MGE = 4, MATCH_MGT = 5, }; enum { MAX_IORES_LEVEL = 5, }; enum { MAX_OPT_ARGS = 3, }; enum { MBE_REFERENCED_B = 0, MBE_REUSABLE_B = 1, }; enum { MB_INODE_PA = 0, MB_GROUP_PA = 1, }; enum { MDBA_GET_ENTRY_UNSPEC = 0, MDBA_GET_ENTRY = 1, MDBA_GET_ENTRY_ATTRS = 2, __MDBA_GET_ENTRY_MAX = 3, }; enum { MDBA_MDB_EATTR_UNSPEC = 0, MDBA_MDB_EATTR_TIMER = 1, MDBA_MDB_EATTR_SRC_LIST = 2, MDBA_MDB_EATTR_GROUP_MODE = 3, MDBA_MDB_EATTR_SOURCE = 4, MDBA_MDB_EATTR_RTPROT = 5, MDBA_MDB_EATTR_DST = 6, MDBA_MDB_EATTR_DST_PORT = 7, MDBA_MDB_EATTR_VNI = 8, MDBA_MDB_EATTR_IFINDEX = 9, MDBA_MDB_EATTR_SRC_VNI = 10, __MDBA_MDB_EATTR_MAX = 11, }; enum { MDBA_MDB_ENTRY_UNSPEC = 0, MDBA_MDB_ENTRY_INFO = 1, __MDBA_MDB_ENTRY_MAX = 2, }; enum { MDBA_MDB_SRCATTR_UNSPEC = 0, MDBA_MDB_SRCATTR_ADDRESS = 1, MDBA_MDB_SRCATTR_TIMER = 2, __MDBA_MDB_SRCATTR_MAX = 3, }; enum { MDBA_MDB_SRCLIST_UNSPEC = 0, MDBA_MDB_SRCLIST_ENTRY = 1, __MDBA_MDB_SRCLIST_MAX = 2, }; enum { MDBA_MDB_UNSPEC = 0, MDBA_MDB_ENTRY = 1, __MDBA_MDB_MAX = 2, }; enum { MDBA_SET_ENTRY_UNSPEC = 0, MDBA_SET_ENTRY = 1, MDBA_SET_ENTRY_ATTRS = 2, __MDBA_SET_ENTRY_MAX = 3, }; enum { MDBA_UNSPEC = 0, MDBA_MDB = 1, MDBA_ROUTER = 2, __MDBA_MAX = 3, }; enum { MDBE_ATTR_UNSPEC = 0, MDBE_ATTR_SOURCE = 1, MDBE_ATTR_SRC_LIST = 2, MDBE_ATTR_GROUP_MODE = 3, MDBE_ATTR_RTPROT = 4, MDBE_ATTR_DST = 5, MDBE_ATTR_DST_PORT = 6, MDBE_ATTR_VNI = 7, MDBE_ATTR_IFINDEX = 8, MDBE_ATTR_SRC_VNI = 9, MDBE_ATTR_STATE_MASK = 10, __MDBE_ATTR_MAX = 11, }; enum { MDBE_SRCATTR_UNSPEC = 0, MDBE_SRCATTR_ADDRESS = 1, __MDBE_SRCATTR_MAX = 2, }; enum { MDBE_SRC_LIST_UNSPEC = 0, MDBE_SRC_LIST_ENTRY = 1, __MDBE_SRC_LIST_MAX = 2, }; enum { MEMBARRIER_FLAG_SYNC_CORE = 1, MEMBARRIER_FLAG_RSEQ = 2, }; enum { MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, }; enum { MEMORY_RECLAIM_SWAPPINESS = 0, MEMORY_RECLAIM_NULL = 1, }; enum { MEMREMAP_WB = 1, MEMREMAP_WT = 2, MEMREMAP_WC = 4, MEMREMAP_ENC = 8, MEMREMAP_DEC = 16, }; enum { MEMTYPE_EXACT_MATCH = 0, MEMTYPE_END_MATCH = 1, }; enum { MFC_STATIC = 1, MFC_OFFLOAD = 2, }; enum { MIX_INFLIGHT = 2147483648, }; enum { MM_FILEPAGES = 0, MM_ANONPAGES = 1, MM_SWAPENTS = 2, MM_SHMEMPAGES = 3, NR_MM_COUNTERS = 4, }; enum { MOXA_SUPP_RS232 = 1, MOXA_SUPP_RS422 = 2, MOXA_SUPP_RS485 = 4, }; enum { MPLS_IPTUNNEL_UNSPEC = 0, MPLS_IPTUNNEL_DST = 1, MPLS_IPTUNNEL_TTL = 2, __MPLS_IPTUNNEL_MAX = 3, }; enum { MPLS_STATS_UNSPEC = 0, MPLS_STATS_LINK = 1, __MPLS_STATS_MAX = 2, }; enum { MPOL_DEFAULT = 0, MPOL_PREFERRED = 1, MPOL_BIND = 2, MPOL_INTERLEAVE = 3, MPOL_LOCAL = 4, MPOL_PREFERRED_MANY = 5, MPOL_WEIGHTED_INTERLEAVE = 6, MPOL_MAX = 7, }; enum { MPTCP_CMSG_TS = 1, MPTCP_CMSG_INQ = 2, }; enum { MPTCP_PM_ADDR_ATTR_UNSPEC = 0, MPTCP_PM_ADDR_ATTR_FAMILY = 1, MPTCP_PM_ADDR_ATTR_ID = 2, MPTCP_PM_ADDR_ATTR_ADDR4 = 3, MPTCP_PM_ADDR_ATTR_ADDR6 = 4, MPTCP_PM_ADDR_ATTR_PORT = 5, MPTCP_PM_ADDR_ATTR_FLAGS = 6, MPTCP_PM_ADDR_ATTR_IF_IDX = 
7, __MPTCP_PM_ADDR_ATTR_MAX = 8, }; enum { MPTCP_PM_ATTR_UNSPEC = 0, MPTCP_PM_ATTR_ADDR = 1, MPTCP_PM_ATTR_RCV_ADD_ADDRS = 2, MPTCP_PM_ATTR_SUBFLOWS = 3, MPTCP_PM_ATTR_TOKEN = 4, MPTCP_PM_ATTR_LOC_ID = 5, MPTCP_PM_ATTR_ADDR_REMOTE = 6, __MPTCP_ATTR_AFTER_LAST = 7, }; enum { MPTCP_PM_CMD_UNSPEC = 0, MPTCP_PM_CMD_ADD_ADDR = 1, MPTCP_PM_CMD_DEL_ADDR = 2, MPTCP_PM_CMD_GET_ADDR = 3, MPTCP_PM_CMD_FLUSH_ADDRS = 4, MPTCP_PM_CMD_SET_LIMITS = 5, MPTCP_PM_CMD_GET_LIMITS = 6, MPTCP_PM_CMD_SET_FLAGS = 7, MPTCP_PM_CMD_ANNOUNCE = 8, MPTCP_PM_CMD_REMOVE = 9, MPTCP_PM_CMD_SUBFLOW_CREATE = 10, MPTCP_PM_CMD_SUBFLOW_DESTROY = 11, __MPTCP_PM_CMD_AFTER_LAST = 12, }; enum { MPTCP_PM_ENDPOINT_ADDR = 1, __MPTCP_PM_ENDPOINT_MAX = 2, }; enum { MPTCP_SUBFLOW_ATTR_UNSPEC = 0, MPTCP_SUBFLOW_ATTR_TOKEN_REM = 1, MPTCP_SUBFLOW_ATTR_TOKEN_LOC = 2, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ = 3, MPTCP_SUBFLOW_ATTR_MAP_SEQ = 4, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ = 5, MPTCP_SUBFLOW_ATTR_SSN_OFFSET = 6, MPTCP_SUBFLOW_ATTR_MAP_DATALEN = 7, MPTCP_SUBFLOW_ATTR_FLAGS = 8, MPTCP_SUBFLOW_ATTR_ID_REM = 9, MPTCP_SUBFLOW_ATTR_ID_LOC = 10, MPTCP_SUBFLOW_ATTR_PAD = 11, __MPTCP_SUBFLOW_ATTR_MAX = 12, }; enum { MSI_FLAG_USE_DEF_DOM_OPS = 1, MSI_FLAG_USE_DEF_CHIP_OPS = 2, MSI_FLAG_ACTIVATE_EARLY = 4, MSI_FLAG_MUST_REACTIVATE = 8, MSI_FLAG_DEV_SYSFS = 16, MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = 32, MSI_FLAG_FREE_MSI_DESCS = 64, MSI_FLAG_USE_DEV_FWNODE = 128, MSI_FLAG_PARENT_PM_DEV = 256, MSI_FLAG_PCI_MSI_MASK_PARENT = 512, MSI_GENERIC_FLAGS_MASK = 65535, MSI_DOMAIN_FLAGS_MASK = 4294901760, MSI_FLAG_MULTI_PCI_MSI = 65536, MSI_FLAG_PCI_MSIX = 131072, MSI_FLAG_LEVEL_CAPABLE = 262144, MSI_FLAG_MSIX_CONTIGUOUS = 524288, MSI_FLAG_PCI_MSIX_ALLOC_DYN = 1048576, MSI_FLAG_NO_AFFINITY = 2097152, }; enum { MTTG_TRAV_INIT = 0, MTTG_TRAV_NFP_UNSPEC = 1, MTTG_TRAV_NFP_SPEC = 2, MTTG_TRAV_DONE = 3, }; enum { M_I17 = 0, M_I20 = 1, M_I20_SR = 2, M_I24 = 3, M_I24_8_1 = 4, M_I24_10_1 = 5, M_I27_11_1 = 6, M_MINI = 7, M_MINI_3_1 = 8, M_MINI_4_1 = 9, M_MB = 10, M_MB_2 = 11, M_MB_3 = 12, M_MB_5_1 = 13, M_MB_6_1 = 14, M_MB_7_1 = 15, M_MB_SR = 16, M_MBA = 17, M_MBA_3 = 18, M_MBP = 19, M_MBP_2 = 20, M_MBP_2_2 = 21, M_MBP_SR = 22, M_MBP_4 = 23, M_MBP_5_1 = 24, M_MBP_5_2 = 25, M_MBP_5_3 = 26, M_MBP_6_1 = 27, M_MBP_6_2 = 28, M_MBP_7_1 = 29, M_MBP_8_2 = 30, M_UNKNOWN = 31, }; enum { NAPIF_STATE_SCHED = 1, NAPIF_STATE_MISSED = 2, NAPIF_STATE_DISABLE = 4, NAPIF_STATE_NPSVC = 8, NAPIF_STATE_LISTED = 16, NAPIF_STATE_NO_BUSY_POLL = 32, NAPIF_STATE_IN_BUSY_POLL = 64, NAPIF_STATE_PREFER_BUSY_POLL = 128, NAPIF_STATE_THREADED = 256, NAPIF_STATE_SCHED_THREADED = 512, }; enum { NAPI_F_PREFER_BUSY_POLL = 1, NAPI_F_END_ON_RESCHED = 2, }; enum { NAPI_STATE_SCHED = 0, NAPI_STATE_MISSED = 1, NAPI_STATE_DISABLE = 2, NAPI_STATE_NPSVC = 3, NAPI_STATE_LISTED = 4, NAPI_STATE_NO_BUSY_POLL = 5, NAPI_STATE_IN_BUSY_POLL = 6, NAPI_STATE_PREFER_BUSY_POLL = 7, NAPI_STATE_THREADED = 8, NAPI_STATE_SCHED_THREADED = 9, }; enum { NDA_UNSPEC = 0, NDA_DST = 1, NDA_LLADDR = 2, NDA_CACHEINFO = 3, NDA_PROBES = 4, NDA_VLAN = 5, NDA_PORT = 6, NDA_VNI = 7, NDA_IFINDEX = 8, NDA_MASTER = 9, NDA_LINK_NETNSID = 10, NDA_SRC_VNI = 11, NDA_PROTOCOL = 12, NDA_NH_ID = 13, NDA_FDB_EXT_ATTRS = 14, NDA_FLAGS_EXT = 15, NDA_NDM_STATE_MASK = 16, NDA_NDM_FLAGS_MASK = 17, __NDA_MAX = 18, }; enum { NDD_UNARMED = 1, NDD_LOCKED = 2, NDD_SECURITY_OVERWRITE = 3, NDD_WORK_PENDING = 4, NDD_LABELING = 6, NDD_INCOHERENT = 7, NDD_REGISTER_SYNC = 8, ND_IOCTL_MAX_BUFLEN = 4194304, ND_CMD_MAX_ELEM = 5, ND_CMD_MAX_ENVELOPE = 256, ND_MAX_MAPPINGS = 32, 
ND_REGION_PAGEMAP = 0, ND_REGION_PERSIST_CACHE = 1, ND_REGION_PERSIST_MEMCTRL = 2, ND_REGION_ASYNC = 3, ND_REGION_CXL = 4, DPA_RESOURCE_ADJUSTED = 1, }; enum { NDTA_UNSPEC = 0, NDTA_NAME = 1, NDTA_THRESH1 = 2, NDTA_THRESH2 = 3, NDTA_THRESH3 = 4, NDTA_CONFIG = 5, NDTA_PARMS = 6, NDTA_STATS = 7, NDTA_GC_INTERVAL = 8, NDTA_PAD = 9, __NDTA_MAX = 10, }; enum { NDTPA_UNSPEC = 0, NDTPA_IFINDEX = 1, NDTPA_REFCNT = 2, NDTPA_REACHABLE_TIME = 3, NDTPA_BASE_REACHABLE_TIME = 4, NDTPA_RETRANS_TIME = 5, NDTPA_GC_STALETIME = 6, NDTPA_DELAY_PROBE_TIME = 7, NDTPA_QUEUE_LEN = 8, NDTPA_APP_PROBES = 9, NDTPA_UCAST_PROBES = 10, NDTPA_MCAST_PROBES = 11, NDTPA_ANYCAST_DELAY = 12, NDTPA_PROXY_DELAY = 13, NDTPA_PROXY_QLEN = 14, NDTPA_LOCKTIME = 15, NDTPA_QUEUE_LENBYTES = 16, NDTPA_MCAST_REPROBES = 17, NDTPA_PAD = 18, NDTPA_INTERVAL_PROBE_TIME_MS = 19, __NDTPA_MAX = 20, }; enum { NDUSEROPT_UNSPEC = 0, NDUSEROPT_SRCADDR = 1, __NDUSEROPT_MAX = 2, }; enum { NEIGH_ARP_TABLE = 0, NEIGH_ND_TABLE = 1, NEIGH_DN_TABLE = 2, NEIGH_NR_TABLES = 3, NEIGH_LINK_TABLE = 3, }; enum { NEIGH_VAR_MCAST_PROBES = 0, NEIGH_VAR_UCAST_PROBES = 1, NEIGH_VAR_APP_PROBES = 2, NEIGH_VAR_MCAST_REPROBES = 3, NEIGH_VAR_RETRANS_TIME = 4, NEIGH_VAR_BASE_REACHABLE_TIME = 5, NEIGH_VAR_DELAY_PROBE_TIME = 6, NEIGH_VAR_INTERVAL_PROBE_TIME_MS = 7, NEIGH_VAR_GC_STALETIME = 8, NEIGH_VAR_QUEUE_LEN_BYTES = 9, NEIGH_VAR_PROXY_QLEN = 10, NEIGH_VAR_ANYCAST_DELAY = 11, NEIGH_VAR_PROXY_DELAY = 12, NEIGH_VAR_LOCKTIME = 13, NEIGH_VAR_QUEUE_LEN = 14, NEIGH_VAR_RETRANS_TIME_MS = 15, NEIGH_VAR_BASE_REACHABLE_TIME_MS = 16, NEIGH_VAR_GC_INTERVAL = 17, NEIGH_VAR_GC_THRESH1 = 18, NEIGH_VAR_GC_THRESH2 = 19, NEIGH_VAR_GC_THRESH3 = 20, NEIGH_VAR_MAX = 21, }; enum { NESTED_SYNC_IMM_BIT = 0, NESTED_SYNC_TODO_BIT = 1, }; enum { NETCONFA_UNSPEC = 0, NETCONFA_IFINDEX = 1, NETCONFA_FORWARDING = 2, NETCONFA_RP_FILTER = 3, NETCONFA_MC_FORWARDING = 4, NETCONFA_PROXY_NEIGH = 5, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, NETCONFA_INPUT = 7, NETCONFA_BC_FORWARDING = 8, __NETCONFA_MAX = 9, }; enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD = 2, NETDEV_A_DEV_XDP_FEATURES = 3, NETDEV_A_DEV_XDP_ZC_MAX_SEGS = 4, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES = 5, NETDEV_A_DEV_XSK_FEATURES = 6, __NETDEV_A_DEV_MAX = 7, NETDEV_A_DEV_MAX = 6, }; enum { NETDEV_A_DMABUF_IFINDEX = 1, NETDEV_A_DMABUF_QUEUES = 2, NETDEV_A_DMABUF_FD = 3, NETDEV_A_DMABUF_ID = 4, __NETDEV_A_DMABUF_MAX = 5, NETDEV_A_DMABUF_MAX = 4, }; enum { NETDEV_A_NAPI_IFINDEX = 1, NETDEV_A_NAPI_ID = 2, NETDEV_A_NAPI_IRQ = 3, NETDEV_A_NAPI_PID = 4, __NETDEV_A_NAPI_MAX = 5, NETDEV_A_NAPI_MAX = 4, }; enum { NETDEV_A_PAGE_POOL_ID = 1, NETDEV_A_PAGE_POOL_IFINDEX = 2, NETDEV_A_PAGE_POOL_NAPI_ID = 3, NETDEV_A_PAGE_POOL_INFLIGHT = 4, NETDEV_A_PAGE_POOL_INFLIGHT_MEM = 5, NETDEV_A_PAGE_POOL_DETACH_TIME = 6, NETDEV_A_PAGE_POOL_DMABUF = 7, __NETDEV_A_PAGE_POOL_MAX = 8, NETDEV_A_PAGE_POOL_MAX = 7, }; enum { NETDEV_A_PAGE_POOL_STATS_INFO = 1, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW = 9, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER = 10, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY = 11, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL = 12, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE = 13, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED = 14, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL = 15, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING = 16, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL = 17, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT = 18, __NETDEV_A_PAGE_POOL_STATS_MAX = 19, NETDEV_A_PAGE_POOL_STATS_MAX = 18, }; enum { NETDEV_A_QSTATS_IFINDEX = 1, 
NETDEV_A_QSTATS_QUEUE_TYPE = 2, NETDEV_A_QSTATS_QUEUE_ID = 3, NETDEV_A_QSTATS_SCOPE = 4, NETDEV_A_QSTATS_RX_PACKETS = 8, NETDEV_A_QSTATS_RX_BYTES = 9, NETDEV_A_QSTATS_TX_PACKETS = 10, NETDEV_A_QSTATS_TX_BYTES = 11, NETDEV_A_QSTATS_RX_ALLOC_FAIL = 12, NETDEV_A_QSTATS_RX_HW_DROPS = 13, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS = 14, NETDEV_A_QSTATS_RX_CSUM_COMPLETE = 15, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY = 16, NETDEV_A_QSTATS_RX_CSUM_NONE = 17, NETDEV_A_QSTATS_RX_CSUM_BAD = 18, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS = 19, NETDEV_A_QSTATS_RX_HW_GRO_BYTES = 20, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS = 21, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES = 22, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS = 23, NETDEV_A_QSTATS_TX_HW_DROPS = 24, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS = 25, NETDEV_A_QSTATS_TX_CSUM_NONE = 26, NETDEV_A_QSTATS_TX_NEEDS_CSUM = 27, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS = 28, NETDEV_A_QSTATS_TX_HW_GSO_BYTES = 29, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS = 30, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES = 31, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS = 32, NETDEV_A_QSTATS_TX_STOP = 33, NETDEV_A_QSTATS_TX_WAKE = 34, __NETDEV_A_QSTATS_MAX = 35, NETDEV_A_QSTATS_MAX = 34, }; enum { NETDEV_A_QUEUE_ID = 1, NETDEV_A_QUEUE_IFINDEX = 2, NETDEV_A_QUEUE_TYPE = 3, NETDEV_A_QUEUE_NAPI_ID = 4, NETDEV_A_QUEUE_DMABUF = 5, __NETDEV_A_QUEUE_MAX = 6, NETDEV_A_QUEUE_MAX = 5, }; enum { NETDEV_CMD_DEV_GET = 1, NETDEV_CMD_DEV_ADD_NTF = 2, NETDEV_CMD_DEV_DEL_NTF = 3, NETDEV_CMD_DEV_CHANGE_NTF = 4, NETDEV_CMD_PAGE_POOL_GET = 5, NETDEV_CMD_PAGE_POOL_ADD_NTF = 6, NETDEV_CMD_PAGE_POOL_DEL_NTF = 7, NETDEV_CMD_PAGE_POOL_CHANGE_NTF = 8, NETDEV_CMD_PAGE_POOL_STATS_GET = 9, NETDEV_CMD_QUEUE_GET = 10, NETDEV_CMD_NAPI_GET = 11, NETDEV_CMD_QSTATS_GET = 12, NETDEV_CMD_BIND_RX = 13, __NETDEV_CMD_MAX = 14, NETDEV_CMD_MAX = 13, }; enum { NETDEV_NLGRP_MGMT = 0, NETDEV_NLGRP_PAGE_POOL = 1, }; enum { NETIF_F_SG_BIT = 0, NETIF_F_IP_CSUM_BIT = 1, __UNUSED_NETIF_F_1 = 2, NETIF_F_HW_CSUM_BIT = 3, NETIF_F_IPV6_CSUM_BIT = 4, NETIF_F_HIGHDMA_BIT = 5, NETIF_F_FRAGLIST_BIT = 6, NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, NETIF_F_VLAN_CHALLENGED_BIT = 10, NETIF_F_GSO_BIT = 11, __UNUSED_NETIF_F_12 = 12, __UNUSED_NETIF_F_13 = 13, NETIF_F_GRO_BIT = 14, NETIF_F_LRO_BIT = 15, NETIF_F_GSO_SHIFT = 16, NETIF_F_TSO_BIT = 16, NETIF_F_GSO_ROBUST_BIT = 17, NETIF_F_TSO_ECN_BIT = 18, NETIF_F_TSO_MANGLEID_BIT = 19, NETIF_F_TSO6_BIT = 20, NETIF_F_FSO_BIT = 21, NETIF_F_GSO_GRE_BIT = 22, NETIF_F_GSO_GRE_CSUM_BIT = 23, NETIF_F_GSO_IPXIP4_BIT = 24, NETIF_F_GSO_IPXIP6_BIT = 25, NETIF_F_GSO_UDP_TUNNEL_BIT = 26, NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, NETIF_F_GSO_PARTIAL_BIT = 28, NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, NETIF_F_GSO_SCTP_BIT = 30, NETIF_F_GSO_ESP_BIT = 31, NETIF_F_GSO_UDP_BIT = 32, NETIF_F_GSO_UDP_L4_BIT = 33, NETIF_F_GSO_FRAGLIST_BIT = 34, NETIF_F_GSO_LAST = 34, NETIF_F_FCOE_CRC_BIT = 35, NETIF_F_SCTP_CRC_BIT = 36, __UNUSED_NETIF_F_37 = 37, NETIF_F_NTUPLE_BIT = 38, NETIF_F_RXHASH_BIT = 39, NETIF_F_RXCSUM_BIT = 40, NETIF_F_NOCACHE_COPY_BIT = 41, NETIF_F_LOOPBACK_BIT = 42, NETIF_F_RXFCS_BIT = 43, NETIF_F_RXALL_BIT = 44, NETIF_F_HW_VLAN_STAG_TX_BIT = 45, NETIF_F_HW_VLAN_STAG_RX_BIT = 46, NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, NETIF_F_HW_TC_BIT = 49, NETIF_F_HW_ESP_BIT = 50, NETIF_F_HW_ESP_TX_CSUM_BIT = 51, NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, NETIF_F_HW_TLS_TX_BIT = 53, NETIF_F_HW_TLS_RX_BIT = 54, NETIF_F_GRO_HW_BIT = 55, NETIF_F_HW_TLS_RECORD_BIT = 56, NETIF_F_GRO_FRAGLIST_BIT = 57, 
NETIF_F_HW_MACSEC_BIT = 58, NETIF_F_GRO_UDP_FWD_BIT = 59, NETIF_F_HW_HSR_TAG_INS_BIT = 60, NETIF_F_HW_HSR_TAG_RM_BIT = 61, NETIF_F_HW_HSR_FWD_BIT = 62, NETIF_F_HW_HSR_DUP_BIT = 63, NETDEV_FEATURE_COUNT = 64, }; enum { NETIF_MSG_DRV_BIT = 0, NETIF_MSG_PROBE_BIT = 1, NETIF_MSG_LINK_BIT = 2, NETIF_MSG_TIMER_BIT = 3, NETIF_MSG_IFDOWN_BIT = 4, NETIF_MSG_IFUP_BIT = 5, NETIF_MSG_RX_ERR_BIT = 6, NETIF_MSG_TX_ERR_BIT = 7, NETIF_MSG_TX_QUEUED_BIT = 8, NETIF_MSG_INTR_BIT = 9, NETIF_MSG_TX_DONE_BIT = 10, NETIF_MSG_RX_STATUS_BIT = 11, NETIF_MSG_PKTDATA_BIT = 12, NETIF_MSG_HW_BIT = 13, NETIF_MSG_WOL_BIT = 14, NETIF_MSG_CLASS_COUNT = 15, }; enum { NETLINK_F_KERNEL_SOCKET = 0, NETLINK_F_RECV_PKTINFO = 1, NETLINK_F_BROADCAST_SEND_ERROR = 2, NETLINK_F_RECV_NO_ENOBUFS = 3, NETLINK_F_LISTEN_ALL_NSID = 4, NETLINK_F_CAP_ACK = 5, NETLINK_F_EXT_ACK = 6, NETLINK_F_STRICT_CHK = 7, }; enum { NETLINK_UNCONNECTED = 0, NETLINK_CONNECTED = 1, }; enum { NETNSA_NONE = 0, NETNSA_NSID = 1, NETNSA_PID = 2, NETNSA_FD = 3, NETNSA_TARGET_NSID = 4, NETNSA_CURRENT_NSID = 5, __NETNSA_MAX = 6, }; enum { NET_NS_INDEX = 0, UTS_NS_INDEX = 1, IPC_NS_INDEX = 2, PID_NS_INDEX = 3, USER_NS_INDEX = 4, MNT_NS_INDEX = 5, CGROUP_NS_INDEX = 6, NR_NAMESPACES = 7, }; enum { NEXTHOP_GRP_TYPE_MPATH = 0, NEXTHOP_GRP_TYPE_RES = 1, __NEXTHOP_GRP_TYPE_MAX = 2, }; enum { NFNL_BATCH_FAILURE = 1, NFNL_BATCH_DONE = 2, NFNL_BATCH_REPLAY = 4, }; enum { NFPROTO_UNSPEC = 0, NFPROTO_INET = 1, NFPROTO_IPV4 = 2, NFPROTO_ARP = 3, NFPROTO_NETDEV = 5, NFPROTO_BRIDGE = 7, NFPROTO_IPV6 = 10, NFPROTO_NUMPROTO = 11, }; enum { NFT_INNER_EXPR_PAYLOAD = 0, NFT_INNER_EXPR_META = 1, }; enum { NFT_PAYLOAD_CTX_INNER_TUN = 1, NFT_PAYLOAD_CTX_INNER_LL = 2, NFT_PAYLOAD_CTX_INNER_NH = 4, NFT_PAYLOAD_CTX_INNER_TH = 8, }; enum { NFT_PKTINFO_L4PROTO = 1, NFT_PKTINFO_INNER = 2, NFT_PKTINFO_INNER_FULL = 4, }; enum { NFT_VALIDATE_SKIP = 0, NFT_VALIDATE_NEED = 1, NFT_VALIDATE_DO = 2, }; enum { NF_BPF_CT_OPTS_SZ = 16, }; enum { NF_BPF_FLOWTABLE_OPTS_SZ = 4, }; enum { NHA_GROUP_STATS_ENTRY_UNSPEC = 0, NHA_GROUP_STATS_ENTRY_ID = 1, NHA_GROUP_STATS_ENTRY_PACKETS = 2, NHA_GROUP_STATS_ENTRY_PACKETS_HW = 3, __NHA_GROUP_STATS_ENTRY_MAX = 4, }; enum { NHA_GROUP_STATS_UNSPEC = 0, NHA_GROUP_STATS_ENTRY = 1, __NHA_GROUP_STATS_MAX = 2, }; enum { NHA_RES_BUCKET_UNSPEC = 0, NHA_RES_BUCKET_PAD = 0, NHA_RES_BUCKET_INDEX = 1, NHA_RES_BUCKET_IDLE_TIME = 2, NHA_RES_BUCKET_NH_ID = 3, __NHA_RES_BUCKET_MAX = 4, }; enum { NHA_RES_GROUP_UNSPEC = 0, NHA_RES_GROUP_PAD = 0, NHA_RES_GROUP_BUCKETS = 1, NHA_RES_GROUP_IDLE_TIMER = 2, NHA_RES_GROUP_UNBALANCED_TIMER = 3, NHA_RES_GROUP_UNBALANCED_TIME = 4, __NHA_RES_GROUP_MAX = 5, }; enum { NHA_UNSPEC = 0, NHA_ID = 1, NHA_GROUP = 2, NHA_GROUP_TYPE = 3, NHA_BLACKHOLE = 4, NHA_OIF = 5, NHA_GATEWAY = 6, NHA_ENCAP_TYPE = 7, NHA_ENCAP = 8, NHA_GROUPS = 9, NHA_MASTER = 10, NHA_FDB = 11, NHA_RES_GROUP = 12, NHA_RES_BUCKET = 13, NHA_OP_FLAGS = 14, NHA_GROUP_STATS = 15, NHA_HW_STATS_ENABLE = 16, NHA_HW_STATS_USED = 17, __NHA_MAX = 18, }; enum { NLA_UNSPEC = 0, NLA_U8 = 1, NLA_U16 = 2, NLA_U32 = 3, NLA_U64 = 4, NLA_STRING = 5, NLA_FLAG = 6, NLA_MSECS = 7, NLA_NESTED = 8, NLA_NESTED_ARRAY = 9, NLA_NUL_STRING = 10, NLA_BINARY = 11, NLA_S8 = 12, NLA_S16 = 13, NLA_S32 = 14, NLA_S64 = 15, NLA_BITFIELD32 = 16, NLA_REJECT = 17, NLA_BE16 = 18, NLA_BE32 = 19, NLA_SINT = 20, NLA_UINT = 21, __NLA_TYPE_MAX = 22, }; enum { NLBL_CALIPSO_A_UNSPEC = 0, NLBL_CALIPSO_A_DOI = 1, NLBL_CALIPSO_A_MTYPE = 2, __NLBL_CALIPSO_A_MAX = 3, }; enum { NLBL_CALIPSO_C_UNSPEC = 0, NLBL_CALIPSO_C_ADD = 1, 
NLBL_CALIPSO_C_REMOVE = 2, NLBL_CALIPSO_C_LIST = 3, NLBL_CALIPSO_C_LISTALL = 4, __NLBL_CALIPSO_C_MAX = 5, }; enum { NLBL_CIPSOV4_A_UNSPEC = 0, NLBL_CIPSOV4_A_DOI = 1, NLBL_CIPSOV4_A_MTYPE = 2, NLBL_CIPSOV4_A_TAG = 3, NLBL_CIPSOV4_A_TAGLST = 4, NLBL_CIPSOV4_A_MLSLVLLOC = 5, NLBL_CIPSOV4_A_MLSLVLREM = 6, NLBL_CIPSOV4_A_MLSLVL = 7, NLBL_CIPSOV4_A_MLSLVLLST = 8, NLBL_CIPSOV4_A_MLSCATLOC = 9, NLBL_CIPSOV4_A_MLSCATREM = 10, NLBL_CIPSOV4_A_MLSCAT = 11, NLBL_CIPSOV4_A_MLSCATLST = 12, __NLBL_CIPSOV4_A_MAX = 13, }; enum { NLBL_CIPSOV4_C_UNSPEC = 0, NLBL_CIPSOV4_C_ADD = 1, NLBL_CIPSOV4_C_REMOVE = 2, NLBL_CIPSOV4_C_LIST = 3, NLBL_CIPSOV4_C_LISTALL = 4, __NLBL_CIPSOV4_C_MAX = 5, }; enum { NLBL_MGMT_A_UNSPEC = 0, NLBL_MGMT_A_DOMAIN = 1, NLBL_MGMT_A_PROTOCOL = 2, NLBL_MGMT_A_VERSION = 3, NLBL_MGMT_A_CV4DOI = 4, NLBL_MGMT_A_IPV6ADDR = 5, NLBL_MGMT_A_IPV6MASK = 6, NLBL_MGMT_A_IPV4ADDR = 7, NLBL_MGMT_A_IPV4MASK = 8, NLBL_MGMT_A_ADDRSELECTOR = 9, NLBL_MGMT_A_SELECTORLIST = 10, NLBL_MGMT_A_FAMILY = 11, NLBL_MGMT_A_CLPDOI = 12, __NLBL_MGMT_A_MAX = 13, }; enum { NLBL_MGMT_C_UNSPEC = 0, NLBL_MGMT_C_ADD = 1, NLBL_MGMT_C_REMOVE = 2, NLBL_MGMT_C_LISTALL = 3, NLBL_MGMT_C_ADDDEF = 4, NLBL_MGMT_C_REMOVEDEF = 5, NLBL_MGMT_C_LISTDEF = 6, NLBL_MGMT_C_PROTOCOLS = 7, NLBL_MGMT_C_VERSION = 8, __NLBL_MGMT_C_MAX = 9, }; enum { NLBL_UNLABEL_A_UNSPEC = 0, NLBL_UNLABEL_A_ACPTFLG = 1, NLBL_UNLABEL_A_IPV6ADDR = 2, NLBL_UNLABEL_A_IPV6MASK = 3, NLBL_UNLABEL_A_IPV4ADDR = 4, NLBL_UNLABEL_A_IPV4MASK = 5, NLBL_UNLABEL_A_IFACE = 6, NLBL_UNLABEL_A_SECCTX = 7, __NLBL_UNLABEL_A_MAX = 8, }; enum { NLBL_UNLABEL_C_UNSPEC = 0, NLBL_UNLABEL_C_ACCEPT = 1, NLBL_UNLABEL_C_LIST = 2, NLBL_UNLABEL_C_STATICADD = 3, NLBL_UNLABEL_C_STATICREMOVE = 4, NLBL_UNLABEL_C_STATICLIST = 5, NLBL_UNLABEL_C_STATICADDDEF = 6, NLBL_UNLABEL_C_STATICREMOVEDEF = 7, NLBL_UNLABEL_C_STATICLISTDEF = 8, __NLBL_UNLABEL_C_MAX = 9, }; enum { NMI_LOCAL = 0, NMI_UNKNOWN = 1, NMI_SERR = 2, NMI_IO_CHECK = 3, NMI_MAX = 4, }; enum { NONE_FORCE_HPET_RESUME = 0, OLD_ICH_FORCE_HPET_RESUME = 1, ICH_FORCE_HPET_RESUME = 2, VT8237_FORCE_HPET_RESUME = 3, NVIDIA_FORCE_HPET_RESUME = 4, ATI_FORCE_HPET_RESUME = 5, }; enum { NSIM_TRAP_ID_BASE = 93, NSIM_TRAP_ID_FID_MISS = 94, }; enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 66, }; enum { NVMEM_ADD = 1, NVMEM_REMOVE = 2, NVMEM_CELL_ADD = 3, NVMEM_CELL_REMOVE = 4, NVMEM_LAYOUT_ADD = 5, NVMEM_LAYOUT_REMOVE = 6, }; enum { NVME_AEN_BIT_NS_ATTR = 8, NVME_AEN_BIT_FW_ACT = 9, NVME_AEN_BIT_ANA_CHANGE = 11, NVME_AEN_BIT_DISC_CHANGE = 31, }; enum { NVME_CC_ENABLE = 1, NVME_CC_EN_SHIFT = 0, NVME_CC_CSS_SHIFT = 4, NVME_CC_MPS_SHIFT = 7, NVME_CC_AMS_SHIFT = 11, NVME_CC_SHN_SHIFT = 14, NVME_CC_IOSQES_SHIFT = 16, NVME_CC_IOCQES_SHIFT = 20, NVME_CC_CSS_NVM = 0, NVME_CC_CSS_CSI = 96, NVME_CC_CSS_MASK = 112, NVME_CC_AMS_RR = 0, NVME_CC_AMS_WRRU = 2048, NVME_CC_AMS_VS = 14336, NVME_CC_SHN_NONE = 0, NVME_CC_SHN_NORMAL = 16384, NVME_CC_SHN_ABRUPT = 32768, NVME_CC_SHN_MASK = 49152, NVME_CC_IOSQES = 393216, NVME_CC_IOCQES = 4194304, NVME_CC_CRIME = 16777216, }; enum { NVME_CSTS_RDY = 1, NVME_CSTS_CFS = 2, NVME_CSTS_NSSRO = 16, NVME_CSTS_PP = 32, NVME_CSTS_SHST_NORMAL = 0, NVME_CSTS_SHST_OCCUR = 4, NVME_CSTS_SHST_CMPLT = 8, NVME_CSTS_SHST_MASK = 12, }; enum { NVME_REG_CAP = 0, NVME_REG_VS = 8, NVME_REG_INTMS = 12, NVME_REG_INTMC = 16, NVME_REG_CC = 20, NVME_REG_CSTS = 28, NVME_REG_NSSR = 32, NVME_REG_AQA = 36, NVME_REG_ASQ = 40, NVME_REG_ACQ = 48, NVME_REG_CMBLOC = 56, NVME_REG_CMBSZ = 60, NVME_REG_BPINFO = 64, NVME_REG_BPRSEL = 68, NVME_REG_BPMBL = 72, 
NVME_REG_CMBMSC = 80, NVME_REG_CRTO = 104, NVME_REG_PMRCAP = 3584, NVME_REG_PMRCTL = 3588, NVME_REG_PMRSTS = 3592, NVME_REG_PMREBS = 3596, NVME_REG_PMRSWTP = 3600, NVME_REG_DBS = 4096, }; enum { OD_NORMAL_SAMPLE = 0, OD_SUB_SAMPLE = 1, }; enum { OPT_UID = 0, OPT_GID = 1, OPT_MODE = 2, OPT_DELEGATE_CMDS = 3, OPT_DELEGATE_MAPS = 4, OPT_DELEGATE_PROGS = 5, OPT_DELEGATE_ATTACHS = 6, }; enum { OVERRIDE_NONE = 0, OVERRIDE_BASE = 1, OVERRIDE_STRIDE = 2, OVERRIDE_HEIGHT = 4, OVERRIDE_WIDTH = 8, }; enum { Opt_bsd_df = 0, Opt_minix_df = 1, Opt_grpid = 2, Opt_nogrpid = 3, Opt_resgid = 4, Opt_resuid = 5, Opt_sb = 6, Opt_nouid32 = 7, Opt_debug = 8, Opt_removed = 9, Opt_user_xattr = 10, Opt_acl = 11, Opt_auto_da_alloc = 12, Opt_noauto_da_alloc = 13, Opt_noload = 14, Opt_commit = 15, Opt_min_batch_time = 16, Opt_max_batch_time = 17, Opt_journal_dev = 18, Opt_journal_path = 19, Opt_journal_checksum = 20, Opt_journal_async_commit = 21, Opt_abort = 22, Opt_data_journal = 23, Opt_data_ordered = 24, Opt_data_writeback = 25, Opt_data_err_abort = 26, Opt_data_err_ignore = 27, Opt_test_dummy_encryption = 28, Opt_inlinecrypt = 29, Opt_usrjquota = 30, Opt_grpjquota = 31, Opt_quota = 32, Opt_noquota = 33, Opt_barrier = 34, Opt_nobarrier = 35, Opt_err = 36, Opt_usrquota = 37, Opt_grpquota = 38, Opt_prjquota = 39, Opt_dax = 40, Opt_dax_always = 41, Opt_dax_inode = 42, Opt_dax_never = 43, Opt_stripe = 44, Opt_delalloc = 45, Opt_nodelalloc = 46, Opt_warn_on_error = 47, Opt_nowarn_on_error = 48, Opt_mblk_io_submit = 49, Opt_debug_want_extra_isize = 50, Opt_nomblk_io_submit = 51, Opt_block_validity = 52, Opt_noblock_validity = 53, Opt_inode_readahead_blks = 54, Opt_journal_ioprio = 55, Opt_dioread_nolock = 56, Opt_dioread_lock = 57, Opt_discard = 58, Opt_nodiscard = 59, Opt_init_itable = 60, Opt_noinit_itable = 61, Opt_max_dir_size_kb = 62, Opt_nojournal_checksum = 63, Opt_nombcache = 64, Opt_no_prefetch_block_bitmaps = 65, Opt_mb_optimize_scan = 66, Opt_errors = 67, Opt_data = 68, Opt_data_err = 69, Opt_jqfmt = 70, Opt_dax_type = 71, }; enum { Opt_err___2 = 0, Opt_enc = 1, Opt_hash = 2, }; enum { Opt_error = -1, Opt_context = 0, Opt_defcontext = 1, Opt_fscontext = 2, Opt_rootcontext = 3, Opt_seclabel = 4, }; enum { Opt_uid = 0, Opt_gid = 1, Opt_mode = 2, Opt_source = 3, }; enum { Opt_uid___2 = 0, Opt_gid___2 = 1, Opt_mode___2 = 2, }; enum { Opt_uid___3 = 0, Opt_gid___3 = 1, Opt_mode___3 = 2, Opt_ptmxmode = 3, Opt_newinstance = 4, Opt_max = 5, Opt_err___3 = 6, }; enum { PAGE_WAS_MAPPED = 1, PAGE_WAS_MLOCKED = 2, PAGE_OLD_STATES = 3, }; enum { PCI_REASSIGN_ALL_RSRC = 1, PCI_REASSIGN_ALL_BUS = 2, PCI_PROBE_ONLY = 4, PCI_CAN_SKIP_ISA_ALIGN = 8, PCI_ENABLE_PROC_DOMAINS = 16, PCI_COMPAT_DOMAIN_0 = 32, PCI_SCAN_ALL_PCIE_DEVS = 64, }; enum { PCI_STD_RESOURCES = 0, PCI_STD_RESOURCE_END = 5, PCI_ROM_RESOURCE = 6, PCI_IOV_RESOURCES = 7, PCI_IOV_RESOURCE_END = 12, PCI_BRIDGE_RESOURCES = 13, PCI_BRIDGE_RESOURCE_END = 16, PCI_NUM_RESOURCES = 17, DEVICE_COUNT_RESOURCE = 17, }; enum { PERCPU_REF_INIT_ATOMIC = 1, PERCPU_REF_INIT_DEAD = 2, PERCPU_REF_ALLOW_REINIT = 4, }; enum { PERF_BR_SPEC_NA = 0, PERF_BR_SPEC_WRONG_PATH = 1, PERF_BR_NON_SPEC_CORRECT_PATH = 2, PERF_BR_SPEC_CORRECT_PATH = 3, PERF_BR_SPEC_MAX = 4, }; enum { PERF_BR_UNKNOWN = 0, PERF_BR_COND = 1, PERF_BR_UNCOND = 2, PERF_BR_IND = 3, PERF_BR_CALL = 4, PERF_BR_IND_CALL = 5, PERF_BR_RET = 6, PERF_BR_SYSCALL = 7, PERF_BR_SYSRET = 8, PERF_BR_COND_CALL = 9, PERF_BR_COND_RET = 10, PERF_BR_ERET = 11, PERF_BR_IRQ = 12, PERF_BR_SERROR = 13, PERF_BR_NO_TX = 14, PERF_BR_EXTEND_ABI = 
15, PERF_BR_MAX = 16, }; enum { PERF_TXN_ELISION = 1ULL, PERF_TXN_TRANSACTION = 2ULL, PERF_TXN_SYNC = 4ULL, PERF_TXN_ASYNC = 8ULL, PERF_TXN_RETRY = 16ULL, PERF_TXN_CONFLICT = 32ULL, PERF_TXN_CAPACITY_WRITE = 64ULL, PERF_TXN_CAPACITY_READ = 128ULL, PERF_TXN_MAX = 256ULL, PERF_TXN_ABORT_MASK = 18446744069414584320ULL, PERF_TXN_ABORT_SHIFT = 32ULL, }; enum { PERF_X86_EVENT_PEBS_LDLAT = 1, PERF_X86_EVENT_PEBS_ST = 2, PERF_X86_EVENT_PEBS_ST_HSW = 4, PERF_X86_EVENT_PEBS_LD_HSW = 8, PERF_X86_EVENT_PEBS_NA_HSW = 16, PERF_X86_EVENT_EXCL = 32, PERF_X86_EVENT_DYNAMIC = 64, PERF_X86_EVENT_EXCL_ACCT = 256, PERF_X86_EVENT_AUTO_RELOAD = 512, PERF_X86_EVENT_LARGE_PEBS = 1024, PERF_X86_EVENT_PEBS_VIA_PT = 2048, PERF_X86_EVENT_PAIR = 4096, PERF_X86_EVENT_LBR_SELECT = 8192, PERF_X86_EVENT_TOPDOWN = 16384, PERF_X86_EVENT_PEBS_STLAT = 32768, PERF_X86_EVENT_AMD_BRS = 65536, PERF_X86_EVENT_PEBS_LAT_HYBRID = 131072, PERF_X86_EVENT_NEEDS_BRANCH_STACK = 262144, PERF_X86_EVENT_BRANCH_COUNTERS = 524288, }; enum { PER_LINUX = 0, PER_LINUX_32BIT = 8388608, PER_LINUX_FDPIC = 524288, PER_SVR4 = 68157441, PER_SVR3 = 83886082, PER_SCOSVR3 = 117440515, PER_OSR5 = 100663299, PER_WYSEV386 = 83886084, PER_ISCR4 = 67108869, PER_BSD = 6, PER_SUNOS = 67108870, PER_XENIX = 83886087, PER_LINUX32 = 8, PER_LINUX32_3GB = 134217736, PER_IRIX32 = 67108873, PER_IRIXN32 = 67108874, PER_IRIX64 = 67108875, PER_RISCOS = 12, PER_SOLARIS = 67108877, PER_UW7 = 68157454, PER_OSF4 = 15, PER_HPUX = 16, PER_MASK = 255, }; enum { PIM_TYPE_HELLO = 0, PIM_TYPE_REGISTER = 1, PIM_TYPE_REGISTER_STOP = 2, PIM_TYPE_JOIN_PRUNE = 3, PIM_TYPE_BOOTSTRAP = 4, PIM_TYPE_ASSERT = 5, PIM_TYPE_GRAFT = 6, PIM_TYPE_GRAFT_ACK = 7, PIM_TYPE_CANDIDATE_RP_ADV = 8, }; enum { PLAT8250_DEV_LEGACY = -1, PLAT8250_DEV_PLATFORM = 0, PLAT8250_DEV_PLATFORM1 = 1, PLAT8250_DEV_PLATFORM2 = 2, PLAT8250_DEV_FOURPORT = 3, PLAT8250_DEV_ACCENT = 4, PLAT8250_DEV_BOCA = 5, PLAT8250_DEV_EXAR_ST16C554 = 6, PLAT8250_DEV_HUB6 = 7, PLAT8250_DEV_AU1X00 = 8, PLAT8250_DEV_SM501 = 9, }; enum { POLICYDB_CAP_NETPEER = 0, POLICYDB_CAP_OPENPERM = 1, POLICYDB_CAP_EXTSOCKCLASS = 2, POLICYDB_CAP_ALWAYSNETWORK = 3, POLICYDB_CAP_CGROUPSECLABEL = 4, POLICYDB_CAP_NNP_NOSUID_TRANSITION = 5, POLICYDB_CAP_GENFS_SECLABEL_SYMLINKS = 6, POLICYDB_CAP_IOCTL_SKIP_CLOEXEC = 7, POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT = 8, __POLICYDB_CAP_MAX = 9, }; enum { POOL_BITS = 256, POOL_READY_BITS = 256, POOL_EARLY_BITS = 128, }; enum { POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, }; enum { POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, POWER_SUPPLY_CHARGE_TYPE_NONE = 1, POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, POWER_SUPPLY_CHARGE_TYPE_FAST = 3, POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, POWER_SUPPLY_CHARGE_TYPE_BYPASS = 8, }; enum { POWER_SUPPLY_HEALTH_UNKNOWN = 0, POWER_SUPPLY_HEALTH_GOOD = 1, POWER_SUPPLY_HEALTH_OVERHEAT = 2, POWER_SUPPLY_HEALTH_DEAD = 3, POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, POWER_SUPPLY_HEALTH_COLD = 6, POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, POWER_SUPPLY_HEALTH_OVERCURRENT = 9, POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, POWER_SUPPLY_HEALTH_WARM = 11, POWER_SUPPLY_HEALTH_COOL = 12, POWER_SUPPLY_HEALTH_HOT = 13, 
POWER_SUPPLY_HEALTH_NO_BATTERY = 14, }; enum { POWER_SUPPLY_SCOPE_UNKNOWN = 0, POWER_SUPPLY_SCOPE_SYSTEM = 1, POWER_SUPPLY_SCOPE_DEVICE = 2, }; enum { POWER_SUPPLY_STATUS_UNKNOWN = 0, POWER_SUPPLY_STATUS_CHARGING = 1, POWER_SUPPLY_STATUS_DISCHARGING = 2, POWER_SUPPLY_STATUS_NOT_CHARGING = 3, POWER_SUPPLY_STATUS_FULL = 4, }; enum { POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, POWER_SUPPLY_TECHNOLOGY_NiMH = 1, POWER_SUPPLY_TECHNOLOGY_LION = 2, POWER_SUPPLY_TECHNOLOGY_LIPO = 3, POWER_SUPPLY_TECHNOLOGY_LiFe = 4, POWER_SUPPLY_TECHNOLOGY_NiCd = 5, POWER_SUPPLY_TECHNOLOGY_LiMn = 6, }; enum { PREFIX_UNSPEC = 0, PREFIX_ADDRESS = 1, PREFIX_CACHEINFO = 2, __PREFIX_MAX = 3, }; enum { PROC_ENTRY_PERMANENT = 1, }; enum { PROC_ROOT_INO = 1, PROC_IPC_INIT_INO = 4026531839, PROC_UTS_INIT_INO = 4026531838, PROC_USER_INIT_INO = 4026531837, PROC_PID_INIT_INO = 4026531836, PROC_CGROUP_INIT_INO = 4026531835, PROC_TIME_INIT_INO = 4026531834, }; enum { PSS = 0, PPC = 1, }; enum { QUEUE_FLAG_DYING = 0, QUEUE_FLAG_NOMERGES = 1, QUEUE_FLAG_SAME_COMP = 2, QUEUE_FLAG_FAIL_IO = 3, QUEUE_FLAG_NOXMERGES = 4, QUEUE_FLAG_SAME_FORCE = 5, QUEUE_FLAG_INIT_DONE = 6, QUEUE_FLAG_STATS = 7, QUEUE_FLAG_REGISTERED = 8, QUEUE_FLAG_QUIESCED = 9, QUEUE_FLAG_RQ_ALLOC_TIME = 10, QUEUE_FLAG_HCTX_ACTIVE = 11, QUEUE_FLAG_SQ_SCHED = 12, QUEUE_FLAG_MAX = 13, }; enum { Q_REQUEUE_PI_NONE = 0, Q_REQUEUE_PI_IGNORE = 1, Q_REQUEUE_PI_IN_PROGRESS = 2, Q_REQUEUE_PI_WAIT = 3, Q_REQUEUE_PI_DONE = 4, Q_REQUEUE_PI_LOCKED = 5, }; enum { RADIX_TREE_ITER_TAG_MASK = 15, RADIX_TREE_ITER_TAGGED = 16, RADIX_TREE_ITER_CONTIG = 32, }; enum { RB_ADD_STAMP_NONE = 0, RB_ADD_STAMP_EXTEND = 2, RB_ADD_STAMP_ABSOLUTE = 4, RB_ADD_STAMP_FORCE = 8, }; enum { RB_CTX_TRANSITION = 0, RB_CTX_NMI = 1, RB_CTX_IRQ = 2, RB_CTX_SOFTIRQ = 3, RB_CTX_NORMAL = 4, RB_CTX_MAX = 5, }; enum { RB_LEN_TIME_EXTEND = 8, RB_LEN_TIME_STAMP = 8, }; enum { REASON_BOUNDS = -1, REASON_TYPE = -2, REASON_PATHS = -3, REASON_LIMIT = -4, REASON_STACK = -5, }; enum { REGION_INTERSECTS = 0, REGION_DISJOINT = 1, REGION_MIXED = 2, }; enum { REQ_FSEQ_PREFLUSH = 1, REQ_FSEQ_DATA = 2, REQ_FSEQ_POSTFLUSH = 4, REQ_FSEQ_DONE = 8, REQ_FSEQ_ACTIONS = 7, FLUSH_PENDING_TIMEOUT = 5000, }; enum { REQ_F_FIXED_FILE = 1ULL, REQ_F_IO_DRAIN = 2ULL, REQ_F_LINK = 4ULL, REQ_F_HARDLINK = 8ULL, REQ_F_FORCE_ASYNC = 16ULL, REQ_F_BUFFER_SELECT = 32ULL, REQ_F_CQE_SKIP = 64ULL, REQ_F_FAIL = 256ULL, REQ_F_INFLIGHT = 512ULL, REQ_F_CUR_POS = 1024ULL, REQ_F_NOWAIT = 2048ULL, REQ_F_LINK_TIMEOUT = 4096ULL, REQ_F_NEED_CLEANUP = 8192ULL, REQ_F_POLLED = 16384ULL, REQ_F_BUFFER_SELECTED = 32768ULL, REQ_F_BUFFER_RING = 65536ULL, REQ_F_REISSUE = 131072ULL, REQ_F_SUPPORT_NOWAIT = 268435456ULL, REQ_F_ISREG = 536870912ULL, REQ_F_CREDS = 262144ULL, REQ_F_REFCOUNT = 524288ULL, REQ_F_ARM_LTIMEOUT = 1048576ULL, REQ_F_ASYNC_DATA = 2097152ULL, REQ_F_SKIP_LINK_CQES = 4194304ULL, REQ_F_SINGLE_POLL = 8388608ULL, REQ_F_DOUBLE_POLL = 16777216ULL, REQ_F_APOLL_MULTISHOT = 33554432ULL, REQ_F_CLEAR_POLLIN = 67108864ULL, REQ_F_HASH_LOCKED = 134217728ULL, REQ_F_POLL_NO_LAZY = 1073741824ULL, REQ_F_CAN_POLL = 2147483648ULL, REQ_F_BL_EMPTY = 4294967296ULL, REQ_F_BL_NO_RECYCLE = 8589934592ULL, REQ_F_BUFFERS_COMMIT = 17179869184ULL, }; enum { REQ_F_FIXED_FILE_BIT = 0, REQ_F_IO_DRAIN_BIT = 1, REQ_F_LINK_BIT = 2, REQ_F_HARDLINK_BIT = 3, REQ_F_FORCE_ASYNC_BIT = 4, REQ_F_BUFFER_SELECT_BIT = 5, REQ_F_CQE_SKIP_BIT = 6, REQ_F_FAIL_BIT = 8, REQ_F_INFLIGHT_BIT = 9, REQ_F_CUR_POS_BIT = 10, REQ_F_NOWAIT_BIT = 11, REQ_F_LINK_TIMEOUT_BIT = 12, REQ_F_NEED_CLEANUP_BIT = 13, REQ_F_POLLED_BIT = 
14, REQ_F_BUFFER_SELECTED_BIT = 15, REQ_F_BUFFER_RING_BIT = 16, REQ_F_REISSUE_BIT = 17, REQ_F_CREDS_BIT = 18, REQ_F_REFCOUNT_BIT = 19, REQ_F_ARM_LTIMEOUT_BIT = 20, REQ_F_ASYNC_DATA_BIT = 21, REQ_F_SKIP_LINK_CQES_BIT = 22, REQ_F_SINGLE_POLL_BIT = 23, REQ_F_DOUBLE_POLL_BIT = 24, REQ_F_APOLL_MULTISHOT_BIT = 25, REQ_F_CLEAR_POLLIN_BIT = 26, REQ_F_HASH_LOCKED_BIT = 27, REQ_F_SUPPORT_NOWAIT_BIT = 28, REQ_F_ISREG_BIT = 29, REQ_F_POLL_NO_LAZY_BIT = 30, REQ_F_CAN_POLL_BIT = 31, REQ_F_BL_EMPTY_BIT = 32, REQ_F_BL_NO_RECYCLE_BIT = 33, REQ_F_BUFFERS_COMMIT_BIT = 34, __REQ_F_LAST_BIT = 35, }; enum { RES_USAGE = 0, RES_RSVD_USAGE = 1, RES_LIMIT = 2, RES_RSVD_LIMIT = 3, RES_MAX_USAGE = 4, RES_RSVD_MAX_USAGE = 5, RES_FAILCNT = 6, RES_RSVD_FAILCNT = 7, }; enum { RTAX_UNSPEC = 0, RTAX_LOCK = 1, RTAX_MTU = 2, RTAX_WINDOW = 3, RTAX_RTT = 4, RTAX_RTTVAR = 5, RTAX_SSTHRESH = 6, RTAX_CWND = 7, RTAX_ADVMSS = 8, RTAX_REORDERING = 9, RTAX_HOPLIMIT = 10, RTAX_INITCWND = 11, RTAX_FEATURES = 12, RTAX_RTO_MIN = 13, RTAX_INITRWND = 14, RTAX_QUICKACK = 15, RTAX_CC_ALGO = 16, RTAX_FASTOPEN_NO_COOKIE = 17, __RTAX_MAX = 18, }; enum { RTM_BASE = 16, RTM_NEWLINK = 16, RTM_DELLINK = 17, RTM_GETLINK = 18, RTM_SETLINK = 19, RTM_NEWADDR = 20, RTM_DELADDR = 21, RTM_GETADDR = 22, RTM_NEWROUTE = 24, RTM_DELROUTE = 25, RTM_GETROUTE = 26, RTM_NEWNEIGH = 28, RTM_DELNEIGH = 29, RTM_GETNEIGH = 30, RTM_NEWRULE = 32, RTM_DELRULE = 33, RTM_GETRULE = 34, RTM_NEWQDISC = 36, RTM_DELQDISC = 37, RTM_GETQDISC = 38, RTM_NEWTCLASS = 40, RTM_DELTCLASS = 41, RTM_GETTCLASS = 42, RTM_NEWTFILTER = 44, RTM_DELTFILTER = 45, RTM_GETTFILTER = 46, RTM_NEWACTION = 48, RTM_DELACTION = 49, RTM_GETACTION = 50, RTM_NEWPREFIX = 52, RTM_GETMULTICAST = 58, RTM_GETANYCAST = 62, RTM_NEWNEIGHTBL = 64, RTM_GETNEIGHTBL = 66, RTM_SETNEIGHTBL = 67, RTM_NEWNDUSEROPT = 68, RTM_NEWADDRLABEL = 72, RTM_DELADDRLABEL = 73, RTM_GETADDRLABEL = 74, RTM_GETDCB = 78, RTM_SETDCB = 79, RTM_NEWNETCONF = 80, RTM_DELNETCONF = 81, RTM_GETNETCONF = 82, RTM_NEWMDB = 84, RTM_DELMDB = 85, RTM_GETMDB = 86, RTM_NEWNSID = 88, RTM_DELNSID = 89, RTM_GETNSID = 90, RTM_NEWSTATS = 92, RTM_GETSTATS = 94, RTM_SETSTATS = 95, RTM_NEWCACHEREPORT = 96, RTM_NEWCHAIN = 100, RTM_DELCHAIN = 101, RTM_GETCHAIN = 102, RTM_NEWNEXTHOP = 104, RTM_DELNEXTHOP = 105, RTM_GETNEXTHOP = 106, RTM_NEWLINKPROP = 108, RTM_DELLINKPROP = 109, RTM_GETLINKPROP = 110, RTM_NEWVLAN = 112, RTM_DELVLAN = 113, RTM_GETVLAN = 114, RTM_NEWNEXTHOPBUCKET = 116, RTM_DELNEXTHOPBUCKET = 117, RTM_GETNEXTHOPBUCKET = 118, RTM_NEWTUNNEL = 120, RTM_DELTUNNEL = 121, RTM_GETTUNNEL = 122, __RTM_MAX = 123, }; enum { RTN_UNSPEC = 0, RTN_UNICAST = 1, RTN_LOCAL = 2, RTN_BROADCAST = 3, RTN_ANYCAST = 4, RTN_MULTICAST = 5, RTN_BLACKHOLE = 6, RTN_UNREACHABLE = 7, RTN_PROHIBIT = 8, RTN_THROW = 9, RTN_NAT = 10, RTN_XRESOLVE = 11, __RTN_MAX = 12, }; enum { Root_NFS = 255, Root_CIFS = 254, Root_Generic = 253, Root_RAM0 = 1048576, }; enum { SAMPLES = 8, MIN_CHANGE = 5, }; enum { SB_UNFROZEN = 0, SB_FREEZE_WRITE = 1, SB_FREEZE_PAGEFAULT = 2, SB_FREEZE_FS = 3, SB_FREEZE_COMPLETE = 4, }; enum { SCM_TSTAMP_SND = 0, SCM_TSTAMP_SCHED = 1, SCM_TSTAMP_ACK = 2, }; enum { SCTP_AUTH_HMAC_ID_RESERVED_0 = 0, SCTP_AUTH_HMAC_ID_SHA1 = 1, SCTP_AUTH_HMAC_ID_RESERVED_2 = 2, SCTP_AUTH_HMAC_ID_SHA256 = 3, __SCTP_AUTH_HMAC_MAX = 4, }; enum { SCTP_CHUNK_FLAG_T = 1, }; enum { SCTP_MAX_DUP_TSNS = 16, }; enum { SCTP_MAX_STREAM = 65535, }; enum { SD_BALANCE_NEWIDLE = 1, SD_BALANCE_EXEC = 2, SD_BALANCE_FORK = 4, SD_BALANCE_WAKE = 8, SD_WAKE_AFFINE = 16, SD_ASYM_CPUCAPACITY = 32, 
SD_ASYM_CPUCAPACITY_FULL = 64, SD_SHARE_CPUCAPACITY = 128, SD_CLUSTER = 256, SD_SHARE_LLC = 512, SD_SERIALIZE = 1024, SD_ASYM_PACKING = 2048, SD_PREFER_SIBLING = 4096, SD_OVERLAP = 8192, SD_NUMA = 16384, }; enum { SECTION_MARKED_PRESENT_BIT = 0, SECTION_HAS_MEM_MAP_BIT = 1, SECTION_IS_ONLINE_BIT = 2, SECTION_IS_EARLY_BIT = 3, SECTION_MAP_LAST_BIT = 4, }; enum { SEG6_ATTR_UNSPEC = 0, SEG6_ATTR_DST = 1, SEG6_ATTR_DSTLEN = 2, SEG6_ATTR_HMACKEYID = 3, SEG6_ATTR_SECRET = 4, SEG6_ATTR_SECRETLEN = 5, SEG6_ATTR_ALGID = 6, SEG6_ATTR_HMACINFO = 7, __SEG6_ATTR_MAX = 8, }; enum { SEG6_CMD_UNSPEC = 0, SEG6_CMD_SETHMAC = 1, SEG6_CMD_DUMPHMAC = 2, SEG6_CMD_SET_TUNSRC = 3, SEG6_CMD_GET_TUNSRC = 4, __SEG6_CMD_MAX = 5, }; enum { SEG6_IPTUNNEL_UNSPEC = 0, SEG6_IPTUNNEL_SRH = 1, __SEG6_IPTUNNEL_MAX = 2, }; enum { SEG6_IPTUN_MODE_INLINE = 0, SEG6_IPTUN_MODE_ENCAP = 1, SEG6_IPTUN_MODE_L2ENCAP = 2, SEG6_IPTUN_MODE_ENCAP_RED = 3, SEG6_IPTUN_MODE_L2ENCAP_RED = 4, }; enum { SEG6_LOCAL_ACTION_UNSPEC = 0, SEG6_LOCAL_ACTION_END = 1, SEG6_LOCAL_ACTION_END_X = 2, SEG6_LOCAL_ACTION_END_T = 3, SEG6_LOCAL_ACTION_END_DX2 = 4, SEG6_LOCAL_ACTION_END_DX6 = 5, SEG6_LOCAL_ACTION_END_DX4 = 6, SEG6_LOCAL_ACTION_END_DT6 = 7, SEG6_LOCAL_ACTION_END_DT4 = 8, SEG6_LOCAL_ACTION_END_B6 = 9, SEG6_LOCAL_ACTION_END_B6_ENCAP = 10, SEG6_LOCAL_ACTION_END_BM = 11, SEG6_LOCAL_ACTION_END_S = 12, SEG6_LOCAL_ACTION_END_AS = 13, SEG6_LOCAL_ACTION_END_AM = 14, SEG6_LOCAL_ACTION_END_BPF = 15, SEG6_LOCAL_ACTION_END_DT46 = 16, __SEG6_LOCAL_ACTION_MAX = 17, }; enum { SEG6_LOCAL_BPF_PROG_UNSPEC = 0, SEG6_LOCAL_BPF_PROG = 1, SEG6_LOCAL_BPF_PROG_NAME = 2, __SEG6_LOCAL_BPF_PROG_MAX = 3, }; enum { SEG6_LOCAL_CNT_UNSPEC = 0, SEG6_LOCAL_CNT_PAD = 1, SEG6_LOCAL_CNT_PACKETS = 2, SEG6_LOCAL_CNT_BYTES = 3, SEG6_LOCAL_CNT_ERRORS = 4, __SEG6_LOCAL_CNT_MAX = 5, }; enum { SEG6_LOCAL_FLV_OP_UNSPEC = 0, SEG6_LOCAL_FLV_OP_PSP = 1, SEG6_LOCAL_FLV_OP_USP = 2, SEG6_LOCAL_FLV_OP_USD = 3, SEG6_LOCAL_FLV_OP_NEXT_CSID = 4, __SEG6_LOCAL_FLV_OP_MAX = 5, }; enum { SEG6_LOCAL_FLV_UNSPEC = 0, SEG6_LOCAL_FLV_OPERATION = 1, SEG6_LOCAL_FLV_LCBLOCK_BITS = 2, SEG6_LOCAL_FLV_LCNODE_FN_BITS = 3, __SEG6_LOCAL_FLV_MAX = 4, }; enum { SEG6_LOCAL_UNSPEC = 0, SEG6_LOCAL_ACTION = 1, SEG6_LOCAL_SRH = 2, SEG6_LOCAL_TABLE = 3, SEG6_LOCAL_NH4 = 4, SEG6_LOCAL_NH6 = 5, SEG6_LOCAL_IIF = 6, SEG6_LOCAL_OIF = 7, SEG6_LOCAL_BPF = 8, SEG6_LOCAL_VRFTABLE = 9, SEG6_LOCAL_COUNTERS = 10, SEG6_LOCAL_FLAVORS = 11, __SEG6_LOCAL_MAX = 12, }; enum { SELNL_MSG_SETENFORCE = 16, SELNL_MSG_POLICYLOAD = 17, SELNL_MSG_MAX = 18, }; enum { SFF8024_ID_UNK = 0, SFF8024_ID_SFF_8472 = 2, SFF8024_ID_SFP = 3, SFF8024_ID_DWDM_SFP = 11, SFF8024_ID_QSFP_8438 = 12, SFF8024_ID_QSFP_8436_8636 = 13, SFF8024_ID_QSFP28_8636 = 17, SFF8024_ID_QSFP_DD = 24, SFF8024_ID_OSFP = 25, SFF8024_ID_DSFP = 27, SFF8024_ID_QSFP_PLUS_CMIS = 30, SFF8024_ID_SFP_DD_CMIS = 31, SFF8024_ID_SFP_PLUS_CMIS = 32, SFF8024_ENCODING_UNSPEC = 0, SFF8024_ENCODING_8B10B = 1, SFF8024_ENCODING_4B5B = 2, SFF8024_ENCODING_NRZ = 3, SFF8024_ENCODING_8472_MANCHESTER = 4, SFF8024_ENCODING_8472_SONET = 5, SFF8024_ENCODING_8472_64B66B = 6, SFF8024_ENCODING_8436_MANCHESTER = 6, SFF8024_ENCODING_8436_SONET = 4, SFF8024_ENCODING_8436_64B66B = 5, SFF8024_ENCODING_256B257B = 7, SFF8024_ENCODING_PAM4 = 8, SFF8024_CONNECTOR_UNSPEC = 0, SFF8024_CONNECTOR_SC = 1, SFF8024_CONNECTOR_FIBERJACK = 6, SFF8024_CONNECTOR_LC = 7, SFF8024_CONNECTOR_MT_RJ = 8, SFF8024_CONNECTOR_MU = 9, SFF8024_CONNECTOR_SG = 10, SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, SFF8024_CONNECTOR_MPO_1X12 = 12, 
SFF8024_CONNECTOR_MPO_2X16 = 13, SFF8024_CONNECTOR_HSSDC_II = 32, SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, SFF8024_CONNECTOR_RJ45 = 34, SFF8024_CONNECTOR_NOSEPARATE = 35, SFF8024_CONNECTOR_MXC_2X16 = 36, SFF8024_ECC_UNSPEC = 0, SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, SFF8024_ECC_100GBASE_SR10 = 5, SFF8024_ECC_100GBASE_CR4 = 11, SFF8024_ECC_25GBASE_CR_S = 12, SFF8024_ECC_25GBASE_CR_N = 13, SFF8024_ECC_10GBASE_T_SFI = 22, SFF8024_ECC_10GBASE_T_SR = 28, SFF8024_ECC_5GBASE_T = 29, SFF8024_ECC_2_5GBASE_T = 30, }; enum { SFP_PHYS_ID = 0, SFP_PHYS_EXT_ID = 1, SFP_PHYS_EXT_ID_SFP = 4, SFP_CONNECTOR = 2, SFP_COMPLIANCE = 3, SFP_ENCODING = 11, SFP_BR_NOMINAL = 12, SFP_RATE_ID = 13, SFF_RID_8079 = 1, SFF_RID_8431_RX_ONLY = 2, SFF_RID_8431_TX_ONLY = 4, SFF_RID_8431 = 6, SFF_RID_10G8G = 14, SFP_LINK_LEN_SM_KM = 14, SFP_LINK_LEN_SM_100M = 15, SFP_LINK_LEN_50UM_OM2_10M = 16, SFP_LINK_LEN_62_5UM_OM1_10M = 17, SFP_LINK_LEN_COPPER_1M = 18, SFP_LINK_LEN_50UM_OM4_10M = 18, SFP_LINK_LEN_50UM_OM3_10M = 19, SFP_VENDOR_NAME = 20, SFP_VENDOR_OUI = 37, SFP_VENDOR_PN = 40, SFP_VENDOR_REV = 56, SFP_OPTICAL_WAVELENGTH_MSB = 60, SFP_OPTICAL_WAVELENGTH_LSB = 61, SFP_CABLE_SPEC = 60, SFP_CC_BASE = 63, SFP_OPTIONS = 64, SFP_OPTIONS_HIGH_POWER_LEVEL = 8192, SFP_OPTIONS_PAGING_A2 = 4096, SFP_OPTIONS_RETIMER = 2048, SFP_OPTIONS_COOLED_XCVR = 1024, SFP_OPTIONS_POWER_DECL = 512, SFP_OPTIONS_RX_LINEAR_OUT = 256, SFP_OPTIONS_RX_DECISION_THRESH = 128, SFP_OPTIONS_TUNABLE_TX = 64, SFP_OPTIONS_RATE_SELECT = 32, SFP_OPTIONS_TX_DISABLE = 16, SFP_OPTIONS_TX_FAULT = 8, SFP_OPTIONS_LOS_INVERTED = 4, SFP_OPTIONS_LOS_NORMAL = 2, SFP_BR_MAX = 66, SFP_BR_MIN = 67, SFP_VENDOR_SN = 68, SFP_DATECODE = 84, SFP_DIAGMON = 92, SFP_DIAGMON_DDM = 64, SFP_DIAGMON_INT_CAL = 32, SFP_DIAGMON_EXT_CAL = 16, SFP_DIAGMON_RXPWR_AVG = 8, SFP_DIAGMON_ADDRMODE = 4, SFP_ENHOPTS = 93, SFP_ENHOPTS_ALARMWARN = 128, SFP_ENHOPTS_SOFT_TX_DISABLE = 64, SFP_ENHOPTS_SOFT_TX_FAULT = 32, SFP_ENHOPTS_SOFT_RX_LOS = 16, SFP_ENHOPTS_SOFT_RATE_SELECT = 8, SFP_ENHOPTS_APP_SELECT_SFF8079 = 4, SFP_ENHOPTS_SOFT_RATE_SFF8431 = 2, SFP_SFF8472_COMPLIANCE = 94, SFP_SFF8472_COMPLIANCE_NONE = 0, SFP_SFF8472_COMPLIANCE_REV9_3 = 1, SFP_SFF8472_COMPLIANCE_REV9_5 = 2, SFP_SFF8472_COMPLIANCE_REV10_2 = 3, SFP_SFF8472_COMPLIANCE_REV10_4 = 4, SFP_SFF8472_COMPLIANCE_REV11_0 = 5, SFP_SFF8472_COMPLIANCE_REV11_3 = 6, SFP_SFF8472_COMPLIANCE_REV11_4 = 7, SFP_SFF8472_COMPLIANCE_REV12_0 = 8, SFP_CC_EXT = 95, }; enum { SKBFL_ZEROCOPY_ENABLE = 1, SKBFL_SHARED_FRAG = 2, SKBFL_PURE_ZEROCOPY = 4, SKBFL_DONT_ORPHAN = 8, SKBFL_MANAGED_FRAG_REFS = 16, }; enum { SKBTX_HW_TSTAMP = 1, SKBTX_SW_TSTAMP = 2, SKBTX_IN_PROGRESS = 4, SKBTX_HW_TSTAMP_USE_CYCLES = 8, SKBTX_WIFI_STATUS = 16, SKBTX_HW_TSTAMP_NETDEV = 32, SKBTX_SCHED_TSTAMP = 64, }; enum { SKB_FCLONE_UNAVAILABLE = 0, SKB_FCLONE_ORIG = 1, SKB_FCLONE_CLONE = 2, }; enum { SKB_GSO_TCPV4 = 1, SKB_GSO_DODGY = 2, SKB_GSO_TCP_ECN = 4, SKB_GSO_TCP_FIXEDID = 8, SKB_GSO_TCPV6 = 16, SKB_GSO_FCOE = 32, SKB_GSO_GRE = 64, SKB_GSO_GRE_CSUM = 128, SKB_GSO_IPXIP4 = 256, SKB_GSO_IPXIP6 = 512, SKB_GSO_UDP_TUNNEL = 1024, SKB_GSO_UDP_TUNNEL_CSUM = 2048, SKB_GSO_PARTIAL = 4096, SKB_GSO_TUNNEL_REMCSUM = 8192, SKB_GSO_SCTP = 16384, SKB_GSO_ESP = 32768, SKB_GSO_UDP = 65536, SKB_GSO_UDP_L4 = 131072, SKB_GSO_FRAGLIST = 262144, }; enum { SKCIPHER_WALK_PHYS = 1, SKCIPHER_WALK_SLOW = 2, SKCIPHER_WALK_COPY = 4, SKCIPHER_WALK_DIFF = 8, SKCIPHER_WALK_SLEEP = 16, }; enum { 
SKX_PCI_UNCORE_IMC = 0, SKX_PCI_UNCORE_M2M = 1, SKX_PCI_UNCORE_UPI = 2, SKX_PCI_UNCORE_M2PCIE = 3, SKX_PCI_UNCORE_M3UPI = 4, }; enum { SK_DIAG_BPF_STORAGE_NONE = 0, SK_DIAG_BPF_STORAGE_PAD = 1, SK_DIAG_BPF_STORAGE_MAP_ID = 2, SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, __SK_DIAG_BPF_STORAGE_MAX = 4, }; enum { SK_DIAG_BPF_STORAGE_REP_NONE = 0, SK_DIAG_BPF_STORAGE = 1, __SK_DIAG_BPF_STORAGE_REP_MAX = 2, }; enum { SK_DIAG_BPF_STORAGE_REQ_NONE = 0, SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, }; enum { SK_MEMINFO_RMEM_ALLOC = 0, SK_MEMINFO_RCVBUF = 1, SK_MEMINFO_WMEM_ALLOC = 2, SK_MEMINFO_SNDBUF = 3, SK_MEMINFO_FWD_ALLOC = 4, SK_MEMINFO_WMEM_QUEUED = 5, SK_MEMINFO_OPTMEM = 6, SK_MEMINFO_BACKLOG = 7, SK_MEMINFO_DROPS = 8, SK_MEMINFO_VARS = 9, }; enum { SNBEP_PCI_QPI_PORT0_FILTER = 0, SNBEP_PCI_QPI_PORT1_FILTER = 1, BDX_PCI_QPI_PORT2_FILTER = 2, }; enum { SNBEP_PCI_UNCORE_HA = 0, SNBEP_PCI_UNCORE_IMC = 1, SNBEP_PCI_UNCORE_QPI = 2, SNBEP_PCI_UNCORE_R2PCIE = 3, SNBEP_PCI_UNCORE_R3QPI = 4, }; enum { SNB_PCI_UNCORE_IMC = 0, }; enum { SNR_PCI_UNCORE_M2M = 0, SNR_PCI_UNCORE_PCIE3 = 1, }; enum { SNR_QAT_PMON_ID = 0, SNR_CBDMA_DMI_PMON_ID = 1, SNR_NIS_PMON_ID = 2, SNR_DLB_PMON_ID = 3, SNR_PCIE_GEN3_PMON_ID = 4, }; enum { SOCK_WAKE_IO = 0, SOCK_WAKE_WAITD = 1, SOCK_WAKE_SPACE = 2, SOCK_WAKE_URG = 3, }; enum { SOF_TIMESTAMPING_TX_HARDWARE = 1, SOF_TIMESTAMPING_TX_SOFTWARE = 2, SOF_TIMESTAMPING_RX_HARDWARE = 4, SOF_TIMESTAMPING_RX_SOFTWARE = 8, SOF_TIMESTAMPING_SOFTWARE = 16, SOF_TIMESTAMPING_SYS_HARDWARE = 32, SOF_TIMESTAMPING_RAW_HARDWARE = 64, SOF_TIMESTAMPING_OPT_ID = 128, SOF_TIMESTAMPING_TX_SCHED = 256, SOF_TIMESTAMPING_TX_ACK = 512, SOF_TIMESTAMPING_OPT_CMSG = 1024, SOF_TIMESTAMPING_OPT_TSONLY = 2048, SOF_TIMESTAMPING_OPT_STATS = 4096, SOF_TIMESTAMPING_OPT_PKTINFO = 8192, SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, SOF_TIMESTAMPING_BIND_PHC = 32768, SOF_TIMESTAMPING_OPT_ID_TCP = 65536, SOF_TIMESTAMPING_OPT_RX_FILTER = 131072, SOF_TIMESTAMPING_LAST = 131072, SOF_TIMESTAMPING_MASK = 262143, }; enum { SUN_WHOLE_DISK = 5, LINUX_RAID_PARTITION___2 = 253, }; enum { SWITCHTEC_GAS_MRPC_OFFSET = 0, SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, SWITCHTEC_GAS_NTB_OFFSET = 65536, SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, }; enum { SWITCHTEC_NTB_REG_INFO_OFFSET = 0, SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, }; enum { SWP_USED = 1, SWP_WRITEOK = 2, SWP_DISCARDABLE = 4, SWP_DISCARDING = 8, SWP_SOLIDSTATE = 16, SWP_CONTINUED = 32, SWP_BLKDEV = 64, SWP_ACTIVATED = 128, SWP_FS_OPS = 256, SWP_AREA_DISCARD = 512, SWP_PAGE_DISCARD = 1024, SWP_STABLE_WRITES = 2048, SWP_SYNCHRONOUS_IO = 4096, SWP_SCANNING = 16384, }; enum { TASKLET_STATE_SCHED = 0, TASKLET_STATE_RUN = 1, }; enum { TASKSTATS_CMD_ATTR_UNSPEC = 0, TASKSTATS_CMD_ATTR_PID = 1, TASKSTATS_CMD_ATTR_TGID = 2, TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, __TASKSTATS_CMD_ATTR_MAX = 5, }; enum { TASKSTATS_CMD_UNSPEC = 0, TASKSTATS_CMD_GET = 1, TASKSTATS_CMD_NEW = 2, __TASKSTATS_CMD_MAX = 3, }; enum { TASKSTATS_TYPE_UNSPEC = 0, TASKSTATS_TYPE_PID = 1, TASKSTATS_TYPE_TGID = 2, TASKSTATS_TYPE_STATS = 3, TASKSTATS_TYPE_AGGR_PID = 4, TASKSTATS_TYPE_AGGR_TGID = 5, TASKSTATS_TYPE_NULL = 6, __TASKSTATS_TYPE_MAX = 7, }; enum { TASK_COMM_LEN = 16, }; enum { TCA_ACT_BPF_UNSPEC = 0, TCA_ACT_BPF_TM = 1, TCA_ACT_BPF_PARMS = 2, TCA_ACT_BPF_OPS_LEN = 3, 
TCA_ACT_BPF_OPS = 4, TCA_ACT_BPF_FD = 5, TCA_ACT_BPF_NAME = 6, TCA_ACT_BPF_PAD = 7, TCA_ACT_BPF_TAG = 8, TCA_ACT_BPF_ID = 9, __TCA_ACT_BPF_MAX = 10, }; enum { TCA_ACT_UNSPEC = 0, TCA_ACT_KIND = 1, TCA_ACT_OPTIONS = 2, TCA_ACT_INDEX = 3, TCA_ACT_STATS = 4, TCA_ACT_PAD = 5, TCA_ACT_COOKIE = 6, TCA_ACT_FLAGS = 7, TCA_ACT_HW_STATS = 8, TCA_ACT_USED_HW_STATS = 9, TCA_ACT_IN_HW_COUNT = 10, __TCA_ACT_MAX = 11, }; enum { TCA_BPF_UNSPEC = 0, TCA_BPF_ACT = 1, TCA_BPF_POLICE = 2, TCA_BPF_CLASSID = 3, TCA_BPF_OPS_LEN = 4, TCA_BPF_OPS = 5, TCA_BPF_FD = 6, TCA_BPF_NAME = 7, TCA_BPF_FLAGS = 8, TCA_BPF_FLAGS_GEN = 9, TCA_BPF_TAG = 10, TCA_BPF_ID = 11, __TCA_BPF_MAX = 12, }; enum { TCA_CGROUP_UNSPEC = 0, TCA_CGROUP_ACT = 1, TCA_CGROUP_POLICE = 2, TCA_CGROUP_EMATCHES = 3, __TCA_CGROUP_MAX = 4, }; enum { TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1, TCA_CSUM_UPDATE_FLAG_ICMP = 2, TCA_CSUM_UPDATE_FLAG_IGMP = 4, TCA_CSUM_UPDATE_FLAG_TCP = 8, TCA_CSUM_UPDATE_FLAG_UDP = 16, TCA_CSUM_UPDATE_FLAG_UDPLITE = 32, TCA_CSUM_UPDATE_FLAG_SCTP = 64, }; enum { TCA_EMATCH_TREE_UNSPEC = 0, TCA_EMATCH_TREE_HDR = 1, TCA_EMATCH_TREE_LIST = 2, __TCA_EMATCH_TREE_MAX = 3, }; enum { TCA_FLOWER_KEY_CFM_OPT_UNSPEC = 0, TCA_FLOWER_KEY_CFM_MD_LEVEL = 1, TCA_FLOWER_KEY_CFM_OPCODE = 2, __TCA_FLOWER_KEY_CFM_OPT_MAX = 3, }; enum { TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, __TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, }; enum { TCA_FLOWER_KEY_ENC_OPTS_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPTS_GENEVE = 1, TCA_FLOWER_KEY_ENC_OPTS_VXLAN = 2, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN = 3, TCA_FLOWER_KEY_ENC_OPTS_GTP = 4, TCA_FLOWER_KEY_ENC_OPTS_PFCP = 5, __TCA_FLOWER_KEY_ENC_OPTS_MAX = 6, }; enum { TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER = 1, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX = 2, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR = 3, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID = 4, __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX = 5, }; enum { TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS = 1, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE = 2, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA = 3, __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX = 4, }; enum { TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE = 1, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI = 2, __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX = 3, }; enum { TCA_FLOWER_KEY_ENC_OPT_PFCP_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE = 1, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID = 2, __TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX = 3, }; enum { TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC = 0, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP = 1, __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX = 2, }; enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = 1, TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = 2, TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = 4, TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = 8, TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = 16, TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = 32, __TCA_FLOWER_KEY_FLAGS_MAX = 33, }; enum { TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC = 0, TCA_FLOWER_KEY_MPLS_OPTS_LSE = 1, __TCA_FLOWER_KEY_MPLS_OPTS_MAX = 2, }; enum { TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC = 0, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH = 1, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL = 2, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS = 3, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC = 4, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL = 5, __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX = 6, }; enum { TCA_FLOWER_UNSPEC = 0, TCA_FLOWER_CLASSID = 1, TCA_FLOWER_INDEV = 2, TCA_FLOWER_ACT = 3, TCA_FLOWER_KEY_ETH_DST = 4, 
TCA_FLOWER_KEY_ETH_DST_MASK = 5, TCA_FLOWER_KEY_ETH_SRC = 6, TCA_FLOWER_KEY_ETH_SRC_MASK = 7, TCA_FLOWER_KEY_ETH_TYPE = 8, TCA_FLOWER_KEY_IP_PROTO = 9, TCA_FLOWER_KEY_IPV4_SRC = 10, TCA_FLOWER_KEY_IPV4_SRC_MASK = 11, TCA_FLOWER_KEY_IPV4_DST = 12, TCA_FLOWER_KEY_IPV4_DST_MASK = 13, TCA_FLOWER_KEY_IPV6_SRC = 14, TCA_FLOWER_KEY_IPV6_SRC_MASK = 15, TCA_FLOWER_KEY_IPV6_DST = 16, TCA_FLOWER_KEY_IPV6_DST_MASK = 17, TCA_FLOWER_KEY_TCP_SRC = 18, TCA_FLOWER_KEY_TCP_DST = 19, TCA_FLOWER_KEY_UDP_SRC = 20, TCA_FLOWER_KEY_UDP_DST = 21, TCA_FLOWER_FLAGS = 22, TCA_FLOWER_KEY_VLAN_ID = 23, TCA_FLOWER_KEY_VLAN_PRIO = 24, TCA_FLOWER_KEY_VLAN_ETH_TYPE = 25, TCA_FLOWER_KEY_ENC_KEY_ID = 26, TCA_FLOWER_KEY_ENC_IPV4_SRC = 27, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK = 28, TCA_FLOWER_KEY_ENC_IPV4_DST = 29, TCA_FLOWER_KEY_ENC_IPV4_DST_MASK = 30, TCA_FLOWER_KEY_ENC_IPV6_SRC = 31, TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK = 32, TCA_FLOWER_KEY_ENC_IPV6_DST = 33, TCA_FLOWER_KEY_ENC_IPV6_DST_MASK = 34, TCA_FLOWER_KEY_TCP_SRC_MASK = 35, TCA_FLOWER_KEY_TCP_DST_MASK = 36, TCA_FLOWER_KEY_UDP_SRC_MASK = 37, TCA_FLOWER_KEY_UDP_DST_MASK = 38, TCA_FLOWER_KEY_SCTP_SRC_MASK = 39, TCA_FLOWER_KEY_SCTP_DST_MASK = 40, TCA_FLOWER_KEY_SCTP_SRC = 41, TCA_FLOWER_KEY_SCTP_DST = 42, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT = 43, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK = 44, TCA_FLOWER_KEY_ENC_UDP_DST_PORT = 45, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK = 46, TCA_FLOWER_KEY_FLAGS = 47, TCA_FLOWER_KEY_FLAGS_MASK = 48, TCA_FLOWER_KEY_ICMPV4_CODE = 49, TCA_FLOWER_KEY_ICMPV4_CODE_MASK = 50, TCA_FLOWER_KEY_ICMPV4_TYPE = 51, TCA_FLOWER_KEY_ICMPV4_TYPE_MASK = 52, TCA_FLOWER_KEY_ICMPV6_CODE = 53, TCA_FLOWER_KEY_ICMPV6_CODE_MASK = 54, TCA_FLOWER_KEY_ICMPV6_TYPE = 55, TCA_FLOWER_KEY_ICMPV6_TYPE_MASK = 56, TCA_FLOWER_KEY_ARP_SIP = 57, TCA_FLOWER_KEY_ARP_SIP_MASK = 58, TCA_FLOWER_KEY_ARP_TIP = 59, TCA_FLOWER_KEY_ARP_TIP_MASK = 60, TCA_FLOWER_KEY_ARP_OP = 61, TCA_FLOWER_KEY_ARP_OP_MASK = 62, TCA_FLOWER_KEY_ARP_SHA = 63, TCA_FLOWER_KEY_ARP_SHA_MASK = 64, TCA_FLOWER_KEY_ARP_THA = 65, TCA_FLOWER_KEY_ARP_THA_MASK = 66, TCA_FLOWER_KEY_MPLS_TTL = 67, TCA_FLOWER_KEY_MPLS_BOS = 68, TCA_FLOWER_KEY_MPLS_TC = 69, TCA_FLOWER_KEY_MPLS_LABEL = 70, TCA_FLOWER_KEY_TCP_FLAGS = 71, TCA_FLOWER_KEY_TCP_FLAGS_MASK = 72, TCA_FLOWER_KEY_IP_TOS = 73, TCA_FLOWER_KEY_IP_TOS_MASK = 74, TCA_FLOWER_KEY_IP_TTL = 75, TCA_FLOWER_KEY_IP_TTL_MASK = 76, TCA_FLOWER_KEY_CVLAN_ID = 77, TCA_FLOWER_KEY_CVLAN_PRIO = 78, TCA_FLOWER_KEY_CVLAN_ETH_TYPE = 79, TCA_FLOWER_KEY_ENC_IP_TOS = 80, TCA_FLOWER_KEY_ENC_IP_TOS_MASK = 81, TCA_FLOWER_KEY_ENC_IP_TTL = 82, TCA_FLOWER_KEY_ENC_IP_TTL_MASK = 83, TCA_FLOWER_KEY_ENC_OPTS = 84, TCA_FLOWER_KEY_ENC_OPTS_MASK = 85, TCA_FLOWER_IN_HW_COUNT = 86, TCA_FLOWER_KEY_PORT_SRC_MIN = 87, TCA_FLOWER_KEY_PORT_SRC_MAX = 88, TCA_FLOWER_KEY_PORT_DST_MIN = 89, TCA_FLOWER_KEY_PORT_DST_MAX = 90, TCA_FLOWER_KEY_CT_STATE = 91, TCA_FLOWER_KEY_CT_STATE_MASK = 92, TCA_FLOWER_KEY_CT_ZONE = 93, TCA_FLOWER_KEY_CT_ZONE_MASK = 94, TCA_FLOWER_KEY_CT_MARK = 95, TCA_FLOWER_KEY_CT_MARK_MASK = 96, TCA_FLOWER_KEY_CT_LABELS = 97, TCA_FLOWER_KEY_CT_LABELS_MASK = 98, TCA_FLOWER_KEY_MPLS_OPTS = 99, TCA_FLOWER_KEY_HASH = 100, TCA_FLOWER_KEY_HASH_MASK = 101, TCA_FLOWER_KEY_NUM_OF_VLANS = 102, TCA_FLOWER_KEY_PPPOE_SID = 103, TCA_FLOWER_KEY_PPP_PROTO = 104, TCA_FLOWER_KEY_L2TPV3_SID = 105, TCA_FLOWER_L2_MISS = 106, TCA_FLOWER_KEY_CFM = 107, TCA_FLOWER_KEY_SPI = 108, TCA_FLOWER_KEY_SPI_MASK = 109, TCA_FLOWER_KEY_ENC_FLAGS = 110, TCA_FLOWER_KEY_ENC_FLAGS_MASK = 111, __TCA_FLOWER_MAX = 112, }; enum { TCA_FQ_CODEL_UNSPEC = 0, 
TCA_FQ_CODEL_TARGET = 1, TCA_FQ_CODEL_LIMIT = 2, TCA_FQ_CODEL_INTERVAL = 3, TCA_FQ_CODEL_ECN = 4, TCA_FQ_CODEL_FLOWS = 5, TCA_FQ_CODEL_QUANTUM = 6, TCA_FQ_CODEL_CE_THRESHOLD = 7, TCA_FQ_CODEL_DROP_BATCH_SIZE = 8, TCA_FQ_CODEL_MEMORY_LIMIT = 9, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR = 10, TCA_FQ_CODEL_CE_THRESHOLD_MASK = 11, __TCA_FQ_CODEL_MAX = 12, }; enum { TCA_FQ_CODEL_XSTATS_QDISC = 0, TCA_FQ_CODEL_XSTATS_CLASS = 1, }; enum { TCA_FQ_UNSPEC = 0, TCA_FQ_PLIMIT = 1, TCA_FQ_FLOW_PLIMIT = 2, TCA_FQ_QUANTUM = 3, TCA_FQ_INITIAL_QUANTUM = 4, TCA_FQ_RATE_ENABLE = 5, TCA_FQ_FLOW_DEFAULT_RATE = 6, TCA_FQ_FLOW_MAX_RATE = 7, TCA_FQ_BUCKETS_LOG = 8, TCA_FQ_FLOW_REFILL_DELAY = 9, TCA_FQ_ORPHAN_MASK = 10, TCA_FQ_LOW_RATE_THRESHOLD = 11, TCA_FQ_CE_THRESHOLD = 12, TCA_FQ_TIMER_SLACK = 13, TCA_FQ_HORIZON = 14, TCA_FQ_HORIZON_DROP = 15, TCA_FQ_PRIOMAP = 16, TCA_FQ_WEIGHTS = 17, __TCA_FQ_MAX = 18, }; enum { TCA_MATCHALL_UNSPEC = 0, TCA_MATCHALL_CLASSID = 1, TCA_MATCHALL_ACT = 2, TCA_MATCHALL_FLAGS = 3, TCA_MATCHALL_PCNT = 4, TCA_MATCHALL_PAD = 5, __TCA_MATCHALL_MAX = 6, }; enum { TCA_ROOT_UNSPEC = 0, TCA_ROOT_TAB = 1, TCA_ROOT_FLAGS = 2, TCA_ROOT_COUNT = 3, TCA_ROOT_TIME_DELTA = 4, TCA_ROOT_EXT_WARN_MSG = 5, __TCA_ROOT_MAX = 6, }; enum { TCA_SKBMOD_UNSPEC = 0, TCA_SKBMOD_TM = 1, TCA_SKBMOD_PARMS = 2, TCA_SKBMOD_DMAC = 3, TCA_SKBMOD_SMAC = 4, TCA_SKBMOD_ETYPE = 5, TCA_SKBMOD_PAD = 6, __TCA_SKBMOD_MAX = 7, }; enum { TCA_STAB_UNSPEC = 0, TCA_STAB_BASE = 1, TCA_STAB_DATA = 2, __TCA_STAB_MAX = 3, }; enum { TCA_STATS_UNSPEC = 0, TCA_STATS_BASIC = 1, TCA_STATS_RATE_EST = 2, TCA_STATS_QUEUE = 3, TCA_STATS_APP = 4, TCA_STATS_RATE_EST64 = 5, TCA_STATS_PAD = 6, TCA_STATS_BASIC_HW = 7, TCA_STATS_PKT64 = 8, __TCA_STATS_MAX = 9, }; enum { TCA_UNSPEC = 0, TCA_KIND = 1, TCA_OPTIONS = 2, TCA_STATS = 3, TCA_XSTATS = 4, TCA_RATE = 5, TCA_FCNT = 6, TCA_STATS2 = 7, TCA_STAB = 8, TCA_PAD = 9, TCA_DUMP_INVISIBLE = 10, TCA_CHAIN = 11, TCA_HW_OFFLOAD = 12, TCA_INGRESS_BLOCK = 13, TCA_EGRESS_BLOCK = 14, TCA_DUMP_FLAGS = 15, TCA_EXT_WARN_MSG = 16, __TCA_MAX = 17, }; enum { TCPF_ESTABLISHED = 2, TCPF_SYN_SENT = 4, TCPF_SYN_RECV = 8, TCPF_FIN_WAIT1 = 16, TCPF_FIN_WAIT2 = 32, TCPF_TIME_WAIT = 64, TCPF_CLOSE = 128, TCPF_CLOSE_WAIT = 256, TCPF_LAST_ACK = 512, TCPF_LISTEN = 1024, TCPF_CLOSING = 2048, TCPF_NEW_SYN_RECV = 4096, TCPF_BOUND_INACTIVE = 8192, }; enum { TCP_BPF_BASE = 0, TCP_BPF_TX = 1, TCP_BPF_RX = 2, TCP_BPF_TXRX = 3, TCP_BPF_NUM_CFGS = 4, }; enum { TCP_BPF_IPV4 = 0, TCP_BPF_IPV6 = 1, TCP_BPF_NUM_PROTS = 2, }; enum { TCP_BPF_IW = 1001, TCP_BPF_SNDCWND_CLAMP = 1002, TCP_BPF_DELACK_MAX = 1003, TCP_BPF_RTO_MIN = 1004, TCP_BPF_SYN = 1005, TCP_BPF_SYN_IP = 1006, TCP_BPF_SYN_MAC = 1007, TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, }; enum { TCP_CMSG_INQ = 1, TCP_CMSG_TS = 2, }; enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_SYN_RECV = 3, TCP_FIN_WAIT1 = 4, TCP_FIN_WAIT2 = 5, TCP_TIME_WAIT = 6, TCP_CLOSE = 7, TCP_CLOSE_WAIT = 8, TCP_LAST_ACK = 9, TCP_LISTEN = 10, TCP_CLOSING = 11, TCP_NEW_SYN_RECV = 12, TCP_BOUND_INACTIVE = 13, TCP_MAX_STATES = 14, }; enum { TCP_FLAG_CWR = 32768, TCP_FLAG_ECE = 16384, TCP_FLAG_URG = 8192, TCP_FLAG_ACK = 4096, TCP_FLAG_PSH = 2048, TCP_FLAG_RST = 1024, TCP_FLAG_SYN = 512, TCP_FLAG_FIN = 256, TCP_RESERVED_BITS = 15, TCP_DATA_OFFSET = 240, }; enum { TCP_METRICS_ATTR_UNSPEC = 0, TCP_METRICS_ATTR_ADDR_IPV4 = 1, TCP_METRICS_ATTR_ADDR_IPV6 = 2, TCP_METRICS_ATTR_AGE = 3, TCP_METRICS_ATTR_TW_TSVAL = 4, TCP_METRICS_ATTR_TW_TS_STAMP = 5, TCP_METRICS_ATTR_VALS = 6, TCP_METRICS_ATTR_FOPEN_MSS = 7, 
TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, TCP_METRICS_ATTR_FOPEN_COOKIE = 10, TCP_METRICS_ATTR_SADDR_IPV4 = 11, TCP_METRICS_ATTR_SADDR_IPV6 = 12, TCP_METRICS_ATTR_PAD = 13, __TCP_METRICS_ATTR_MAX = 14, }; enum { TCP_METRICS_CMD_UNSPEC = 0, TCP_METRICS_CMD_GET = 1, TCP_METRICS_CMD_DEL = 2, __TCP_METRICS_CMD_MAX = 3, }; enum { TCP_MIB_NUM = 0, TCP_MIB_RTOALGORITHM = 1, TCP_MIB_RTOMIN = 2, TCP_MIB_RTOMAX = 3, TCP_MIB_MAXCONN = 4, TCP_MIB_ACTIVEOPENS = 5, TCP_MIB_PASSIVEOPENS = 6, TCP_MIB_ATTEMPTFAILS = 7, TCP_MIB_ESTABRESETS = 8, TCP_MIB_CURRESTAB = 9, TCP_MIB_INSEGS = 10, TCP_MIB_OUTSEGS = 11, TCP_MIB_RETRANSSEGS = 12, TCP_MIB_INERRS = 13, TCP_MIB_OUTRSTS = 14, TCP_MIB_CSUMERRORS = 15, __TCP_MIB_MAX = 16, }; enum { TCP_NLA_PAD = 0, TCP_NLA_BUSY = 1, TCP_NLA_RWND_LIMITED = 2, TCP_NLA_SNDBUF_LIMITED = 3, TCP_NLA_DATA_SEGS_OUT = 4, TCP_NLA_TOTAL_RETRANS = 5, TCP_NLA_PACING_RATE = 6, TCP_NLA_DELIVERY_RATE = 7, TCP_NLA_SND_CWND = 8, TCP_NLA_REORDERING = 9, TCP_NLA_MIN_RTT = 10, TCP_NLA_RECUR_RETRANS = 11, TCP_NLA_DELIVERY_RATE_APP_LMT = 12, TCP_NLA_SNDQ_SIZE = 13, TCP_NLA_CA_STATE = 14, TCP_NLA_SND_SSTHRESH = 15, TCP_NLA_DELIVERED = 16, TCP_NLA_DELIVERED_CE = 17, TCP_NLA_BYTES_SENT = 18, TCP_NLA_BYTES_RETRANS = 19, TCP_NLA_DSACK_DUPS = 20, TCP_NLA_REORD_SEEN = 21, TCP_NLA_SRTT = 22, TCP_NLA_TIMEOUT_REHASH = 23, TCP_NLA_BYTES_NOTSENT = 24, TCP_NLA_EDT = 25, TCP_NLA_TTL = 26, TCP_NLA_REHASH = 27, }; enum { TCP_NO_QUEUE = 0, TCP_RECV_QUEUE = 1, TCP_SEND_QUEUE = 2, TCP_QUEUES_NR = 3, }; enum { TEST_NONE = 0, TEST_CORE = 1, TEST_CPUS = 2, TEST_PLATFORM = 3, TEST_DEVICES = 4, TEST_FREEZER = 5, __TEST_AFTER_LAST = 6, }; enum { TLSV4 = 0, TLSV6 = 1, TLS_NUM_PROTS = 2, }; enum { TLS_BASE = 0, TLS_SW = 1, TLS_HW = 2, TLS_HW_RECORD = 3, TLS_NUM_CONFIG = 4, }; enum { TLS_INFO_UNSPEC = 0, TLS_INFO_VERSION = 1, TLS_INFO_CIPHER = 2, TLS_INFO_TXCONF = 3, TLS_INFO_RXCONF = 4, TLS_INFO_ZC_RO_TX = 5, TLS_INFO_RX_NO_PAD = 6, __TLS_INFO_MAX = 7, }; enum { TLS_RECORD_TYPE_CHANGE_CIPHER_SPEC = 20, TLS_RECORD_TYPE_ALERT = 21, TLS_RECORD_TYPE_HANDSHAKE = 22, TLS_RECORD_TYPE_DATA = 23, TLS_RECORD_TYPE_HEARTBEAT = 24, TLS_RECORD_TYPE_TLS12_CID = 25, TLS_RECORD_TYPE_ACK = 26, }; enum { TOO_MANY_CLOSE = -1, TOO_MANY_OPEN = -2, MISSING_QUOTE = -3, }; enum { TP_ERR_FILE_NOT_FOUND = 0, TP_ERR_NO_REGULAR_FILE = 1, TP_ERR_BAD_REFCNT = 2, TP_ERR_REFCNT_OPEN_BRACE = 3, TP_ERR_BAD_REFCNT_SUFFIX = 4, TP_ERR_BAD_UPROBE_OFFS = 5, TP_ERR_BAD_MAXACT_TYPE = 6, TP_ERR_BAD_MAXACT = 7, TP_ERR_MAXACT_TOO_BIG = 8, TP_ERR_BAD_PROBE_ADDR = 9, TP_ERR_NON_UNIQ_SYMBOL = 10, TP_ERR_BAD_RETPROBE = 11, TP_ERR_NO_TRACEPOINT = 12, TP_ERR_BAD_ADDR_SUFFIX = 13, TP_ERR_NO_GROUP_NAME = 14, TP_ERR_GROUP_TOO_LONG = 15, TP_ERR_BAD_GROUP_NAME = 16, TP_ERR_NO_EVENT_NAME = 17, TP_ERR_EVENT_TOO_LONG = 18, TP_ERR_BAD_EVENT_NAME = 19, TP_ERR_EVENT_EXIST = 20, TP_ERR_RETVAL_ON_PROBE = 21, TP_ERR_NO_RETVAL = 22, TP_ERR_BAD_STACK_NUM = 23, TP_ERR_BAD_ARG_NUM = 24, TP_ERR_BAD_VAR = 25, TP_ERR_BAD_REG_NAME = 26, TP_ERR_BAD_MEM_ADDR = 27, TP_ERR_BAD_IMM = 28, TP_ERR_IMMSTR_NO_CLOSE = 29, TP_ERR_FILE_ON_KPROBE = 30, TP_ERR_BAD_FILE_OFFS = 31, TP_ERR_SYM_ON_UPROBE = 32, TP_ERR_TOO_MANY_OPS = 33, TP_ERR_DEREF_NEED_BRACE = 34, TP_ERR_BAD_DEREF_OFFS = 35, TP_ERR_DEREF_OPEN_BRACE = 36, TP_ERR_COMM_CANT_DEREF = 37, TP_ERR_BAD_FETCH_ARG = 38, TP_ERR_ARRAY_NO_CLOSE = 39, TP_ERR_BAD_ARRAY_SUFFIX = 40, TP_ERR_BAD_ARRAY_NUM = 41, TP_ERR_ARRAY_TOO_BIG = 42, TP_ERR_BAD_TYPE = 43, TP_ERR_BAD_STRING = 44, TP_ERR_BAD_SYMSTRING = 45, TP_ERR_BAD_BITFIELD 
= 46, TP_ERR_ARG_NAME_TOO_LONG = 47, TP_ERR_NO_ARG_NAME = 48, TP_ERR_BAD_ARG_NAME = 49, TP_ERR_USED_ARG_NAME = 50, TP_ERR_ARG_TOO_LONG = 51, TP_ERR_NO_ARG_BODY = 52, TP_ERR_BAD_INSN_BNDRY = 53, TP_ERR_FAIL_REG_PROBE = 54, TP_ERR_DIFF_PROBE_TYPE = 55, TP_ERR_DIFF_ARG_TYPE = 56, TP_ERR_SAME_PROBE = 57, TP_ERR_NO_EVENT_INFO = 58, TP_ERR_BAD_ATTACH_EVENT = 59, TP_ERR_BAD_ATTACH_ARG = 60, TP_ERR_NO_EP_FILTER = 61, TP_ERR_NOSUP_BTFARG = 62, TP_ERR_NO_BTFARG = 63, TP_ERR_NO_BTF_ENTRY = 64, TP_ERR_BAD_VAR_ARGS = 65, TP_ERR_NOFENTRY_ARGS = 66, TP_ERR_DOUBLE_ARGS = 67, TP_ERR_ARGS_2LONG = 68, TP_ERR_ARGIDX_2BIG = 69, TP_ERR_NO_PTR_STRCT = 70, TP_ERR_NOSUP_DAT_ARG = 71, TP_ERR_BAD_HYPHEN = 72, TP_ERR_NO_BTF_FIELD = 73, TP_ERR_BAD_BTF_TID = 74, TP_ERR_BAD_TYPE4STR = 75, TP_ERR_NEED_STRING_TYPE = 76, }; enum { TRACEFS_EVENT_INODE = 2, TRACEFS_GID_PERM_SET = 4, TRACEFS_UID_PERM_SET = 8, TRACEFS_INSTANCE_INODE = 16, }; enum { TRACE_ARRAY_FL_GLOBAL = 1, TRACE_ARRAY_FL_BOOT = 2, }; enum { TRACE_CTX_NMI = 0, TRACE_CTX_IRQ = 1, TRACE_CTX_SOFTIRQ = 2, TRACE_CTX_NORMAL = 3, TRACE_CTX_TRANSITION = 4, }; enum { TRACE_EVENT_FL_FILTERED = 1, TRACE_EVENT_FL_CAP_ANY = 2, TRACE_EVENT_FL_NO_SET_FILTER = 4, TRACE_EVENT_FL_IGNORE_ENABLE = 8, TRACE_EVENT_FL_TRACEPOINT = 16, TRACE_EVENT_FL_DYNAMIC = 32, TRACE_EVENT_FL_KPROBE = 64, TRACE_EVENT_FL_UPROBE = 128, TRACE_EVENT_FL_EPROBE = 256, TRACE_EVENT_FL_FPROBE = 512, TRACE_EVENT_FL_CUSTOM = 1024, }; enum { TRACE_EVENT_FL_FILTERED_BIT = 0, TRACE_EVENT_FL_CAP_ANY_BIT = 1, TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2, TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3, TRACE_EVENT_FL_TRACEPOINT_BIT = 4, TRACE_EVENT_FL_DYNAMIC_BIT = 5, TRACE_EVENT_FL_KPROBE_BIT = 6, TRACE_EVENT_FL_UPROBE_BIT = 7, TRACE_EVENT_FL_EPROBE_BIT = 8, TRACE_EVENT_FL_FPROBE_BIT = 9, TRACE_EVENT_FL_CUSTOM_BIT = 10, }; enum { TRACE_FTRACE_BIT = 0, TRACE_FTRACE_NMI_BIT = 1, TRACE_FTRACE_IRQ_BIT = 2, TRACE_FTRACE_SIRQ_BIT = 3, TRACE_FTRACE_TRANSITION_BIT = 4, TRACE_INTERNAL_BIT = 5, TRACE_INTERNAL_NMI_BIT = 6, TRACE_INTERNAL_IRQ_BIT = 7, TRACE_INTERNAL_SIRQ_BIT = 8, TRACE_INTERNAL_TRANSITION_BIT = 9, TRACE_BRANCH_BIT = 10, TRACE_IRQ_BIT = 11, TRACE_RECORD_RECURSION_BIT = 12, }; enum { TRACE_FUNC_NO_OPTS = 0, TRACE_FUNC_OPT_STACK = 1, TRACE_FUNC_OPT_NO_REPEATS = 2, TRACE_FUNC_OPT_HIGHEST_BIT = 4, }; enum { TRACE_GRAPH_FL = 1, TRACE_GRAPH_DEPTH_START_BIT = 2, TRACE_GRAPH_DEPTH_END_BIT = 3, TRACE_GRAPH_NOTRACE_BIT = 4, }; enum { TRACE_NOP_OPT_ACCEPT = 1, TRACE_NOP_OPT_REFUSE = 2, }; enum { TRACE_PIDS = 1, TRACE_NO_PIDS = 2, }; enum { TRACE_SIGNAL_DELIVERED = 0, TRACE_SIGNAL_IGNORED = 1, TRACE_SIGNAL_ALREADY_PENDING = 2, TRACE_SIGNAL_OVERFLOW_FAIL = 3, TRACE_SIGNAL_LOSE_INFO = 4, }; enum { TTY_LOCK_NORMAL = 0, TTY_LOCK_SLAVE = 1, }; enum { UDP_BPF_IPV4 = 0, UDP_BPF_IPV6 = 1, UDP_BPF_NUM_PROTS = 2, }; enum { UDP_FLAGS_CORK = 0, UDP_FLAGS_NO_CHECK6_TX = 1, UDP_FLAGS_NO_CHECK6_RX = 2, UDP_FLAGS_GRO_ENABLED = 3, UDP_FLAGS_ACCEPT_FRAGLIST = 4, UDP_FLAGS_ACCEPT_L4 = 5, UDP_FLAGS_ENCAP_ENABLED = 6, UDP_FLAGS_UDPLITE_SEND_CC = 7, UDP_FLAGS_UDPLITE_RECV_CC = 8, }; enum { UDP_MIB_NUM = 0, UDP_MIB_INDATAGRAMS = 1, UDP_MIB_NOPORTS = 2, UDP_MIB_INERRORS = 3, UDP_MIB_OUTDATAGRAMS = 4, UDP_MIB_RCVBUFERRORS = 5, UDP_MIB_SNDBUFERRORS = 6, UDP_MIB_CSUMERRORS = 7, UDP_MIB_IGNOREDMULTI = 8, UDP_MIB_MEMERRORS = 9, __UDP_MIB_MAX = 10, }; enum { UNAME26 = 131072, ADDR_NO_RANDOMIZE = 262144, FDPIC_FUNCPTRS = 524288, MMAP_PAGE_ZERO = 1048576, ADDR_COMPAT_LAYOUT = 2097152, READ_IMPLIES_EXEC = 4194304, ADDR_LIMIT_32BIT = 8388608, SHORT_INODE = 16777216, 
WHOLE_SECONDS = 33554432, STICKY_TIMEOUTS = 67108864, ADDR_LIMIT_3GB = 134217728, }; enum { UNCORE_TYPE_DF = 0, UNCORE_TYPE_L3 = 1, UNCORE_TYPE_UMC = 2, UNCORE_TYPE_MAX = 3, }; enum { UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE = 1, SYSTEM_AMD_MSR_CAPABLE = 2, SYSTEM_IO_CAPABLE = 3, }; enum { VERBOSE_STATUS = 1, }; enum { VETH_INFO_UNSPEC = 0, VETH_INFO_PEER = 1, __VETH_INFO_MAX = 2, }; enum { VMGENID_SIZE = 16, }; enum { VNIFILTER_ENTRY_STATS_UNSPEC = 0, VNIFILTER_ENTRY_STATS_RX_BYTES = 1, VNIFILTER_ENTRY_STATS_RX_PKTS = 2, VNIFILTER_ENTRY_STATS_RX_DROPS = 3, VNIFILTER_ENTRY_STATS_RX_ERRORS = 4, VNIFILTER_ENTRY_STATS_TX_BYTES = 5, VNIFILTER_ENTRY_STATS_TX_PKTS = 6, VNIFILTER_ENTRY_STATS_TX_DROPS = 7, VNIFILTER_ENTRY_STATS_TX_ERRORS = 8, VNIFILTER_ENTRY_STATS_PAD = 9, __VNIFILTER_ENTRY_STATS_MAX = 10, }; enum { VXLAN_VNIFILTER_ENTRY_UNSPEC = 0, VXLAN_VNIFILTER_ENTRY_START = 1, VXLAN_VNIFILTER_ENTRY_END = 2, VXLAN_VNIFILTER_ENTRY_GROUP = 3, VXLAN_VNIFILTER_ENTRY_GROUP6 = 4, VXLAN_VNIFILTER_ENTRY_STATS = 5, __VXLAN_VNIFILTER_ENTRY_MAX = 6, }; enum { VXLAN_VNIFILTER_UNSPEC = 0, VXLAN_VNIFILTER_ENTRY = 1, __VXLAN_VNIFILTER_MAX = 2, }; enum { VXLAN_VNI_STATS_RX = 0, VXLAN_VNI_STATS_RX_DROPS = 1, VXLAN_VNI_STATS_RX_ERRORS = 2, VXLAN_VNI_STATS_TX = 3, VXLAN_VNI_STATS_TX_DROPS = 4, VXLAN_VNI_STATS_TX_ERRORS = 5, }; enum { WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4, }; enum { X86_BR_NONE = 0, X86_BR_USER = 1, X86_BR_KERNEL = 2, X86_BR_CALL = 4, X86_BR_RET = 8, X86_BR_SYSCALL = 16, X86_BR_SYSRET = 32, X86_BR_INT = 64, X86_BR_IRET = 128, X86_BR_JCC = 256, X86_BR_JMP = 512, X86_BR_IRQ = 1024, X86_BR_IND_CALL = 2048, X86_BR_ABORT = 4096, X86_BR_IN_TX = 8192, X86_BR_NO_TX = 16384, X86_BR_ZERO_CALL = 32768, X86_BR_CALL_STACK = 65536, X86_BR_IND_JMP = 131072, X86_BR_TYPE_SAVE = 262144, }; enum { X86_IRQ_ALLOC_LEGACY = 1, }; enum { X86_PERF_KFREE_SHARED = 0, X86_PERF_KFREE_EXCL = 1, X86_PERF_KFREE_MAX = 2, }; enum { XA_CHECK_SCHED = 4096, }; enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV = 1, XDP_ATTACHED_SKB = 2, XDP_ATTACHED_HW = 3, XDP_ATTACHED_MULTI = 4, }; enum { XDP_DIAG_NONE = 0, XDP_DIAG_INFO = 1, XDP_DIAG_UID = 2, XDP_DIAG_RX_RING = 3, XDP_DIAG_TX_RING = 4, XDP_DIAG_UMEM = 5, XDP_DIAG_UMEM_FILL_RING = 6, XDP_DIAG_UMEM_COMPLETION_RING = 7, XDP_DIAG_MEMINFO = 8, XDP_DIAG_STATS = 9, __XDP_DIAG_MAX = 10, }; enum { XFRM_DEV_OFFLOAD_IN = 1, XFRM_DEV_OFFLOAD_OUT = 2, XFRM_DEV_OFFLOAD_FWD = 3, }; enum { XFRM_DEV_OFFLOAD_UNSPECIFIED = 0, XFRM_DEV_OFFLOAD_CRYPTO = 1, XFRM_DEV_OFFLOAD_PACKET = 2, }; enum { XFRM_LOOKUP_ICMP = 1, XFRM_LOOKUP_QUEUE = 2, XFRM_LOOKUP_KEEP_DST_REF = 4, }; enum { XFRM_MODE_FLAG_TUNNEL = 1, }; enum { XFRM_MSG_BASE = 16, XFRM_MSG_NEWSA = 16, XFRM_MSG_DELSA = 17, XFRM_MSG_GETSA = 18, XFRM_MSG_NEWPOLICY = 19, XFRM_MSG_DELPOLICY = 20, XFRM_MSG_GETPOLICY = 21, XFRM_MSG_ALLOCSPI = 22, XFRM_MSG_ACQUIRE = 23, XFRM_MSG_EXPIRE = 24, XFRM_MSG_UPDPOLICY = 25, XFRM_MSG_UPDSA = 26, XFRM_MSG_POLEXPIRE = 27, XFRM_MSG_FLUSHSA = 28, XFRM_MSG_FLUSHPOLICY = 29, XFRM_MSG_NEWAE = 30, XFRM_MSG_GETAE = 31, XFRM_MSG_REPORT = 32, XFRM_MSG_MIGRATE = 33, XFRM_MSG_NEWSADINFO = 34, XFRM_MSG_GETSADINFO = 35, XFRM_MSG_NEWSPDINFO = 36, XFRM_MSG_GETSPDINFO = 37, XFRM_MSG_MAPPING = 38, XFRM_MSG_SETDEFAULT = 39, XFRM_MSG_GETDEFAULT = 40, __XFRM_MSG_MAX = 41, }; enum { XFRM_POLICY_IN = 0, XFRM_POLICY_OUT = 1, XFRM_POLICY_FWD = 2, XFRM_POLICY_MASK = 3, XFRM_POLICY_MAX = 3, }; enum { XFRM_POLICY_TYPE_MAIN = 0, XFRM_POLICY_TYPE_SUB = 1, XFRM_POLICY_TYPE_MAX = 2, XFRM_POLICY_TYPE_ANY = 255, }; enum { 
XFRM_SHARE_ANY = 0, XFRM_SHARE_SESSION = 1, XFRM_SHARE_USER = 2, XFRM_SHARE_UNIQUE = 3, }; enum { XFRM_STATE_VOID = 0, XFRM_STATE_ACQ = 1, XFRM_STATE_VALID = 2, XFRM_STATE_ERROR = 3, XFRM_STATE_EXPIRED = 4, XFRM_STATE_DEAD = 5, }; enum { XT_CONNMARK_SET = 0, XT_CONNMARK_SAVE = 1, XT_CONNMARK_RESTORE = 2, }; enum { XT_CT_NOTRACK = 1, XT_CT_NOTRACK_ALIAS = 2, XT_CT_ZONE_DIR_ORIG = 4, XT_CT_ZONE_DIR_REPL = 8, XT_CT_ZONE_MARK = 16, XT_CT_MASK = 31, }; enum { ZONELIST_FALLBACK = 0, ZONELIST_NOFALLBACK = 1, MAX_ZONELISTS = 2, }; enum { _IRQ_DEFAULT_INIT_FLAGS = 0, _IRQ_PER_CPU = 512, _IRQ_LEVEL = 256, _IRQ_NOPROBE = 1024, _IRQ_NOREQUEST = 2048, _IRQ_NOTHREAD = 65536, _IRQ_NOAUTOEN = 4096, _IRQ_MOVE_PCNTXT = 16384, _IRQ_NO_BALANCING = 8192, _IRQ_NESTED_THREAD = 32768, _IRQ_PER_CPU_DEVID = 131072, _IRQ_IS_POLLED = 262144, _IRQ_DISABLE_UNLAZY = 524288, _IRQ_HIDDEN = 1048576, _IRQ_NO_DEBUG = 2097152, _IRQF_MODIFY_MASK = 2096911, }; enum { __ND_OPT_PREFIX_INFO_END = 0, ND_OPT_SOURCE_LL_ADDR = 1, ND_OPT_TARGET_LL_ADDR = 2, ND_OPT_PREFIX_INFO = 3, ND_OPT_REDIRECT_HDR = 4, ND_OPT_MTU = 5, ND_OPT_NONCE = 14, __ND_OPT_ARRAY_MAX = 15, ND_OPT_ROUTE_INFO = 24, ND_OPT_RDNSS = 25, ND_OPT_DNSSL = 31, ND_OPT_6CO = 34, ND_OPT_CAPTIVE_PORTAL = 37, ND_OPT_PREF64 = 38, __ND_OPT_MAX = 39, }; enum { __PERCPU_REF_ATOMIC = 1, __PERCPU_REF_DEAD = 2, __PERCPU_REF_ATOMIC_DEAD = 3, __PERCPU_REF_FLAG_BITS = 2, }; enum { __RQF_STARTED = 0, __RQF_FLUSH_SEQ = 1, __RQF_MIXED_MERGE = 2, __RQF_DONTPREP = 3, __RQF_SCHED_TAGS = 4, __RQF_USE_SCHED = 5, __RQF_FAILED = 6, __RQF_QUIET = 7, __RQF_IO_STAT = 8, __RQF_PM = 9, __RQF_HASHED = 10, __RQF_STATS = 11, __RQF_SPECIAL_PAYLOAD = 12, __RQF_ZONE_WRITE_PLUGGING = 13, __RQF_TIMED_OUT = 14, __RQF_RESV = 15, __RQF_BITS = 16, }; enum { __SCHED_FEAT_PLACE_LAG = 0, __SCHED_FEAT_PLACE_DEADLINE_INITIAL = 1, __SCHED_FEAT_PLACE_REL_DEADLINE = 2, __SCHED_FEAT_RUN_TO_PARITY = 3, __SCHED_FEAT_PREEMPT_SHORT = 4, __SCHED_FEAT_NEXT_BUDDY = 5, __SCHED_FEAT_CACHE_HOT_BUDDY = 6, __SCHED_FEAT_DELAY_DEQUEUE = 7, __SCHED_FEAT_DELAY_ZERO = 8, __SCHED_FEAT_WAKEUP_PREEMPTION = 9, __SCHED_FEAT_HRTICK = 10, __SCHED_FEAT_HRTICK_DL = 11, __SCHED_FEAT_DOUBLE_TICK = 12, __SCHED_FEAT_NONTASK_CAPACITY = 13, __SCHED_FEAT_TTWU_QUEUE = 14, __SCHED_FEAT_SIS_UTIL = 15, __SCHED_FEAT_WARN_DOUBLE_CLOCK = 16, __SCHED_FEAT_RT_PUSH_IPI = 17, __SCHED_FEAT_RT_RUNTIME_SHARE = 18, __SCHED_FEAT_LB_MIN = 19, __SCHED_FEAT_ATTACH_AGE_LOAD = 20, __SCHED_FEAT_WA_IDLE = 21, __SCHED_FEAT_WA_WEIGHT = 22, __SCHED_FEAT_WA_BIAS = 23, __SCHED_FEAT_UTIL_EST = 24, __SCHED_FEAT_LATENCY_WARN = 25, __SCHED_FEAT_NR = 26, }; enum { __SD_BALANCE_NEWIDLE = 0, __SD_BALANCE_EXEC = 1, __SD_BALANCE_FORK = 2, __SD_BALANCE_WAKE = 3, __SD_WAKE_AFFINE = 4, __SD_ASYM_CPUCAPACITY = 5, __SD_ASYM_CPUCAPACITY_FULL = 6, __SD_SHARE_CPUCAPACITY = 7, __SD_CLUSTER = 8, __SD_SHARE_LLC = 9, __SD_SERIALIZE = 10, __SD_ASYM_PACKING = 11, __SD_PREFER_SIBLING = 12, __SD_OVERLAP = 13, __SD_NUMA = 14, __SD_FLAG_CNT = 15, }; enum { ___GFP_DMA_BIT = 0, ___GFP_HIGHMEM_BIT = 1, ___GFP_DMA32_BIT = 2, ___GFP_MOVABLE_BIT = 3, ___GFP_RECLAIMABLE_BIT = 4, ___GFP_HIGH_BIT = 5, ___GFP_IO_BIT = 6, ___GFP_FS_BIT = 7, ___GFP_ZERO_BIT = 8, ___GFP_UNUSED_BIT = 9, ___GFP_DIRECT_RECLAIM_BIT = 10, ___GFP_KSWAPD_RECLAIM_BIT = 11, ___GFP_WRITE_BIT = 12, ___GFP_NOWARN_BIT = 13, ___GFP_RETRY_MAYFAIL_BIT = 14, ___GFP_NOFAIL_BIT = 15, ___GFP_NORETRY_BIT = 16, ___GFP_MEMALLOC_BIT = 17, ___GFP_COMP_BIT = 18, ___GFP_NOMEMALLOC_BIT = 19, ___GFP_HARDWALL_BIT = 20, ___GFP_THISNODE_BIT = 21, ___GFP_ACCOUNT_BIT = 
22, ___GFP_ZEROTAGS_BIT = 23, ___GFP_NOLOCKDEP_BIT = 24, ___GFP_NO_OBJ_EXT_BIT = 25, ___GFP_LAST_BIT = 26, }; enum { __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, __ctx_convertBPF_PROG_TYPE_XDP = 3, __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, __ctx_convertBPF_PROG_TYPE_KPROBE = 15, __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, __ctx_convertBPF_PROG_TYPE_TRACING = 20, __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, __ctx_convertBPF_PROG_TYPE_LIRC_MODE2 = 24, __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 25, __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 26, __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 27, __ctx_convertBPF_PROG_TYPE_EXT = 28, __ctx_convertBPF_PROG_TYPE_LSM = 29, __ctx_convertBPF_PROG_TYPE_SYSCALL = 30, __ctx_convertBPF_PROG_TYPE_NETFILTER = 31, __ctx_convert_unused = 32, }; enum { attr_noop = 0, attr_delayed_allocation_blocks = 1, attr_session_write_kbytes = 2, attr_lifetime_write_kbytes = 3, attr_reserved_clusters = 4, attr_sra_exceeded_retry_limit = 5, attr_inode_readahead = 6, attr_trigger_test_error = 7, attr_first_error_time = 8, attr_last_error_time = 9, attr_clusters_in_group = 10, attr_mb_order = 11, attr_feature = 12, attr_pointer_pi = 13, attr_pointer_ui = 14, attr_pointer_ul = 15, attr_pointer_u64 = 16, attr_pointer_u8 = 17, attr_pointer_string = 18, attr_pointer_atomic = 19, attr_journal_task = 20, }; enum { blank_off = 0, blank_normal_wait = 1, blank_vesa_wait = 2, }; enum { cpuset = 0, possible = 1, fail = 2, }; enum { dns_key_data = 0, dns_key_error = 1, }; enum { false = 0, true = 1, }; enum { mask_exec = 0, mask_write = 1, mask_read = 2, mask_append = 3, }; enum { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3, }; enum { preempt_dynamic_undefined = -1, preempt_dynamic_none = 0, preempt_dynamic_voluntary = 1, preempt_dynamic_full = 2, }; enum { ptr_explicit = 0, ptr_ext4_sb_info_offset = 1, ptr_ext4_super_block_offset = 2, }; enum { st_wordstart = 0, st_wordcmp = 1, st_wordskip = 2, st_bufcpy = 3, }; enum { st_wordstart___2 = 0, st_wordcmp___2 = 1, st_wordskip___2 = 2, }; enum { x86_lbr_exclusive_lbr = 0, x86_lbr_exclusive_bts = 1, x86_lbr_exclusive_pt = 2, x86_lbr_exclusive_max = 3, }; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3, } BIT_DStream_status; typedef enum { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, 
ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_workSpace_tooSmall = 66, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, ZSTD_error_maxCode = 120, } ZSTD_ErrorCode; typedef ZSTD_ErrorCode ERR_enum; typedef enum { ZSTD_reset_session_only = 1, ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3, } ZSTD_ResetDirective; typedef enum { ZSTD_bm_buffered = 0, ZSTD_bm_stable = 1, } ZSTD_bufferMode_e; typedef enum { ZSTD_d_windowLogMax = 100, ZSTD_d_experimentalParam1 = 1000, ZSTD_d_experimentalParam2 = 1001, ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, } ZSTD_dParameter; typedef enum { ZSTDds_getFrameHeaderSize = 0, ZSTDds_decodeFrameHeader = 1, ZSTDds_decodeBlockHeader = 2, ZSTDds_decompressBlock = 3, ZSTDds_decompressLastBlock = 4, ZSTDds_checkChecksum = 5, ZSTDds_decodeSkippableHeader = 6, ZSTDds_skipFrame = 7, } ZSTD_dStage; typedef enum { zdss_init = 0, zdss_loadHeader = 1, zdss_read = 2, zdss_load = 3, zdss_flush = 4, } ZSTD_dStreamStage; typedef enum { ZSTD_dct_auto = 0, ZSTD_dct_rawContent = 1, ZSTD_dct_fullDict = 2, } ZSTD_dictContentType_e; typedef enum { ZSTD_dlm_byCopy = 0, ZSTD_dlm_byRef = 1, } ZSTD_dictLoadMethod_e; typedef enum { ZSTD_use_indefinitely = -1, ZSTD_dont_use = 0, ZSTD_use_once = 1, } ZSTD_dictUses_e; typedef enum { ZSTD_d_validateChecksum = 0, ZSTD_d_ignoreChecksum = 1, } ZSTD_forceIgnoreChecksum_e; typedef enum { ZSTD_f_zstd1 = 0, ZSTD_f_zstd1_magicless = 1, } ZSTD_format_e; typedef enum { ZSTD_frame = 0, ZSTD_skippableFrame = 1, } ZSTD_frameType_e; typedef enum { ZSTD_not_in_dst = 0, ZSTD_in_dst = 1, ZSTD_split = 2, } ZSTD_litLocation_e; typedef enum { ZSTD_lo_isRegularOffset = 0, ZSTD_lo_isLongOffset = 1, } ZSTD_longOffset_e; typedef enum { ZSTDnit_frameHeader = 0, ZSTDnit_blockHeader = 1, ZSTDnit_block = 2, ZSTDnit_lastBlock = 3, ZSTDnit_checksum = 4, ZSTDnit_skippableFrame = 5, } ZSTD_nextInputType_e; typedef enum { ZSTD_no_overlap = 0, ZSTD_overlap_src_before_dst = 1, } ZSTD_overlap_e; typedef enum { ZSTD_rmd_refSingleDDict = 0, ZSTD_rmd_refMultipleDDicts = 1, } ZSTD_refMultipleDDicts_e; typedef enum { OSL_GLOBAL_LOCK_HANDLER = 0, OSL_NOTIFY_HANDLER = 1, OSL_GPE_HANDLER = 2, OSL_DEBUGGER_MAIN_THREAD = 3, OSL_DEBUGGER_EXEC_THREAD = 4, OSL_EC_POLL_HANDLER = 5, OSL_EC_BURST_HANDLER = 6, } acpi_execute_type; typedef enum { ACPI_IMODE_LOAD_PASS1 = 1, ACPI_IMODE_LOAD_PASS2 = 2, ACPI_IMODE_EXECUTE = 3, } acpi_interpreter_mode; typedef enum { ACPI_TRACE_AML_METHOD = 0, ACPI_TRACE_AML_OPCODE = 1, ACPI_TRACE_AML_REGION = 2, } acpi_trace_event_type; typedef enum { bt_raw = 0, bt_rle = 1, bt_compressed = 2, bt_reserved = 3, } blockType_e; typedef enum { AD_CHURN_MONITOR = 0, AD_CHURN = 1, AD_NO_CHURN = 2, } churn_state_t; typedef enum { CODES = 0, LENS = 1, DISTS = 2, } codetype; typedef enum { FILE_MEMORY_MIGRATE = 0, FILE_CPULIST = 1, FILE_MEMLIST = 2, FILE_EFFECTIVE_CPULIST = 3, FILE_EFFECTIVE_MEMLIST = 4, FILE_SUBPARTS_CPULIST = 5, FILE_EXCLUSIVE_CPULIST = 6, FILE_EFFECTIVE_XCPULIST = 7, FILE_ISOLATED_CPULIST = 8, FILE_CPU_EXCLUSIVE = 9, FILE_MEM_EXCLUSIVE = 10, FILE_MEM_HARDWALL = 11, FILE_SCHED_LOAD_BALANCE = 12, FILE_PARTITION_ROOT = 13, 
FILE_SCHED_RELAX_DOMAIN_LEVEL = 14, FILE_MEMORY_PRESSURE_ENABLED = 15, FILE_MEMORY_PRESSURE = 16, FILE_SPREAD_PAGE = 17, FILE_SPREAD_SLAB = 18, } cpuset_filetype_t; typedef enum { CS_ONLINE = 0, CS_CPU_EXCLUSIVE = 1, CS_MEM_EXCLUSIVE = 2, CS_MEM_HARDWALL = 3, CS_MEMORY_MIGRATE = 4, CS_SCHED_LOAD_BALANCE = 5, CS_SPREAD_PAGE = 6, CS_SPREAD_SLAB = 7, } cpuset_flagbits_t; typedef enum { noDict = 0, withPrefix64k = 1, usingExtDict = 2, } dict_directive; typedef enum { EITHER = 0, INDEX = 1, DIRENT = 2, DIRENT_HTREE = 3, } dirblock_type_t; typedef enum { decode_full_block = 0, partial_decode = 1, } earlyEnd_directive; typedef enum { endOnOutputSize = 0, endOnInputSize = 1, } endCondition_directive; typedef enum { EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 1, EXT4_IGET_HANDLE = 2, EXT4_IGET_BAD = 4, EXT4_IGET_EA_INODE = 8, } ext4_iget_flags; typedef enum { HEAD = 0, FLAGS = 1, TIME = 2, OS = 3, EXLEN = 4, EXTRA = 5, NAME = 6, COMMENT = 7, HCRC = 8, DICTID = 9, DICT = 10, TYPE = 11, TYPEDO = 12, STORED = 13, COPY = 14, TABLE = 15, LENLENS = 16, CODELENS = 17, LEN = 18, LENEXT = 19, DIST = 20, DISTEXT = 21, MATCH = 22, LIT = 23, CHECK = 24, LENGTH = 25, DONE = 26, BAD = 27, MEM = 28, SYNC = 29, } inflate_mode; typedef enum { ISOLATE_ABORT = 0, ISOLATE_NONE = 1, ISOLATE_SUCCESS = 2, } isolate_migrate_t; typedef enum { AD_MUX_DUMMY = 0, AD_MUX_DETACHED = 1, AD_MUX_WAITING = 2, AD_MUX_ATTACHED = 3, AD_MUX_COLLECTING = 4, AD_MUX_DISTRIBUTING = 5, AD_MUX_COLLECTING_DISTRIBUTING = 6, } mux_states_t; typedef enum { PAGE_KEEP = 0, PAGE_ACTIVATE = 1, PAGE_SUCCESS = 2, PAGE_CLEAN = 3, } pageout_t; typedef enum { AD_PERIODIC_DUMMY = 0, AD_NO_PERIODIC = 1, AD_FAST_PERIODIC = 2, AD_SLOW_PERIODIC = 3, AD_PERIODIC_TX = 4, } periodic_states_t; typedef enum { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_INTERNAL = 1, PHY_INTERFACE_MODE_MII = 2, PHY_INTERFACE_MODE_GMII = 3, PHY_INTERFACE_MODE_SGMII = 4, PHY_INTERFACE_MODE_TBI = 5, PHY_INTERFACE_MODE_REVMII = 6, PHY_INTERFACE_MODE_RMII = 7, PHY_INTERFACE_MODE_REVRMII = 8, PHY_INTERFACE_MODE_RGMII = 9, PHY_INTERFACE_MODE_RGMII_ID = 10, PHY_INTERFACE_MODE_RGMII_RXID = 11, PHY_INTERFACE_MODE_RGMII_TXID = 12, PHY_INTERFACE_MODE_RTBI = 13, PHY_INTERFACE_MODE_SMII = 14, PHY_INTERFACE_MODE_XGMII = 15, PHY_INTERFACE_MODE_XLGMII = 16, PHY_INTERFACE_MODE_MOCA = 17, PHY_INTERFACE_MODE_PSGMII = 18, PHY_INTERFACE_MODE_QSGMII = 19, PHY_INTERFACE_MODE_TRGMII = 20, PHY_INTERFACE_MODE_100BASEX = 21, PHY_INTERFACE_MODE_1000BASEX = 22, PHY_INTERFACE_MODE_2500BASEX = 23, PHY_INTERFACE_MODE_5GBASER = 24, PHY_INTERFACE_MODE_RXAUI = 25, PHY_INTERFACE_MODE_XAUI = 26, PHY_INTERFACE_MODE_10GBASER = 27, PHY_INTERFACE_MODE_25GBASER = 28, PHY_INTERFACE_MODE_USXGMII = 29, PHY_INTERFACE_MODE_10GKR = 30, PHY_INTERFACE_MODE_QUSGMII = 31, PHY_INTERFACE_MODE_1000BASEKX = 32, PHY_INTERFACE_MODE_10G_QXGMII = 33, PHY_INTERFACE_MODE_MAX = 34, } phy_interface_t; typedef enum { PSMOUSE_BAD_DATA = 0, PSMOUSE_GOOD_DATA = 1, PSMOUSE_FULL_PACKET = 2, } psmouse_ret_t; typedef enum { AD_RX_DUMMY = 0, AD_RX_INITIALIZE = 1, AD_RX_PORT_DISABLED = 2, AD_RX_LACP_DISABLED = 3, AD_RX_EXPIRED = 4, AD_RX_DEFAULTED = 5, AD_RX_CURRENT = 6, } rx_states_t; typedef enum { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4, } socket_state; typedef enum { not_streaming = 0, is_streaming = 1, } streaming_operation; typedef enum { set_basic = 0, set_rle = 1, set_compressed = 2, set_repeat = 3, } symbolEncodingType_e; typedef enum { AD_TX_DUMMY = 0, AD_TRANSMIT = 1, } tx_states_t; typedef 
ZSTD_ErrorCode zstd_error_code; enum CSI_J { CSI_J_CURSOR_TO_END = 0, CSI_J_START_TO_CURSOR = 1, CSI_J_VISIBLE = 2, CSI_J_FULL = 3, }; enum CSI_right_square_bracket { CSI_RSB_COLOR_FOR_UNDERLINE = 1, CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, CSI_RSB_BLANKING_INTERVAL = 9, CSI_RSB_BELL_FREQUENCY = 10, CSI_RSB_BELL_DURATION = 11, CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, CSI_RSB_UNBLANK = 13, CSI_RSB_VESA_OFF_INTERVAL = 14, CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, CSI_RSB_CURSOR_BLINK_INTERVAL = 16, }; enum KTHREAD_BITS { KTHREAD_IS_PER_CPU = 0, KTHREAD_SHOULD_STOP = 1, KTHREAD_SHOULD_PARK = 2, }; enum OID { OID_id_dsa_with_sha1 = 0, OID_id_dsa = 1, OID_id_ecPublicKey = 2, OID_id_prime192v1 = 3, OID_id_prime256v1 = 4, OID_id_ecdsa_with_sha1 = 5, OID_id_ecdsa_with_sha224 = 6, OID_id_ecdsa_with_sha256 = 7, OID_id_ecdsa_with_sha384 = 8, OID_id_ecdsa_with_sha512 = 9, OID_rsaEncryption = 10, OID_sha1WithRSAEncryption = 11, OID_sha256WithRSAEncryption = 12, OID_sha384WithRSAEncryption = 13, OID_sha512WithRSAEncryption = 14, OID_sha224WithRSAEncryption = 15, OID_data = 16, OID_signed_data = 17, OID_email_address = 18, OID_contentType = 19, OID_messageDigest = 20, OID_signingTime = 21, OID_smimeCapabilites = 22, OID_smimeAuthenticatedAttrs = 23, OID_mskrb5 = 24, OID_krb5 = 25, OID_krb5u2u = 26, OID_msIndirectData = 27, OID_msStatementType = 28, OID_msSpOpusInfo = 29, OID_msPeImageDataObjId = 30, OID_msIndividualSPKeyPurpose = 31, OID_msOutlookExpress = 32, OID_ntlmssp = 33, OID_negoex = 34, OID_spnego = 35, OID_IAKerb = 36, OID_PKU2U = 37, OID_Scram = 38, OID_certAuthInfoAccess = 39, OID_sha1 = 40, OID_id_ansip384r1 = 41, OID_id_ansip521r1 = 42, OID_sha256 = 43, OID_sha384 = 44, OID_sha512 = 45, OID_sha224 = 46, OID_commonName = 47, OID_surname = 48, OID_countryName = 49, OID_locality = 50, OID_stateOrProvinceName = 51, OID_organizationName = 52, OID_organizationUnitName = 53, OID_title = 54, OID_description = 55, OID_name = 56, OID_givenName = 57, OID_initials = 58, OID_generationalQualifier = 59, OID_subjectKeyIdentifier = 60, OID_keyUsage = 61, OID_subjectAltName = 62, OID_issuerAltName = 63, OID_basicConstraints = 64, OID_crlDistributionPoints = 65, OID_certPolicies = 66, OID_authorityKeyIdentifier = 67, OID_extKeyUsage = 68, OID_NetlogonMechanism = 69, OID_appleLocalKdcSupported = 70, OID_gostCPSignA = 71, OID_gostCPSignB = 72, OID_gostCPSignC = 73, OID_gost2012PKey256 = 74, OID_gost2012PKey512 = 75, OID_gost2012Digest256 = 76, OID_gost2012Digest512 = 77, OID_gost2012Signature256 = 78, OID_gost2012Signature512 = 79, OID_gostTC26Sign256A = 80, OID_gostTC26Sign256B = 81, OID_gostTC26Sign256C = 82, OID_gostTC26Sign256D = 83, OID_gostTC26Sign512A = 84, OID_gostTC26Sign512B = 85, OID_gostTC26Sign512C = 86, OID_sm2 = 87, OID_sm3 = 88, OID_SM2_with_SM3 = 89, OID_sm3WithRSAEncryption = 90, OID_TPMLoadableKey = 91, OID_TPMImportableKey = 92, OID_TPMSealedData = 93, OID_sha3_256 = 94, OID_sha3_384 = 95, OID_sha3_512 = 96, OID_id_ecdsa_with_sha3_256 = 97, OID_id_ecdsa_with_sha3_384 = 98, OID_id_ecdsa_with_sha3_512 = 99, OID_id_rsassa_pkcs1_v1_5_with_sha3_256 = 100, OID_id_rsassa_pkcs1_v1_5_with_sha3_384 = 101, OID_id_rsassa_pkcs1_v1_5_with_sha3_512 = 102, OID__NR = 103, }; enum P4_ESCR_EMASKS { P4_EVENT_TC_DELIVER_MODE__DD = 512, P4_EVENT_TC_DELIVER_MODE__DB = 1024, P4_EVENT_TC_DELIVER_MODE__DI = 2048, P4_EVENT_TC_DELIVER_MODE__BD = 4096, P4_EVENT_TC_DELIVER_MODE__BB = 8192, P4_EVENT_TC_DELIVER_MODE__BI = 16384, P4_EVENT_TC_DELIVER_MODE__ID = 32768, 
P4_EVENT_BPU_FETCH_REQUEST__TCMISS = 512, P4_EVENT_ITLB_REFERENCE__HIT = 512, P4_EVENT_ITLB_REFERENCE__MISS = 1024, P4_EVENT_ITLB_REFERENCE__HIT_UK = 2048, P4_EVENT_MEMORY_CANCEL__ST_RB_FULL = 2048, P4_EVENT_MEMORY_CANCEL__64K_CONF = 4096, P4_EVENT_MEMORY_COMPLETE__LSC = 512, P4_EVENT_MEMORY_COMPLETE__SSC = 1024, P4_EVENT_LOAD_PORT_REPLAY__SPLIT_LD = 1024, P4_EVENT_STORE_PORT_REPLAY__SPLIT_ST = 1024, P4_EVENT_MOB_LOAD_REPLAY__NO_STA = 1024, P4_EVENT_MOB_LOAD_REPLAY__NO_STD = 4096, P4_EVENT_MOB_LOAD_REPLAY__PARTIAL_DATA = 8192, P4_EVENT_MOB_LOAD_REPLAY__UNALGN_ADDR = 16384, P4_EVENT_PAGE_WALK_TYPE__DTMISS = 512, P4_EVENT_PAGE_WALK_TYPE__ITMISS = 1024, P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITS = 512, P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITE = 1024, P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITM = 2048, P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITS = 4096, P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITE = 8192, P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITM = 16384, P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_MISS = 131072, P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_MISS = 262144, P4_EVENT_BSQ_CACHE_REFERENCE__WR_2ndL_MISS = 524288, P4_EVENT_IOQ_ALLOCATION__DEFAULT = 512, P4_EVENT_IOQ_ALLOCATION__ALL_READ = 16384, P4_EVENT_IOQ_ALLOCATION__ALL_WRITE = 32768, P4_EVENT_IOQ_ALLOCATION__MEM_UC = 65536, P4_EVENT_IOQ_ALLOCATION__MEM_WC = 131072, P4_EVENT_IOQ_ALLOCATION__MEM_WT = 262144, P4_EVENT_IOQ_ALLOCATION__MEM_WP = 524288, P4_EVENT_IOQ_ALLOCATION__MEM_WB = 1048576, P4_EVENT_IOQ_ALLOCATION__OWN = 4194304, P4_EVENT_IOQ_ALLOCATION__OTHER = 8388608, P4_EVENT_IOQ_ALLOCATION__PREFETCH = 16777216, P4_EVENT_IOQ_ACTIVE_ENTRIES__DEFAULT = 512, P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_READ = 16384, P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_WRITE = 32768, P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_UC = 65536, P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WC = 131072, P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WT = 262144, P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WP = 524288, P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WB = 1048576, P4_EVENT_IOQ_ACTIVE_ENTRIES__OWN = 4194304, P4_EVENT_IOQ_ACTIVE_ENTRIES__OTHER = 8388608, P4_EVENT_IOQ_ACTIVE_ENTRIES__PREFETCH = 16777216, P4_EVENT_FSB_DATA_ACTIVITY__DRDY_DRV = 512, P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OWN = 1024, P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OTHER = 2048, P4_EVENT_FSB_DATA_ACTIVITY__DBSY_DRV = 4096, P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OWN = 8192, P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OTHER = 16384, P4_EVENT_BSQ_ALLOCATION__REQ_TYPE0 = 512, P4_EVENT_BSQ_ALLOCATION__REQ_TYPE1 = 1024, P4_EVENT_BSQ_ALLOCATION__REQ_LEN0 = 2048, P4_EVENT_BSQ_ALLOCATION__REQ_LEN1 = 4096, P4_EVENT_BSQ_ALLOCATION__REQ_IO_TYPE = 16384, P4_EVENT_BSQ_ALLOCATION__REQ_LOCK_TYPE = 32768, P4_EVENT_BSQ_ALLOCATION__REQ_CACHE_TYPE = 65536, P4_EVENT_BSQ_ALLOCATION__REQ_SPLIT_TYPE = 131072, P4_EVENT_BSQ_ALLOCATION__REQ_DEM_TYPE = 262144, P4_EVENT_BSQ_ALLOCATION__REQ_ORD_TYPE = 524288, P4_EVENT_BSQ_ALLOCATION__MEM_TYPE0 = 1048576, P4_EVENT_BSQ_ALLOCATION__MEM_TYPE1 = 2097152, P4_EVENT_BSQ_ALLOCATION__MEM_TYPE2 = 4194304, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE0 = 512, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE1 = 1024, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN0 = 2048, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN1 = 4096, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_IO_TYPE = 16384, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LOCK_TYPE = 32768, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_CACHE_TYPE = 65536, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_SPLIT_TYPE = 131072, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_DEM_TYPE = 262144, P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_ORD_TYPE = 524288, P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE0 = 1048576, P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE1 
= 2097152, P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE2 = 4194304, P4_EVENT_SSE_INPUT_ASSIST__ALL = 16777216, P4_EVENT_PACKED_SP_UOP__ALL = 16777216, P4_EVENT_PACKED_DP_UOP__ALL = 16777216, P4_EVENT_SCALAR_SP_UOP__ALL = 16777216, P4_EVENT_SCALAR_DP_UOP__ALL = 16777216, P4_EVENT_64BIT_MMX_UOP__ALL = 16777216, P4_EVENT_128BIT_MMX_UOP__ALL = 16777216, P4_EVENT_X87_FP_UOP__ALL = 16777216, P4_EVENT_TC_MISC__FLUSH = 8192, P4_EVENT_GLOBAL_POWER_EVENTS__RUNNING = 512, P4_EVENT_TC_MS_XFER__CISC = 512, P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_BUILD = 512, P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_DELIVER = 1024, P4_EVENT_UOP_QUEUE_WRITES__FROM_ROM = 2048, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CONDITIONAL = 1024, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CALL = 2048, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__RETURN = 4096, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__INDIRECT = 8192, P4_EVENT_RETIRED_BRANCH_TYPE__CONDITIONAL = 1024, P4_EVENT_RETIRED_BRANCH_TYPE__CALL = 2048, P4_EVENT_RETIRED_BRANCH_TYPE__RETURN = 4096, P4_EVENT_RETIRED_BRANCH_TYPE__INDIRECT = 8192, P4_EVENT_RESOURCE_STALL__SBFULL = 16384, P4_EVENT_WC_BUFFER__WCB_EVICTS = 512, P4_EVENT_WC_BUFFER__WCB_FULL_EVICTS = 1024, P4_EVENT_FRONT_END_EVENT__NBOGUS = 512, P4_EVENT_FRONT_END_EVENT__BOGUS = 1024, P4_EVENT_EXECUTION_EVENT__NBOGUS0 = 512, P4_EVENT_EXECUTION_EVENT__NBOGUS1 = 1024, P4_EVENT_EXECUTION_EVENT__NBOGUS2 = 2048, P4_EVENT_EXECUTION_EVENT__NBOGUS3 = 4096, P4_EVENT_EXECUTION_EVENT__BOGUS0 = 8192, P4_EVENT_EXECUTION_EVENT__BOGUS1 = 16384, P4_EVENT_EXECUTION_EVENT__BOGUS2 = 32768, P4_EVENT_EXECUTION_EVENT__BOGUS3 = 65536, P4_EVENT_REPLAY_EVENT__NBOGUS = 512, P4_EVENT_REPLAY_EVENT__BOGUS = 1024, P4_EVENT_INSTR_RETIRED__NBOGUSNTAG = 512, P4_EVENT_INSTR_RETIRED__NBOGUSTAG = 1024, P4_EVENT_INSTR_RETIRED__BOGUSNTAG = 2048, P4_EVENT_INSTR_RETIRED__BOGUSTAG = 4096, P4_EVENT_UOPS_RETIRED__NBOGUS = 512, P4_EVENT_UOPS_RETIRED__BOGUS = 1024, P4_EVENT_UOP_TYPE__TAGLOADS = 1024, P4_EVENT_UOP_TYPE__TAGSTORES = 2048, P4_EVENT_BRANCH_RETIRED__MMNP = 512, P4_EVENT_BRANCH_RETIRED__MMNM = 1024, P4_EVENT_BRANCH_RETIRED__MMTP = 2048, P4_EVENT_BRANCH_RETIRED__MMTM = 4096, P4_EVENT_MISPRED_BRANCH_RETIRED__NBOGUS = 512, P4_EVENT_X87_ASSIST__FPSU = 512, P4_EVENT_X87_ASSIST__FPSO = 1024, P4_EVENT_X87_ASSIST__POAO = 2048, P4_EVENT_X87_ASSIST__POAU = 4096, P4_EVENT_X87_ASSIST__PREA = 8192, P4_EVENT_MACHINE_CLEAR__CLEAR = 512, P4_EVENT_MACHINE_CLEAR__MOCLEAR = 1024, P4_EVENT_MACHINE_CLEAR__SMCLEAR = 2048, P4_EVENT_INSTR_COMPLETED__NBOGUS = 512, P4_EVENT_INSTR_COMPLETED__BOGUS = 1024, }; enum P4_EVENTS { P4_EVENT_TC_DELIVER_MODE = 0, P4_EVENT_BPU_FETCH_REQUEST = 1, P4_EVENT_ITLB_REFERENCE = 2, P4_EVENT_MEMORY_CANCEL = 3, P4_EVENT_MEMORY_COMPLETE = 4, P4_EVENT_LOAD_PORT_REPLAY = 5, P4_EVENT_STORE_PORT_REPLAY = 6, P4_EVENT_MOB_LOAD_REPLAY = 7, P4_EVENT_PAGE_WALK_TYPE = 8, P4_EVENT_BSQ_CACHE_REFERENCE = 9, P4_EVENT_IOQ_ALLOCATION = 10, P4_EVENT_IOQ_ACTIVE_ENTRIES = 11, P4_EVENT_FSB_DATA_ACTIVITY = 12, P4_EVENT_BSQ_ALLOCATION = 13, P4_EVENT_BSQ_ACTIVE_ENTRIES = 14, P4_EVENT_SSE_INPUT_ASSIST = 15, P4_EVENT_PACKED_SP_UOP = 16, P4_EVENT_PACKED_DP_UOP = 17, P4_EVENT_SCALAR_SP_UOP = 18, P4_EVENT_SCALAR_DP_UOP = 19, P4_EVENT_64BIT_MMX_UOP = 20, P4_EVENT_128BIT_MMX_UOP = 21, P4_EVENT_X87_FP_UOP = 22, P4_EVENT_TC_MISC = 23, P4_EVENT_GLOBAL_POWER_EVENTS = 24, P4_EVENT_TC_MS_XFER = 25, P4_EVENT_UOP_QUEUE_WRITES = 26, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE = 27, P4_EVENT_RETIRED_BRANCH_TYPE = 28, P4_EVENT_RESOURCE_STALL = 29, P4_EVENT_WC_BUFFER = 30, P4_EVENT_B2B_CYCLES = 31, P4_EVENT_BNR = 32, P4_EVENT_SNOOP 
= 33, P4_EVENT_RESPONSE = 34, P4_EVENT_FRONT_END_EVENT = 35, P4_EVENT_EXECUTION_EVENT = 36, P4_EVENT_REPLAY_EVENT = 37, P4_EVENT_INSTR_RETIRED = 38, P4_EVENT_UOPS_RETIRED = 39, P4_EVENT_UOP_TYPE = 40, P4_EVENT_BRANCH_RETIRED = 41, P4_EVENT_MISPRED_BRANCH_RETIRED = 42, P4_EVENT_X87_ASSIST = 43, P4_EVENT_MACHINE_CLEAR = 44, P4_EVENT_INSTR_COMPLETED = 45, }; enum P4_EVENT_OPCODES { P4_EVENT_TC_DELIVER_MODE_OPCODE = 257, P4_EVENT_BPU_FETCH_REQUEST_OPCODE = 768, P4_EVENT_ITLB_REFERENCE_OPCODE = 6147, P4_EVENT_MEMORY_CANCEL_OPCODE = 517, P4_EVENT_MEMORY_COMPLETE_OPCODE = 2050, P4_EVENT_LOAD_PORT_REPLAY_OPCODE = 1026, P4_EVENT_STORE_PORT_REPLAY_OPCODE = 1282, P4_EVENT_MOB_LOAD_REPLAY_OPCODE = 770, P4_EVENT_PAGE_WALK_TYPE_OPCODE = 260, P4_EVENT_BSQ_CACHE_REFERENCE_OPCODE = 3079, P4_EVENT_IOQ_ALLOCATION_OPCODE = 774, P4_EVENT_IOQ_ACTIVE_ENTRIES_OPCODE = 6662, P4_EVENT_FSB_DATA_ACTIVITY_OPCODE = 5894, P4_EVENT_BSQ_ALLOCATION_OPCODE = 1287, P4_EVENT_BSQ_ACTIVE_ENTRIES_OPCODE = 1543, P4_EVENT_SSE_INPUT_ASSIST_OPCODE = 13313, P4_EVENT_PACKED_SP_UOP_OPCODE = 2049, P4_EVENT_PACKED_DP_UOP_OPCODE = 3073, P4_EVENT_SCALAR_SP_UOP_OPCODE = 2561, P4_EVENT_SCALAR_DP_UOP_OPCODE = 3585, P4_EVENT_64BIT_MMX_UOP_OPCODE = 513, P4_EVENT_128BIT_MMX_UOP_OPCODE = 6657, P4_EVENT_X87_FP_UOP_OPCODE = 1025, P4_EVENT_TC_MISC_OPCODE = 1537, P4_EVENT_GLOBAL_POWER_EVENTS_OPCODE = 4870, P4_EVENT_TC_MS_XFER_OPCODE = 1280, P4_EVENT_UOP_QUEUE_WRITES_OPCODE = 2304, P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE_OPCODE = 1282, P4_EVENT_RETIRED_BRANCH_TYPE_OPCODE = 1026, P4_EVENT_RESOURCE_STALL_OPCODE = 257, P4_EVENT_WC_BUFFER_OPCODE = 1285, P4_EVENT_B2B_CYCLES_OPCODE = 5635, P4_EVENT_BNR_OPCODE = 2051, P4_EVENT_SNOOP_OPCODE = 1539, P4_EVENT_RESPONSE_OPCODE = 1027, P4_EVENT_FRONT_END_EVENT_OPCODE = 2053, P4_EVENT_EXECUTION_EVENT_OPCODE = 3077, P4_EVENT_REPLAY_EVENT_OPCODE = 2309, P4_EVENT_INSTR_RETIRED_OPCODE = 516, P4_EVENT_UOPS_RETIRED_OPCODE = 260, P4_EVENT_UOP_TYPE_OPCODE = 514, P4_EVENT_BRANCH_RETIRED_OPCODE = 1541, P4_EVENT_MISPRED_BRANCH_RETIRED_OPCODE = 772, P4_EVENT_X87_ASSIST_OPCODE = 773, P4_EVENT_MACHINE_CLEAR_OPCODE = 517, P4_EVENT_INSTR_COMPLETED_OPCODE = 1796, }; enum P4_PEBS_METRIC { P4_PEBS_METRIC__none = 0, P4_PEBS_METRIC__1stl_cache_load_miss_retired = 1, P4_PEBS_METRIC__2ndl_cache_load_miss_retired = 2, P4_PEBS_METRIC__dtlb_load_miss_retired = 3, P4_PEBS_METRIC__dtlb_store_miss_retired = 4, P4_PEBS_METRIC__dtlb_all_miss_retired = 5, P4_PEBS_METRIC__tagged_mispred_branch = 6, P4_PEBS_METRIC__mob_load_replay_retired = 7, P4_PEBS_METRIC__split_load_retired = 8, P4_PEBS_METRIC__split_store_retired = 9, P4_PEBS_METRIC__max = 10, }; enum SHIFT_DIRECTION { SHIFT_LEFT = 0, SHIFT_RIGHT = 1, }; enum SS4_PACKET_ID { SS4_PACKET_ID_IDLE = 0, SS4_PACKET_ID_ONE = 1, SS4_PACKET_ID_TWO = 2, SS4_PACKET_ID_MULTI = 3, SS4_PACKET_ID_STICK = 4, }; enum TPM_OPS_FLAGS { TPM_OPS_AUTO_STARTUP = 1, }; enum V7_PACKET_ID { V7_PACKET_ID_IDLE = 0, V7_PACKET_ID_TWO = 1, V7_PACKET_ID_MULTI = 2, V7_PACKET_ID_NEW = 3, V7_PACKET_ID_UNKNOWN = 4, }; enum __sk_action { __SK_DROP = 0, __SK_PASS = 1, __SK_REDIRECT = 2, __SK_NONE = 3, }; enum _cache_type { CTYPE_NULL = 0, CTYPE_DATA = 1, CTYPE_INST = 2, CTYPE_UNIFIED = 3, }; enum _slab_flag_bits { _SLAB_CONSISTENCY_CHECKS = 0, _SLAB_RED_ZONE = 1, _SLAB_POISON = 2, _SLAB_KMALLOC = 3, _SLAB_HWCACHE_ALIGN = 4, _SLAB_CACHE_DMA = 5, _SLAB_CACHE_DMA32 = 6, _SLAB_STORE_USER = 7, _SLAB_PANIC = 8, _SLAB_TYPESAFE_BY_RCU = 9, _SLAB_TRACE = 10, _SLAB_NOLEAKTRACE = 11, _SLAB_NO_MERGE = 12, _SLAB_ACCOUNT = 13, _SLAB_NO_USER_FLAGS = 
14, _SLAB_RECLAIM_ACCOUNT = 15, _SLAB_OBJECT_POISON = 16, _SLAB_CMPXCHG_DOUBLE = 17, _SLAB_NO_OBJ_EXT = 18, _SLAB_FLAGS_LAST_BIT = 19, }; enum access_coordinate_class { ACCESS_COORDINATE_LOCAL = 0, ACCESS_COORDINATE_CPU = 1, ACCESS_COORDINATE_MAX = 2, }; enum acpi_attr_enum { ACPI_ATTR_LABEL_SHOW = 0, ACPI_ATTR_INDEX_SHOW = 1, }; enum acpi_bridge_type { ACPI_BRIDGE_TYPE_PCIE = 1, ACPI_BRIDGE_TYPE_CXL = 2, }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER = 1, ACPI_BUS_TYPE_PROCESSOR = 2, ACPI_BUS_TYPE_THERMAL = 3, ACPI_BUS_TYPE_POWER_BUTTON = 4, ACPI_BUS_TYPE_SLEEP_BUTTON = 5, ACPI_BUS_TYPE_ECDT_EC = 6, ACPI_BUS_DEVICE_TYPE_COUNT = 7, }; enum acpi_cdat_type { ACPI_CDAT_TYPE_DSMAS = 0, ACPI_CDAT_TYPE_DSLBIS = 1, ACPI_CDAT_TYPE_DSMSCIS = 2, ACPI_CDAT_TYPE_DSIS = 3, ACPI_CDAT_TYPE_DSEMTS = 4, ACPI_CDAT_TYPE_SSLBIS = 5, ACPI_CDAT_TYPE_RESERVED = 6, }; enum acpi_cedt_type { ACPI_CEDT_TYPE_CHBS = 0, ACPI_CEDT_TYPE_CFMWS = 1, ACPI_CEDT_TYPE_CXIMS = 2, ACPI_CEDT_TYPE_RDPAS = 3, ACPI_CEDT_TYPE_RESERVED = 4, }; enum acpi_device_swnode_dev_props { ACPI_DEVICE_SWNODE_DEV_ROTATION = 0, ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY = 1, ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP = 2, ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP = 3, ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US = 4, ACPI_DEVICE_SWNODE_DEV_NUM_OF = 5, ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES = 6, }; enum acpi_device_swnode_ep_props { ACPI_DEVICE_SWNODE_EP_REMOTE_EP = 0, ACPI_DEVICE_SWNODE_EP_BUS_TYPE = 1, ACPI_DEVICE_SWNODE_EP_REG = 2, ACPI_DEVICE_SWNODE_EP_CLOCK_LANES = 3, ACPI_DEVICE_SWNODE_EP_DATA_LANES = 4, ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES = 5, ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES = 6, ACPI_DEVICE_SWNODE_EP_NUM_OF = 7, ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES = 8, }; enum acpi_device_swnode_port_props { ACPI_DEVICE_SWNODE_PORT_REG = 0, ACPI_DEVICE_SWNODE_PORT_NUM_OF = 1, ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES = 2, }; enum acpi_ec_event_state { EC_EVENT_READY = 0, EC_EVENT_IN_PROGRESS = 1, EC_EVENT_COMPLETE = 2, }; enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC = 1, ACPI_IRQ_MODEL_IOSAPIC = 2, ACPI_IRQ_MODEL_PLATFORM = 3, ACPI_IRQ_MODEL_GIC = 4, ACPI_IRQ_MODEL_LPIC = 5, ACPI_IRQ_MODEL_RINTC = 6, ACPI_IRQ_MODEL_COUNT = 7, }; enum acpi_madt_multiproc_wakeup_version { ACPI_MADT_MP_WAKEUP_VERSION_NONE = 0, ACPI_MADT_MP_WAKEUP_VERSION_V1 = 1, ACPI_MADT_MP_WAKEUP_VERSION_RESERVED = 2, }; enum acpi_madt_type { ACPI_MADT_TYPE_LOCAL_APIC = 0, ACPI_MADT_TYPE_IO_APIC = 1, ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, ACPI_MADT_TYPE_NMI_SOURCE = 3, ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, ACPI_MADT_TYPE_IO_SAPIC = 6, ACPI_MADT_TYPE_LOCAL_SAPIC = 7, ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, ACPI_MADT_TYPE_LOCAL_X2APIC = 9, ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, ACPI_MADT_TYPE_CORE_PIC = 17, ACPI_MADT_TYPE_LIO_PIC = 18, ACPI_MADT_TYPE_HT_PIC = 19, ACPI_MADT_TYPE_EIO_PIC = 20, ACPI_MADT_TYPE_MSI_PIC = 21, ACPI_MADT_TYPE_BIO_PIC = 22, ACPI_MADT_TYPE_LPC_PIC = 23, ACPI_MADT_TYPE_RINTC = 24, ACPI_MADT_TYPE_IMSIC = 25, ACPI_MADT_TYPE_APLIC = 26, ACPI_MADT_TYPE_PLIC = 27, ACPI_MADT_TYPE_RESERVED = 28, ACPI_MADT_TYPE_OEM_RESERVED = 128, }; enum acpi_pcct_type { ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, 
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, ACPI_PCCT_TYPE_RESERVED = 6, }; enum acpi_predicate { all_versions = 0, less_than_or_equal = 1, equal = 2, greater_than_or_equal = 3, }; enum acpi_preferred_pm_profiles { PM_UNSPECIFIED = 0, PM_DESKTOP = 1, PM_MOBILE = 2, PM_WORKSTATION = 3, PM_ENTERPRISE_SERVER = 4, PM_SOHO_SERVER = 5, PM_APPLIANCE_PC = 6, PM_PERFORMANCE_SERVER = 7, PM_TABLET = 8, NR_PM_PROFILES = 9, }; enum acpi_reconfig_event { ACPI_RECONFIG_DEVICE_ADD = 0, ACPI_RECONFIG_DEVICE_REMOVE = 1, }; enum acpi_return_package_types { ACPI_PTYPE1_FIXED = 1, ACPI_PTYPE1_VAR = 2, ACPI_PTYPE1_OPTION = 3, ACPI_PTYPE2 = 4, ACPI_PTYPE2_COUNT = 5, ACPI_PTYPE2_PKG_COUNT = 6, ACPI_PTYPE2_FIXED = 7, ACPI_PTYPE2_MIN = 8, ACPI_PTYPE2_REV_FIXED = 9, ACPI_PTYPE2_FIX_VAR = 10, ACPI_PTYPE2_VAR_VAR = 11, ACPI_PTYPE2_UUID_PAIR = 12, ACPI_PTYPE_CUSTOM = 13, }; enum acpi_srat_type { ACPI_SRAT_TYPE_CPU_AFFINITY = 0, ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, ACPI_SRAT_TYPE_GICC_AFFINITY = 3, ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, ACPI_SRAT_TYPE_RINTC_AFFINITY = 7, ACPI_SRAT_TYPE_RESERVED = 8, }; enum acpi_subtable_type { ACPI_SUBTABLE_COMMON = 0, ACPI_SUBTABLE_HMAT = 1, ACPI_SUBTABLE_PRMT = 2, ACPI_SUBTABLE_CEDT = 3, CDAT_SUBTABLE = 4, }; enum actions { REGISTER = 0, DEREGISTER = 1, CPU_DONT_CARE = 2, }; enum ad_link_speed_type { AD_LINK_SPEED_1MBPS = 1, AD_LINK_SPEED_10MBPS = 2, AD_LINK_SPEED_100MBPS = 3, AD_LINK_SPEED_1000MBPS = 4, AD_LINK_SPEED_2500MBPS = 5, AD_LINK_SPEED_5000MBPS = 6, AD_LINK_SPEED_10000MBPS = 7, AD_LINK_SPEED_14000MBPS = 8, AD_LINK_SPEED_20000MBPS = 9, AD_LINK_SPEED_25000MBPS = 10, AD_LINK_SPEED_40000MBPS = 11, AD_LINK_SPEED_50000MBPS = 12, AD_LINK_SPEED_56000MBPS = 13, AD_LINK_SPEED_100000MBPS = 14, AD_LINK_SPEED_200000MBPS = 15, AD_LINK_SPEED_400000MBPS = 16, AD_LINK_SPEED_800000MBPS = 17, }; enum addr_type_t { UNICAST_ADDR = 0, MULTICAST_ADDR = 1, ANYCAST_ADDR = 2, }; enum af_vsockmon_op { AF_VSOCK_OP_UNKNOWN = 0, AF_VSOCK_OP_CONNECT = 1, AF_VSOCK_OP_DISCONNECT = 2, AF_VSOCK_OP_CONTROL = 3, AF_VSOCK_OP_PAYLOAD = 4, }; enum af_vsockmon_transport { AF_VSOCK_TRANSPORT_UNKNOWN = 0, AF_VSOCK_TRANSPORT_NO_INFO = 1, AF_VSOCK_TRANSPORT_VIRTIO = 2, }; enum alarmtimer_restart { ALARMTIMER_NORESTART = 0, ALARMTIMER_RESTART = 1, }; enum alarmtimer_type { ALARM_REALTIME = 0, ALARM_BOOTTIME = 1, ALARM_NUMTYPE = 2, ALARM_REALTIME_FREEZER = 3, ALARM_BOOTTIME_FREEZER = 4, }; enum align_flags { ALIGN_VA_32 = 1, ALIGN_VA_64 = 2, }; enum allow_write_msrs { MSR_WRITES_ON = 0, MSR_WRITES_OFF = 1, MSR_WRITES_DEFAULT = 2, }; enum amd_chipset_gen { NOT_AMD_CHIPSET = 0, AMD_CHIPSET_SB600 = 1, AMD_CHIPSET_SB700 = 2, AMD_CHIPSET_SB800 = 3, AMD_CHIPSET_HUDSON2 = 4, AMD_CHIPSET_BOLTON = 5, AMD_CHIPSET_YANGTZE = 6, AMD_CHIPSET_TAISHAN = 7, AMD_CHIPSET_UNKNOWN = 8, }; enum amd_pref_core { AMD_PREF_CORE_UNKNOWN = 0, AMD_PREF_CORE_SUPPORTED = 1, AMD_PREF_CORE_UNSUPPORTED = 2, }; enum amd_pstate_mode { AMD_PSTATE_UNDEFINED = 0, AMD_PSTATE_DISABLE = 1, AMD_PSTATE_PASSIVE = 2, AMD_PSTATE_ACTIVE = 3, AMD_PSTATE_GUIDED = 4, AMD_PSTATE_MAX = 5, }; enum aper_size_type { U8_APER_SIZE = 0, U16_APER_SIZE = 1, U32_APER_SIZE = 2, LVL2_APER_SIZE = 3, FIXED_APER_SIZE = 4, }; enum apic_intr_mode_id { APIC_PIC = 0, APIC_VIRTUAL_WIRE = 1, APIC_VIRTUAL_WIRE_NO_CONFIG = 2, APIC_SYMMETRIC_IO = 3, 
APIC_SYMMETRIC_IO_NO_ROUTING = 4, }; enum asn1_class { ASN1_UNIV = 0, ASN1_APPL = 1, ASN1_CONT = 2, ASN1_PRIV = 3, }; enum asn1_method { ASN1_PRIM = 0, ASN1_CONS = 1, }; enum asn1_opcode { ASN1_OP_MATCH = 0, ASN1_OP_MATCH_OR_SKIP = 1, ASN1_OP_MATCH_ACT = 2, ASN1_OP_MATCH_ACT_OR_SKIP = 3, ASN1_OP_MATCH_JUMP = 4, ASN1_OP_MATCH_JUMP_OR_SKIP = 5, ASN1_OP_MATCH_ANY = 8, ASN1_OP_MATCH_ANY_OR_SKIP = 9, ASN1_OP_MATCH_ANY_ACT = 10, ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, ASN1_OP_COND_MATCH_OR_SKIP = 17, ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, ASN1_OP_COND_MATCH_ANY = 24, ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, ASN1_OP_COND_MATCH_ANY_ACT = 26, ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, ASN1_OP_COND_FAIL = 28, ASN1_OP_COMPLETE = 29, ASN1_OP_ACT = 30, ASN1_OP_MAYBE_ACT = 31, ASN1_OP_END_SEQ = 32, ASN1_OP_END_SET = 33, ASN1_OP_END_SEQ_OF = 34, ASN1_OP_END_SET_OF = 35, ASN1_OP_END_SEQ_ACT = 36, ASN1_OP_END_SET_ACT = 37, ASN1_OP_END_SEQ_OF_ACT = 38, ASN1_OP_END_SET_OF_ACT = 39, ASN1_OP_RETURN = 40, ASN1_OP__NR = 41, }; enum asn1_tag { ASN1_EOC = 0, ASN1_BOOL = 1, ASN1_INT = 2, ASN1_BTS = 3, ASN1_OTS = 4, ASN1_NULL = 5, ASN1_OID = 6, ASN1_ODE = 7, ASN1_EXT = 8, ASN1_REAL = 9, ASN1_ENUM = 10, ASN1_EPDV = 11, ASN1_UTF8STR = 12, ASN1_RELOID = 13, ASN1_SEQ = 16, ASN1_SET = 17, ASN1_NUMSTR = 18, ASN1_PRNSTR = 19, ASN1_TEXSTR = 20, ASN1_VIDSTR = 21, ASN1_IA5STR = 22, ASN1_UNITIM = 23, ASN1_GENTIM = 24, ASN1_GRASTR = 25, ASN1_VISSTR = 26, ASN1_GENSTR = 27, ASN1_UNISTR = 28, ASN1_CHRSTR = 29, ASN1_BMPSTR = 30, ASN1_LONG_TAG = 31, }; enum assoc_array_walk_status { assoc_array_walk_tree_empty = 0, assoc_array_walk_found_terminal_node = 1, assoc_array_walk_found_wrong_shortcut = 2, }; enum asymmetric_payload_bits { asym_crypto = 0, asym_subtype = 1, asym_key_ids = 2, asym_auth = 3, }; enum ata_quirks { __ATA_QUIRK_DIAGNOSTIC = 0, __ATA_QUIRK_NODMA = 1, __ATA_QUIRK_NONCQ = 2, __ATA_QUIRK_MAX_SEC_128 = 3, __ATA_QUIRK_BROKEN_HPA = 4, __ATA_QUIRK_DISABLE = 5, __ATA_QUIRK_HPA_SIZE = 6, __ATA_QUIRK_IVB = 7, __ATA_QUIRK_STUCK_ERR = 8, __ATA_QUIRK_BRIDGE_OK = 9, __ATA_QUIRK_ATAPI_MOD16_DMA = 10, __ATA_QUIRK_FIRMWARE_WARN = 11, __ATA_QUIRK_1_5_GBPS = 12, __ATA_QUIRK_NOSETXFER = 13, __ATA_QUIRK_BROKEN_FPDMA_AA = 14, __ATA_QUIRK_DUMP_ID = 15, __ATA_QUIRK_MAX_SEC_LBA48 = 16, __ATA_QUIRK_ATAPI_DMADIR = 17, __ATA_QUIRK_NO_NCQ_TRIM = 18, __ATA_QUIRK_NOLPM = 19, __ATA_QUIRK_WD_BROKEN_LPM = 20, __ATA_QUIRK_ZERO_AFTER_TRIM = 21, __ATA_QUIRK_NO_DMA_LOG = 22, __ATA_QUIRK_NOTRIM = 23, __ATA_QUIRK_MAX_SEC_1024 = 24, __ATA_QUIRK_MAX_TRIM_128M = 25, __ATA_QUIRK_NO_NCQ_ON_ATI = 26, __ATA_QUIRK_NO_ID_DEV_LOG = 27, __ATA_QUIRK_NO_LOG_DIR = 28, __ATA_QUIRK_NO_FUA = 29, __ATA_QUIRK_MAX = 30, }; enum audit_nfcfgop { AUDIT_XT_OP_REGISTER = 0, AUDIT_XT_OP_REPLACE = 1, AUDIT_XT_OP_UNREGISTER = 2, AUDIT_NFT_OP_TABLE_REGISTER = 3, AUDIT_NFT_OP_TABLE_UNREGISTER = 4, AUDIT_NFT_OP_CHAIN_REGISTER = 5, AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, AUDIT_NFT_OP_RULE_REGISTER = 7, AUDIT_NFT_OP_RULE_UNREGISTER = 8, AUDIT_NFT_OP_SET_REGISTER = 9, AUDIT_NFT_OP_SET_UNREGISTER = 10, AUDIT_NFT_OP_SETELEM_REGISTER = 11, AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, AUDIT_NFT_OP_GEN_REGISTER = 13, AUDIT_NFT_OP_OBJ_REGISTER = 14, AUDIT_NFT_OP_OBJ_UNREGISTER = 15, AUDIT_NFT_OP_OBJ_RESET = 16, AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, AUDIT_NFT_OP_SETELEM_RESET = 19, AUDIT_NFT_OP_RULE_RESET = 20, AUDIT_NFT_OP_INVALID = 21, }; enum audit_nlgrps { AUDIT_NLGRP_NONE = 0, AUDIT_NLGRP_READLOG = 1, __AUDIT_NLGRP_MAX = 2, }; enum 
audit_ntp_type { AUDIT_NTP_OFFSET = 0, AUDIT_NTP_FREQ = 1, AUDIT_NTP_STATUS = 2, AUDIT_NTP_TAI = 3, AUDIT_NTP_TICK = 4, AUDIT_NTP_ADJUST = 5, AUDIT_NTP_NVALS = 6, }; enum audit_state { AUDIT_STATE_DISABLED = 0, AUDIT_STATE_BUILD = 1, AUDIT_STATE_RECORD = 2, }; enum auditsc_class_t { AUDITSC_NATIVE = 0, AUDITSC_COMPAT = 1, AUDITSC_OPEN = 2, AUDITSC_OPENAT = 3, AUDITSC_SOCKETCALL = 4, AUDITSC_EXECVE = 5, AUDITSC_OPENAT2 = 6, AUDITSC_NVALS = 7, }; enum backlight_notification { BACKLIGHT_REGISTERED = 0, BACKLIGHT_UNREGISTERED = 1, }; enum backlight_scale { BACKLIGHT_SCALE_UNKNOWN = 0, BACKLIGHT_SCALE_LINEAR = 1, BACKLIGHT_SCALE_NON_LINEAR = 2, }; enum backlight_type { BACKLIGHT_RAW = 1, BACKLIGHT_PLATFORM = 2, BACKLIGHT_FIRMWARE = 3, BACKLIGHT_TYPE_MAX = 4, }; enum backlight_update_reason { BACKLIGHT_UPDATE_HOTKEY = 0, BACKLIGHT_UPDATE_SYSFS = 1, }; enum batadv_packettype { BATADV_IV_OGM = 0, BATADV_BCAST = 1, BATADV_CODED = 2, BATADV_ELP = 3, BATADV_OGM2 = 4, BATADV_MCAST = 5, BATADV_UNICAST = 64, BATADV_UNICAST_FRAG = 65, BATADV_UNICAST_4ADDR = 66, BATADV_ICMP = 67, BATADV_UNICAST_TVLV = 68, }; enum bbr_mode { BBR_STARTUP = 0, BBR_DRAIN = 1, BBR_PROBE_BW = 2, BBR_PROBE_RTT = 3, }; enum behavior { EXCLUSIVE = 0, SHARED = 1, DROP = 2, }; enum bfs_result { BFS_EINVALIDNODE = -2, BFS_EQUEUEFULL = -1, BFS_RMATCH = 0, BFS_RNOMATCH = 1, }; enum bh_state_bits { BH_Uptodate = 0, BH_Dirty = 1, BH_Lock = 2, BH_Req = 3, BH_Mapped = 4, BH_New = 5, BH_Async_Read = 6, BH_Async_Write = 7, BH_Delay = 8, BH_Boundary = 9, BH_Write_EIO = 10, BH_Unwritten = 11, BH_Quiet = 12, BH_Meta = 13, BH_Prio = 14, BH_Defer_Completion = 15, BH_PrivateStart = 16, }; enum bhi_mitigations { BHI_MITIGATION_OFF = 0, BHI_MITIGATION_ON = 1, BHI_MITIGATION_VMEXIT_ONLY = 2, }; enum bio_merge_status { BIO_MERGE_OK = 0, BIO_MERGE_NONE = 1, BIO_MERGE_FAILED = 2, }; enum bio_post_read_step { STEP_INITIAL = 0, STEP_DECRYPT = 1, STEP_VERITY = 2, STEP_MAX = 3, }; enum bios_platform_class { BIOS_CLIENT = 0, BIOS_SERVER = 1, }; enum blacklist_hash_type { BLACKLIST_HASH_X509_TBS = 1, BLACKLIST_HASH_BINARY = 2, }; enum blake2b_iv { BLAKE2B_IV0 = 7640891576956012808ULL, BLAKE2B_IV1 = 13503953896175478587ULL, BLAKE2B_IV2 = 4354685564936845355ULL, BLAKE2B_IV3 = 11912009170470909681ULL, BLAKE2B_IV4 = 5840696475078001361ULL, BLAKE2B_IV5 = 11170449401992604703ULL, BLAKE2B_IV6 = 2270897969802886507ULL, BLAKE2B_IV7 = 6620516959819538809ULL, }; enum blake2b_lengths { BLAKE2B_BLOCK_SIZE = 128, BLAKE2B_HASH_SIZE = 64, BLAKE2B_KEY_SIZE = 64, BLAKE2B_160_HASH_SIZE = 20, BLAKE2B_256_HASH_SIZE = 32, BLAKE2B_384_HASH_SIZE = 48, BLAKE2B_512_HASH_SIZE = 64, }; enum blake2s_iv { BLAKE2S_IV0 = 1779033703, BLAKE2S_IV1 = 3144134277, BLAKE2S_IV2 = 1013904242, BLAKE2S_IV3 = 2773480762, BLAKE2S_IV4 = 1359893119, BLAKE2S_IV5 = 2600822924, BLAKE2S_IV6 = 528734635, BLAKE2S_IV7 = 1541459225, }; enum blake2s_lengths { BLAKE2S_BLOCK_SIZE = 64, BLAKE2S_HASH_SIZE = 32, BLAKE2S_KEY_SIZE = 32, BLAKE2S_128_HASH_SIZE = 16, BLAKE2S_160_HASH_SIZE = 20, BLAKE2S_224_HASH_SIZE = 28, BLAKE2S_256_HASH_SIZE = 32, }; enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, BLK_MAX_SEGMENT_SIZE = 65536, BLK_SEG_BOUNDARY_MASK = 4294967295, }; enum blk_eh_timer_return { BLK_EH_DONE = 0, BLK_EH_RESET_TIMER = 1, }; enum blk_integrity_checksum { BLK_INTEGRITY_CSUM_NONE = 0, BLK_INTEGRITY_CSUM_IP = 1, BLK_INTEGRITY_CSUM_CRC = 2, BLK_INTEGRITY_CSUM_CRC64 = 3, } __attribute__((mode(byte))); enum blk_integrity_flags { BLK_INTEGRITY_NOVERIFY = 1, BLK_INTEGRITY_NOGENERATE = 2, 
BLK_INTEGRITY_DEVICE_CAPABLE = 4, BLK_INTEGRITY_REF_TAG = 8, BLK_INTEGRITY_STACKED = 16, }; enum blk_unique_id { BLK_UID_T10 = 1, BLK_UID_EUI64 = 2, BLK_UID_NAA = 3, }; enum blkg_iostat_type { BLKG_IOSTAT_READ = 0, BLKG_IOSTAT_WRITE = 1, BLKG_IOSTAT_DISCARD = 2, BLKG_IOSTAT_NR = 3, }; enum blkg_rwstat_type { BLKG_RWSTAT_READ = 0, BLKG_RWSTAT_WRITE = 1, BLKG_RWSTAT_SYNC = 2, BLKG_RWSTAT_ASYNC = 3, BLKG_RWSTAT_DISCARD = 4, BLKG_RWSTAT_NR = 5, BLKG_RWSTAT_TOTAL = 5, }; enum blktrace_act { __BLK_TA_QUEUE = 1, __BLK_TA_BACKMERGE = 2, __BLK_TA_FRONTMERGE = 3, __BLK_TA_GETRQ = 4, __BLK_TA_SLEEPRQ = 5, __BLK_TA_REQUEUE = 6, __BLK_TA_ISSUE = 7, __BLK_TA_COMPLETE = 8, __BLK_TA_PLUG = 9, __BLK_TA_UNPLUG_IO = 10, __BLK_TA_UNPLUG_TIMER = 11, __BLK_TA_INSERT = 12, __BLK_TA_SPLIT = 13, __BLK_TA_BOUNCE = 14, __BLK_TA_REMAP = 15, __BLK_TA_ABORT = 16, __BLK_TA_DRV_DATA = 17, __BLK_TA_CGROUP = 256, }; enum blktrace_cat { BLK_TC_READ = 1, BLK_TC_WRITE = 2, BLK_TC_FLUSH = 4, BLK_TC_SYNC = 8, BLK_TC_SYNCIO = 8, BLK_TC_QUEUE = 16, BLK_TC_REQUEUE = 32, BLK_TC_ISSUE = 64, BLK_TC_COMPLETE = 128, BLK_TC_FS = 256, BLK_TC_PC = 512, BLK_TC_NOTIFY = 1024, BLK_TC_AHEAD = 2048, BLK_TC_META = 4096, BLK_TC_DISCARD = 8192, BLK_TC_DRV_DATA = 16384, BLK_TC_FUA = 32768, BLK_TC_END = 32768, }; enum blktrace_notify { __BLK_TN_PROCESS = 0, __BLK_TN_TIMESTAMP = 1, __BLK_TN_MESSAGE = 2, __BLK_TN_CGROUP = 256, }; enum bp_type_idx { TYPE_INST = 0, TYPE_DATA = 0, TYPE_MAX = 1, }; enum bpf_access_src { ACCESS_DIRECT = 1, ACCESS_HELPER = 2, }; enum bpf_access_type { BPF_READ = 1, BPF_WRITE = 2, }; enum bpf_addr_space_cast { BPF_ADDR_SPACE_CAST = 1, }; enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET = 0, BPF_ADJ_ROOM_MAC = 1, }; enum bpf_arg_type { ARG_DONTCARE = 0, ARG_CONST_MAP_PTR = 1, ARG_PTR_TO_MAP_KEY = 2, ARG_PTR_TO_MAP_VALUE = 3, ARG_PTR_TO_MEM = 4, ARG_PTR_TO_ARENA = 5, ARG_CONST_SIZE = 6, ARG_CONST_SIZE_OR_ZERO = 7, ARG_PTR_TO_CTX = 8, ARG_ANYTHING = 9, ARG_PTR_TO_SPIN_LOCK = 10, ARG_PTR_TO_SOCK_COMMON = 11, ARG_PTR_TO_SOCKET = 12, ARG_PTR_TO_BTF_ID = 13, ARG_PTR_TO_RINGBUF_MEM = 14, ARG_CONST_ALLOC_SIZE_OR_ZERO = 15, ARG_PTR_TO_BTF_ID_SOCK_COMMON = 16, ARG_PTR_TO_PERCPU_BTF_ID = 17, ARG_PTR_TO_FUNC = 18, ARG_PTR_TO_STACK = 19, ARG_PTR_TO_CONST_STR = 20, ARG_PTR_TO_TIMER = 21, ARG_KPTR_XCHG_DEST = 22, ARG_PTR_TO_DYNPTR = 23, __BPF_ARG_TYPE_MAX = 24, ARG_PTR_TO_MAP_VALUE_OR_NULL = 259, ARG_PTR_TO_MEM_OR_NULL = 260, ARG_PTR_TO_CTX_OR_NULL = 264, ARG_PTR_TO_SOCKET_OR_NULL = 268, ARG_PTR_TO_STACK_OR_NULL = 275, ARG_PTR_TO_BTF_ID_OR_NULL = 269, ARG_PTR_TO_UNINIT_MEM = 32772, ARG_PTR_TO_FIXED_SIZE_MEM = 262148, __BPF_ARG_TYPE_LIMIT = 67108863, }; enum bpf_async_type { BPF_ASYNC_TYPE_TIMER = 0, BPF_ASYNC_TYPE_WQ = 1, }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS = 0, BPF_CGROUP_INET_EGRESS = 1, BPF_CGROUP_INET_SOCK_CREATE = 2, BPF_CGROUP_SOCK_OPS = 3, BPF_SK_SKB_STREAM_PARSER = 4, BPF_SK_SKB_STREAM_VERDICT = 5, BPF_CGROUP_DEVICE = 6, BPF_SK_MSG_VERDICT = 7, BPF_CGROUP_INET4_BIND = 8, BPF_CGROUP_INET6_BIND = 9, BPF_CGROUP_INET4_CONNECT = 10, BPF_CGROUP_INET6_CONNECT = 11, BPF_CGROUP_INET4_POST_BIND = 12, BPF_CGROUP_INET6_POST_BIND = 13, BPF_CGROUP_UDP4_SENDMSG = 14, BPF_CGROUP_UDP6_SENDMSG = 15, BPF_LIRC_MODE2 = 16, BPF_FLOW_DISSECTOR = 17, BPF_CGROUP_SYSCTL = 18, BPF_CGROUP_UDP4_RECVMSG = 19, BPF_CGROUP_UDP6_RECVMSG = 20, BPF_CGROUP_GETSOCKOPT = 21, BPF_CGROUP_SETSOCKOPT = 22, BPF_TRACE_RAW_TP = 23, BPF_TRACE_FENTRY = 24, BPF_TRACE_FEXIT = 25, BPF_MODIFY_RETURN = 26, BPF_LSM_MAC = 27, BPF_TRACE_ITER = 28, BPF_CGROUP_INET4_GETPEERNAME = 
29, BPF_CGROUP_INET6_GETPEERNAME = 30, BPF_CGROUP_INET4_GETSOCKNAME = 31, BPF_CGROUP_INET6_GETSOCKNAME = 32, BPF_XDP_DEVMAP = 33, BPF_CGROUP_INET_SOCK_RELEASE = 34, BPF_XDP_CPUMAP = 35, BPF_SK_LOOKUP = 36, BPF_XDP = 37, BPF_SK_SKB_VERDICT = 38, BPF_SK_REUSEPORT_SELECT = 39, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, BPF_PERF_EVENT = 41, BPF_TRACE_KPROBE_MULTI = 42, BPF_LSM_CGROUP = 43, BPF_STRUCT_OPS = 44, BPF_NETFILTER = 45, BPF_TCX_INGRESS = 46, BPF_TCX_EGRESS = 47, BPF_TRACE_UPROBE_MULTI = 48, BPF_CGROUP_UNIX_CONNECT = 49, BPF_CGROUP_UNIX_SENDMSG = 50, BPF_CGROUP_UNIX_RECVMSG = 51, BPF_CGROUP_UNIX_GETPEERNAME = 52, BPF_CGROUP_UNIX_GETSOCKNAME = 53, BPF_NETKIT_PRIMARY = 54, BPF_NETKIT_PEER = 55, BPF_TRACE_KPROBE_SESSION = 56, __MAX_BPF_ATTACH_TYPE = 57, }; enum bpf_audit { BPF_AUDIT_LOAD = 0, BPF_AUDIT_UNLOAD = 1, BPF_AUDIT_MAX = 2, }; enum bpf_cgroup_iter_order { BPF_CGROUP_ITER_ORDER_UNSPEC = 0, BPF_CGROUP_ITER_SELF_ONLY = 1, BPF_CGROUP_ITER_DESCENDANTS_PRE = 2, BPF_CGROUP_ITER_DESCENDANTS_POST = 3, BPF_CGROUP_ITER_ANCESTORS_UP = 4, }; enum bpf_cgroup_storage_type { BPF_CGROUP_STORAGE_SHARED = 0, BPF_CGROUP_STORAGE_PERCPU = 1, __BPF_CGROUP_STORAGE_MAX = 2, }; enum bpf_check_mtu_flags { BPF_MTU_CHK_SEGS = 1, }; enum bpf_check_mtu_ret { BPF_MTU_CHK_RET_SUCCESS = 0, BPF_MTU_CHK_RET_FRAG_NEEDED = 1, BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, }; enum bpf_cmd { BPF_MAP_CREATE = 0, BPF_MAP_LOOKUP_ELEM = 1, BPF_MAP_UPDATE_ELEM = 2, BPF_MAP_DELETE_ELEM = 3, BPF_MAP_GET_NEXT_KEY = 4, BPF_PROG_LOAD = 5, BPF_OBJ_PIN = 6, BPF_OBJ_GET = 7, BPF_PROG_ATTACH = 8, BPF_PROG_DETACH = 9, BPF_PROG_TEST_RUN = 10, BPF_PROG_RUN = 10, BPF_PROG_GET_NEXT_ID = 11, BPF_MAP_GET_NEXT_ID = 12, BPF_PROG_GET_FD_BY_ID = 13, BPF_MAP_GET_FD_BY_ID = 14, BPF_OBJ_GET_INFO_BY_FD = 15, BPF_PROG_QUERY = 16, BPF_RAW_TRACEPOINT_OPEN = 17, BPF_BTF_LOAD = 18, BPF_BTF_GET_FD_BY_ID = 19, BPF_TASK_FD_QUERY = 20, BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, BPF_MAP_FREEZE = 22, BPF_BTF_GET_NEXT_ID = 23, BPF_MAP_LOOKUP_BATCH = 24, BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, BPF_MAP_UPDATE_BATCH = 26, BPF_MAP_DELETE_BATCH = 27, BPF_LINK_CREATE = 28, BPF_LINK_UPDATE = 29, BPF_LINK_GET_FD_BY_ID = 30, BPF_LINK_GET_NEXT_ID = 31, BPF_ENABLE_STATS = 32, BPF_ITER_CREATE = 33, BPF_LINK_DETACH = 34, BPF_PROG_BIND_MAP = 35, BPF_TOKEN_CREATE = 36, __MAX_BPF_CMD = 37, }; enum bpf_cond_pseudo_jmp { BPF_MAY_GOTO = 0, }; enum bpf_core_relo_kind { BPF_CORE_FIELD_BYTE_OFFSET = 0, BPF_CORE_FIELD_BYTE_SIZE = 1, BPF_CORE_FIELD_EXISTS = 2, BPF_CORE_FIELD_SIGNED = 3, BPF_CORE_FIELD_LSHIFT_U64 = 4, BPF_CORE_FIELD_RSHIFT_U64 = 5, BPF_CORE_TYPE_ID_LOCAL = 6, BPF_CORE_TYPE_ID_TARGET = 7, BPF_CORE_TYPE_EXISTS = 8, BPF_CORE_TYPE_SIZE = 9, BPF_CORE_ENUMVAL_EXISTS = 10, BPF_CORE_ENUMVAL_VALUE = 11, BPF_CORE_TYPE_MATCHES = 12, }; enum bpf_dynptr_type { BPF_DYNPTR_TYPE_INVALID = 0, BPF_DYNPTR_TYPE_LOCAL = 1, BPF_DYNPTR_TYPE_RINGBUF = 2, BPF_DYNPTR_TYPE_SKB = 3, BPF_DYNPTR_TYPE_XDP = 4, }; enum bpf_fou_encap_type { FOU_BPF_ENCAP_FOU = 0, FOU_BPF_ENCAP_GUE = 1, }; enum bpf_func_id { BPF_FUNC_unspec = 0, BPF_FUNC_map_lookup_elem = 1, BPF_FUNC_map_update_elem = 2, BPF_FUNC_map_delete_elem = 3, BPF_FUNC_probe_read = 4, BPF_FUNC_ktime_get_ns = 5, BPF_FUNC_trace_printk = 6, BPF_FUNC_get_prandom_u32 = 7, BPF_FUNC_get_smp_processor_id = 8, BPF_FUNC_skb_store_bytes = 9, BPF_FUNC_l3_csum_replace = 10, BPF_FUNC_l4_csum_replace = 11, BPF_FUNC_tail_call = 12, BPF_FUNC_clone_redirect = 13, BPF_FUNC_get_current_pid_tgid = 14, BPF_FUNC_get_current_uid_gid = 15, BPF_FUNC_get_current_comm = 16, 
BPF_FUNC_get_cgroup_classid = 17, BPF_FUNC_skb_vlan_push = 18, BPF_FUNC_skb_vlan_pop = 19, BPF_FUNC_skb_get_tunnel_key = 20, BPF_FUNC_skb_set_tunnel_key = 21, BPF_FUNC_perf_event_read = 22, BPF_FUNC_redirect = 23, BPF_FUNC_get_route_realm = 24, BPF_FUNC_perf_event_output = 25, BPF_FUNC_skb_load_bytes = 26, BPF_FUNC_get_stackid = 27, BPF_FUNC_csum_diff = 28, BPF_FUNC_skb_get_tunnel_opt = 29, BPF_FUNC_skb_set_tunnel_opt = 30, BPF_FUNC_skb_change_proto = 31, BPF_FUNC_skb_change_type = 32, BPF_FUNC_skb_under_cgroup = 33, BPF_FUNC_get_hash_recalc = 34, BPF_FUNC_get_current_task = 35, BPF_FUNC_probe_write_user = 36, BPF_FUNC_current_task_under_cgroup = 37, BPF_FUNC_skb_change_tail = 38, BPF_FUNC_skb_pull_data = 39, BPF_FUNC_csum_update = 40, BPF_FUNC_set_hash_invalid = 41, BPF_FUNC_get_numa_node_id = 42, BPF_FUNC_skb_change_head = 43, BPF_FUNC_xdp_adjust_head = 44, BPF_FUNC_probe_read_str = 45, BPF_FUNC_get_socket_cookie = 46, BPF_FUNC_get_socket_uid = 47, BPF_FUNC_set_hash = 48, BPF_FUNC_setsockopt = 49, BPF_FUNC_skb_adjust_room = 50, BPF_FUNC_redirect_map = 51, BPF_FUNC_sk_redirect_map = 52, BPF_FUNC_sock_map_update = 53, BPF_FUNC_xdp_adjust_meta = 54, BPF_FUNC_perf_event_read_value = 55, BPF_FUNC_perf_prog_read_value = 56, BPF_FUNC_getsockopt = 57, BPF_FUNC_override_return = 58, BPF_FUNC_sock_ops_cb_flags_set = 59, BPF_FUNC_msg_redirect_map = 60, BPF_FUNC_msg_apply_bytes = 61, BPF_FUNC_msg_cork_bytes = 62, BPF_FUNC_msg_pull_data = 63, BPF_FUNC_bind = 64, BPF_FUNC_xdp_adjust_tail = 65, BPF_FUNC_skb_get_xfrm_state = 66, BPF_FUNC_get_stack = 67, BPF_FUNC_skb_load_bytes_relative = 68, BPF_FUNC_fib_lookup = 69, BPF_FUNC_sock_hash_update = 70, BPF_FUNC_msg_redirect_hash = 71, BPF_FUNC_sk_redirect_hash = 72, BPF_FUNC_lwt_push_encap = 73, BPF_FUNC_lwt_seg6_store_bytes = 74, BPF_FUNC_lwt_seg6_adjust_srh = 75, BPF_FUNC_lwt_seg6_action = 76, BPF_FUNC_rc_repeat = 77, BPF_FUNC_rc_keydown = 78, BPF_FUNC_skb_cgroup_id = 79, BPF_FUNC_get_current_cgroup_id = 80, BPF_FUNC_get_local_storage = 81, BPF_FUNC_sk_select_reuseport = 82, BPF_FUNC_skb_ancestor_cgroup_id = 83, BPF_FUNC_sk_lookup_tcp = 84, BPF_FUNC_sk_lookup_udp = 85, BPF_FUNC_sk_release = 86, BPF_FUNC_map_push_elem = 87, BPF_FUNC_map_pop_elem = 88, BPF_FUNC_map_peek_elem = 89, BPF_FUNC_msg_push_data = 90, BPF_FUNC_msg_pop_data = 91, BPF_FUNC_rc_pointer_rel = 92, BPF_FUNC_spin_lock = 93, BPF_FUNC_spin_unlock = 94, BPF_FUNC_sk_fullsock = 95, BPF_FUNC_tcp_sock = 96, BPF_FUNC_skb_ecn_set_ce = 97, BPF_FUNC_get_listener_sock = 98, BPF_FUNC_skc_lookup_tcp = 99, BPF_FUNC_tcp_check_syncookie = 100, BPF_FUNC_sysctl_get_name = 101, BPF_FUNC_sysctl_get_current_value = 102, BPF_FUNC_sysctl_get_new_value = 103, BPF_FUNC_sysctl_set_new_value = 104, BPF_FUNC_strtol = 105, BPF_FUNC_strtoul = 106, BPF_FUNC_sk_storage_get = 107, BPF_FUNC_sk_storage_delete = 108, BPF_FUNC_send_signal = 109, BPF_FUNC_tcp_gen_syncookie = 110, BPF_FUNC_skb_output = 111, BPF_FUNC_probe_read_user = 112, BPF_FUNC_probe_read_kernel = 113, BPF_FUNC_probe_read_user_str = 114, BPF_FUNC_probe_read_kernel_str = 115, BPF_FUNC_tcp_send_ack = 116, BPF_FUNC_send_signal_thread = 117, BPF_FUNC_jiffies64 = 118, BPF_FUNC_read_branch_records = 119, BPF_FUNC_get_ns_current_pid_tgid = 120, BPF_FUNC_xdp_output = 121, BPF_FUNC_get_netns_cookie = 122, BPF_FUNC_get_current_ancestor_cgroup_id = 123, BPF_FUNC_sk_assign = 124, BPF_FUNC_ktime_get_boot_ns = 125, BPF_FUNC_seq_printf = 126, BPF_FUNC_seq_write = 127, BPF_FUNC_sk_cgroup_id = 128, BPF_FUNC_sk_ancestor_cgroup_id = 129, BPF_FUNC_ringbuf_output = 130, 
BPF_FUNC_ringbuf_reserve = 131, BPF_FUNC_ringbuf_submit = 132, BPF_FUNC_ringbuf_discard = 133, BPF_FUNC_ringbuf_query = 134, BPF_FUNC_csum_level = 135, BPF_FUNC_skc_to_tcp6_sock = 136, BPF_FUNC_skc_to_tcp_sock = 137, BPF_FUNC_skc_to_tcp_timewait_sock = 138, BPF_FUNC_skc_to_tcp_request_sock = 139, BPF_FUNC_skc_to_udp6_sock = 140, BPF_FUNC_get_task_stack = 141, BPF_FUNC_load_hdr_opt = 142, BPF_FUNC_store_hdr_opt = 143, BPF_FUNC_reserve_hdr_opt = 144, BPF_FUNC_inode_storage_get = 145, BPF_FUNC_inode_storage_delete = 146, BPF_FUNC_d_path = 147, BPF_FUNC_copy_from_user = 148, BPF_FUNC_snprintf_btf = 149, BPF_FUNC_seq_printf_btf = 150, BPF_FUNC_skb_cgroup_classid = 151, BPF_FUNC_redirect_neigh = 152, BPF_FUNC_per_cpu_ptr = 153, BPF_FUNC_this_cpu_ptr = 154, BPF_FUNC_redirect_peer = 155, BPF_FUNC_task_storage_get = 156, BPF_FUNC_task_storage_delete = 157, BPF_FUNC_get_current_task_btf = 158, BPF_FUNC_bprm_opts_set = 159, BPF_FUNC_ktime_get_coarse_ns = 160, BPF_FUNC_ima_inode_hash = 161, BPF_FUNC_sock_from_file = 162, BPF_FUNC_check_mtu = 163, BPF_FUNC_for_each_map_elem = 164, BPF_FUNC_snprintf = 165, BPF_FUNC_sys_bpf = 166, BPF_FUNC_btf_find_by_name_kind = 167, BPF_FUNC_sys_close = 168, BPF_FUNC_timer_init = 169, BPF_FUNC_timer_set_callback = 170, BPF_FUNC_timer_start = 171, BPF_FUNC_timer_cancel = 172, BPF_FUNC_get_func_ip = 173, BPF_FUNC_get_attach_cookie = 174, BPF_FUNC_task_pt_regs = 175, BPF_FUNC_get_branch_snapshot = 176, BPF_FUNC_trace_vprintk = 177, BPF_FUNC_skc_to_unix_sock = 178, BPF_FUNC_kallsyms_lookup_name = 179, BPF_FUNC_find_vma = 180, BPF_FUNC_loop = 181, BPF_FUNC_strncmp = 182, BPF_FUNC_get_func_arg = 183, BPF_FUNC_get_func_ret = 184, BPF_FUNC_get_func_arg_cnt = 185, BPF_FUNC_get_retval = 186, BPF_FUNC_set_retval = 187, BPF_FUNC_xdp_get_buff_len = 188, BPF_FUNC_xdp_load_bytes = 189, BPF_FUNC_xdp_store_bytes = 190, BPF_FUNC_copy_from_user_task = 191, BPF_FUNC_skb_set_tstamp = 192, BPF_FUNC_ima_file_hash = 193, BPF_FUNC_kptr_xchg = 194, BPF_FUNC_map_lookup_percpu_elem = 195, BPF_FUNC_skc_to_mptcp_sock = 196, BPF_FUNC_dynptr_from_mem = 197, BPF_FUNC_ringbuf_reserve_dynptr = 198, BPF_FUNC_ringbuf_submit_dynptr = 199, BPF_FUNC_ringbuf_discard_dynptr = 200, BPF_FUNC_dynptr_read = 201, BPF_FUNC_dynptr_write = 202, BPF_FUNC_dynptr_data = 203, BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204, BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205, BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206, BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207, BPF_FUNC_ktime_get_tai_ns = 208, BPF_FUNC_user_ringbuf_drain = 209, BPF_FUNC_cgrp_storage_get = 210, BPF_FUNC_cgrp_storage_delete = 211, __BPF_FUNC_MAX_ID = 212, }; enum bpf_hdr_start_off { BPF_HDR_START_MAC = 0, BPF_HDR_START_NET = 1, }; enum bpf_iter_feature { BPF_ITER_RESCHED = 1, }; enum bpf_iter_state { BPF_ITER_STATE_INVALID = 0, BPF_ITER_STATE_ACTIVE = 1, BPF_ITER_STATE_DRAINED = 2, }; enum bpf_iter_task_type { BPF_TASK_ITER_ALL = 0, BPF_TASK_ITER_TID = 1, BPF_TASK_ITER_TGID = 2, }; enum bpf_jit_poke_reason { BPF_POKE_REASON_TAIL_CALL = 0, }; enum bpf_kfunc_flags { BPF_F_PAD_ZEROS = 1, }; enum bpf_link_type { BPF_LINK_TYPE_UNSPEC = 0, BPF_LINK_TYPE_RAW_TRACEPOINT = 1, BPF_LINK_TYPE_TRACING = 2, BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, BPF_LINK_TYPE_SOCKMAP = 14, __MAX_BPF_LINK_TYPE = 15, }; enum bpf_lru_list_type 
{ BPF_LRU_LIST_T_ACTIVE = 0, BPF_LRU_LIST_T_INACTIVE = 1, BPF_LRU_LIST_T_FREE = 2, BPF_LRU_LOCAL_LIST_T_FREE = 3, BPF_LRU_LOCAL_LIST_T_PENDING = 4, }; enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6 = 0, BPF_LWT_ENCAP_SEG6_INLINE = 1, BPF_LWT_ENCAP_IP = 2, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC = 0, BPF_MAP_TYPE_HASH = 1, BPF_MAP_TYPE_ARRAY = 2, BPF_MAP_TYPE_PROG_ARRAY = 3, BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, BPF_MAP_TYPE_PERCPU_HASH = 5, BPF_MAP_TYPE_PERCPU_ARRAY = 6, BPF_MAP_TYPE_STACK_TRACE = 7, BPF_MAP_TYPE_CGROUP_ARRAY = 8, BPF_MAP_TYPE_LRU_HASH = 9, BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, BPF_MAP_TYPE_LPM_TRIE = 11, BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, BPF_MAP_TYPE_HASH_OF_MAPS = 13, BPF_MAP_TYPE_DEVMAP = 14, BPF_MAP_TYPE_SOCKMAP = 15, BPF_MAP_TYPE_CPUMAP = 16, BPF_MAP_TYPE_XSKMAP = 17, BPF_MAP_TYPE_SOCKHASH = 18, BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, BPF_MAP_TYPE_CGROUP_STORAGE = 19, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, BPF_MAP_TYPE_QUEUE = 22, BPF_MAP_TYPE_STACK = 23, BPF_MAP_TYPE_SK_STORAGE = 24, BPF_MAP_TYPE_DEVMAP_HASH = 25, BPF_MAP_TYPE_STRUCT_OPS = 26, BPF_MAP_TYPE_RINGBUF = 27, BPF_MAP_TYPE_INODE_STORAGE = 28, BPF_MAP_TYPE_TASK_STORAGE = 29, BPF_MAP_TYPE_BLOOM_FILTER = 30, BPF_MAP_TYPE_USER_RINGBUF = 31, BPF_MAP_TYPE_CGRP_STORAGE = 32, BPF_MAP_TYPE_ARENA = 33, __MAX_BPF_MAP_TYPE = 34, }; enum bpf_netdev_command { XDP_SETUP_PROG = 0, XDP_SETUP_PROG_HW = 1, BPF_OFFLOAD_MAP_ALLOC = 2, BPF_OFFLOAD_MAP_FREE = 3, XDP_SETUP_XSK_POOL = 4, }; enum bpf_perf_event_type { BPF_PERF_EVENT_UNSPEC = 0, BPF_PERF_EVENT_UPROBE = 1, BPF_PERF_EVENT_URETPROBE = 2, BPF_PERF_EVENT_KPROBE = 3, BPF_PERF_EVENT_KRETPROBE = 4, BPF_PERF_EVENT_TRACEPOINT = 5, BPF_PERF_EVENT_EVENT = 6, }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6, BPF_PROG_TYPE_PERF_EVENT = 7, BPF_PROG_TYPE_CGROUP_SKB = 8, BPF_PROG_TYPE_CGROUP_SOCK = 9, BPF_PROG_TYPE_LWT_IN = 10, BPF_PROG_TYPE_LWT_OUT = 11, BPF_PROG_TYPE_LWT_XMIT = 12, BPF_PROG_TYPE_SOCK_OPS = 13, BPF_PROG_TYPE_SK_SKB = 14, BPF_PROG_TYPE_CGROUP_DEVICE = 15, BPF_PROG_TYPE_SK_MSG = 16, BPF_PROG_TYPE_RAW_TRACEPOINT = 17, BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, BPF_PROG_TYPE_LIRC_MODE2 = 20, BPF_PROG_TYPE_SK_REUSEPORT = 21, BPF_PROG_TYPE_FLOW_DISSECTOR = 22, BPF_PROG_TYPE_CGROUP_SYSCTL = 23, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, BPF_PROG_TYPE_TRACING = 26, BPF_PROG_TYPE_STRUCT_OPS = 27, BPF_PROG_TYPE_EXT = 28, BPF_PROG_TYPE_LSM = 29, BPF_PROG_TYPE_SK_LOOKUP = 30, BPF_PROG_TYPE_SYSCALL = 31, BPF_PROG_TYPE_NETFILTER = 32, __MAX_BPF_PROG_TYPE = 33, }; enum bpf_reg_liveness { REG_LIVE_NONE = 0, REG_LIVE_READ32 = 1, REG_LIVE_READ64 = 2, REG_LIVE_READ = 3, REG_LIVE_WRITTEN = 4, REG_LIVE_DONE = 8, }; enum bpf_reg_type { NOT_INIT = 0, SCALAR_VALUE = 1, PTR_TO_CTX = 2, CONST_PTR_TO_MAP = 3, PTR_TO_MAP_VALUE = 4, PTR_TO_MAP_KEY = 5, PTR_TO_STACK = 6, PTR_TO_PACKET_META = 7, PTR_TO_PACKET = 8, PTR_TO_PACKET_END = 9, PTR_TO_FLOW_KEYS = 10, PTR_TO_SOCKET = 11, PTR_TO_SOCK_COMMON = 12, PTR_TO_TCP_SOCK = 13, PTR_TO_TP_BUFFER = 14, PTR_TO_XDP_SOCK = 15, PTR_TO_BTF_ID = 16, PTR_TO_MEM = 17, PTR_TO_ARENA = 18, PTR_TO_BUF = 19, PTR_TO_FUNC = 20, CONST_PTR_TO_DYNPTR = 21, __BPF_REG_TYPE_MAX = 22, PTR_TO_MAP_VALUE_OR_NULL = 260, PTR_TO_SOCKET_OR_NULL = 267, 
PTR_TO_SOCK_COMMON_OR_NULL = 268, PTR_TO_TCP_SOCK_OR_NULL = 269, PTR_TO_BTF_ID_OR_NULL = 272, __BPF_REG_TYPE_LIMIT = 67108863, }; enum bpf_ret_code { BPF_OK = 0, BPF_DROP = 2, BPF_REDIRECT = 7, BPF_LWT_REROUTE = 128, BPF_FLOW_DISSECTOR_CONTINUE = 129, }; enum bpf_return_type { RET_INTEGER = 0, RET_VOID = 1, RET_PTR_TO_MAP_VALUE = 2, RET_PTR_TO_SOCKET = 3, RET_PTR_TO_TCP_SOCK = 4, RET_PTR_TO_SOCK_COMMON = 5, RET_PTR_TO_MEM = 6, RET_PTR_TO_MEM_OR_BTF_ID = 7, RET_PTR_TO_BTF_ID = 8, __BPF_RET_TYPE_MAX = 9, RET_PTR_TO_MAP_VALUE_OR_NULL = 258, RET_PTR_TO_SOCKET_OR_NULL = 259, RET_PTR_TO_TCP_SOCK_OR_NULL = 260, RET_PTR_TO_SOCK_COMMON_OR_NULL = 261, RET_PTR_TO_RINGBUF_MEM_OR_NULL = 1286, RET_PTR_TO_DYNPTR_MEM_OR_NULL = 262, RET_PTR_TO_BTF_ID_OR_NULL = 264, RET_PTR_TO_BTF_ID_TRUSTED = 1048584, __BPF_RET_TYPE_LIMIT = 67108863, }; enum bpf_stack_build_id_status { BPF_STACK_BUILD_ID_EMPTY = 0, BPF_STACK_BUILD_ID_VALID = 1, BPF_STACK_BUILD_ID_IP = 2, }; enum bpf_stack_slot_type { STACK_INVALID = 0, STACK_SPILL = 1, STACK_MISC = 2, STACK_ZERO = 3, STACK_DYNPTR = 4, STACK_ITER = 5, }; enum bpf_stats_type { BPF_STATS_RUN_TIME = 0, }; enum bpf_struct_ops_state { BPF_STRUCT_OPS_STATE_INIT = 0, BPF_STRUCT_OPS_STATE_INUSE = 1, BPF_STRUCT_OPS_STATE_TOBEFREE = 2, BPF_STRUCT_OPS_STATE_READY = 3, }; enum bpf_struct_walk_result { WALK_SCALAR = 0, WALK_PTR = 1, WALK_STRUCT = 2, }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT = 0, BPF_FD_TYPE_TRACEPOINT = 1, BPF_FD_TYPE_KPROBE = 2, BPF_FD_TYPE_KRETPROBE = 3, BPF_FD_TYPE_UPROBE = 4, BPF_FD_TYPE_URETPROBE = 5, }; enum bpf_task_vma_iter_find_op { task_vma_iter_first_vma = 0, task_vma_iter_next_vma = 1, task_vma_iter_find_vma = 2, }; enum bpf_text_poke_type { BPF_MOD_CALL = 0, BPF_MOD_JUMP = 1, }; enum bpf_tramp_prog_type { BPF_TRAMP_FENTRY = 0, BPF_TRAMP_FEXIT = 1, BPF_TRAMP_MODIFY_RETURN = 2, BPF_TRAMP_MAX = 3, BPF_TRAMP_REPLACE = 4, }; enum bpf_type { BPF_TYPE_UNSPEC = 0, BPF_TYPE_PROG = 1, BPF_TYPE_MAP = 2, BPF_TYPE_LINK = 3, }; enum bpf_type_flag { PTR_MAYBE_NULL = 256, MEM_RDONLY = 512, MEM_RINGBUF = 1024, MEM_USER = 2048, MEM_PERCPU = 4096, OBJ_RELEASE = 8192, PTR_UNTRUSTED = 16384, MEM_UNINIT = 32768, DYNPTR_TYPE_LOCAL = 65536, DYNPTR_TYPE_RINGBUF = 131072, MEM_FIXED_SIZE = 262144, MEM_ALLOC = 524288, PTR_TRUSTED = 1048576, MEM_RCU = 2097152, NON_OWN_REF = 4194304, DYNPTR_TYPE_SKB = 8388608, DYNPTR_TYPE_XDP = 16777216, MEM_ALIGNED = 33554432, __BPF_TYPE_FLAG_MAX = 33554433, __BPF_TYPE_LAST_FLAG = 33554432, }; enum bpf_xdp_mode { XDP_MODE_SKB = 0, XDP_MODE_DRV = 1, XDP_MODE_HW = 2, __MAX_XDP_MODE = 3, }; enum btf_arg_tag { ARG_TAG_CTX = 1, ARG_TAG_NONNULL = 2, ARG_TAG_TRUSTED = 4, ARG_TAG_NULLABLE = 8, ARG_TAG_ARENA = 16, }; enum btf_field_iter_kind { BTF_FIELD_ITER_IDS = 0, BTF_FIELD_ITER_STRS = 1, }; enum btf_field_type { BPF_SPIN_LOCK = 1, BPF_TIMER = 2, BPF_KPTR_UNREF = 4, BPF_KPTR_REF = 8, BPF_KPTR_PERCPU = 16, BPF_KPTR = 28, BPF_LIST_HEAD = 32, BPF_LIST_NODE = 64, BPF_RB_ROOT = 128, BPF_RB_NODE = 256, BPF_GRAPH_NODE = 320, BPF_GRAPH_ROOT = 160, BPF_REFCOUNT = 512, BPF_WORKQUEUE = 1024, }; enum btf_func_linkage { BTF_FUNC_STATIC = 0, BTF_FUNC_GLOBAL = 1, BTF_FUNC_EXTERN = 2, }; enum btf_kfunc_hook { BTF_KFUNC_HOOK_COMMON = 0, BTF_KFUNC_HOOK_XDP = 1, BTF_KFUNC_HOOK_TC = 2, BTF_KFUNC_HOOK_STRUCT_OPS = 3, BTF_KFUNC_HOOK_TRACING = 4, BTF_KFUNC_HOOK_SYSCALL = 5, BTF_KFUNC_HOOK_FMODRET = 6, BTF_KFUNC_HOOK_CGROUP = 7, BTF_KFUNC_HOOK_SCHED_ACT = 8, BTF_KFUNC_HOOK_SK_SKB = 9, BTF_KFUNC_HOOK_SOCKET_FILTER = 10, BTF_KFUNC_HOOK_LWT = 11, BTF_KFUNC_HOOK_NETFILTER = 
12, BTF_KFUNC_HOOK_KPROBE = 13, BTF_KFUNC_HOOK_MAX = 14, }; enum bug_trap_type { BUG_TRAP_TYPE_NONE = 0, BUG_TRAP_TYPE_WARN = 1, BUG_TRAP_TYPE_BUG = 2, }; enum bus_notifier_event { BUS_NOTIFY_ADD_DEVICE = 0, BUS_NOTIFY_DEL_DEVICE = 1, BUS_NOTIFY_REMOVED_DEVICE = 2, BUS_NOTIFY_BIND_DRIVER = 3, BUS_NOTIFY_BOUND_DRIVER = 4, BUS_NOTIFY_UNBIND_DRIVER = 5, BUS_NOTIFY_UNBOUND_DRIVER = 6, BUS_NOTIFY_DRIVER_NOT_BOUND = 7, }; enum cache_type { CACHE_TYPE_NOCACHE = 0, CACHE_TYPE_INST = 1, CACHE_TYPE_DATA = 2, CACHE_TYPE_SEPARATE = 3, CACHE_TYPE_UNIFIED = 4, }; enum cc_attr { CC_ATTR_MEM_ENCRYPT = 0, CC_ATTR_HOST_MEM_ENCRYPT = 1, CC_ATTR_GUEST_MEM_ENCRYPT = 2, CC_ATTR_GUEST_STATE_ENCRYPT = 3, CC_ATTR_GUEST_UNROLL_STRING_IO = 4, CC_ATTR_GUEST_SEV_SNP = 5, CC_ATTR_HOST_SEV_SNP = 6, }; enum cee_attrs { DCB_ATTR_CEE_UNSPEC = 0, DCB_ATTR_CEE_PEER_PG = 1, DCB_ATTR_CEE_PEER_PFC = 2, DCB_ATTR_CEE_PEER_APP_TABLE = 3, DCB_ATTR_CEE_TX_PG = 4, DCB_ATTR_CEE_RX_PG = 5, DCB_ATTR_CEE_PFC = 6, DCB_ATTR_CEE_APP_TABLE = 7, DCB_ATTR_CEE_FEAT = 8, __DCB_ATTR_CEE_MAX = 9, }; enum cfi_mode { CFI_AUTO = 0, CFI_OFF = 1, CFI_KCFI = 2, CFI_FINEIBT = 3, }; enum cgroup1_param { Opt_all = 0, Opt_clone_children = 1, Opt_cpuset_v2_mode = 2, Opt_name = 3, Opt_none = 4, Opt_noprefix = 5, Opt_release_agent = 6, Opt_xattr = 7, Opt_favordynmods = 8, Opt_nofavordynmods = 9, }; enum cgroup2_param { Opt_nsdelegate = 0, Opt_favordynmods___2 = 1, Opt_memory_localevents = 2, Opt_memory_recursiveprot = 3, Opt_memory_hugetlb_accounting = 4, Opt_pids_localevents = 5, nr__cgroup2_params = 6, }; enum cgroup_bpf_attach_type { CGROUP_BPF_ATTACH_TYPE_INVALID = -1, CGROUP_INET_INGRESS = 0, CGROUP_INET_EGRESS = 1, CGROUP_INET_SOCK_CREATE = 2, CGROUP_SOCK_OPS = 3, CGROUP_DEVICE = 4, CGROUP_INET4_BIND = 5, CGROUP_INET6_BIND = 6, CGROUP_INET4_CONNECT = 7, CGROUP_INET6_CONNECT = 8, CGROUP_UNIX_CONNECT = 9, CGROUP_INET4_POST_BIND = 10, CGROUP_INET6_POST_BIND = 11, CGROUP_UDP4_SENDMSG = 12, CGROUP_UDP6_SENDMSG = 13, CGROUP_UNIX_SENDMSG = 14, CGROUP_SYSCTL = 15, CGROUP_UDP4_RECVMSG = 16, CGROUP_UDP6_RECVMSG = 17, CGROUP_UNIX_RECVMSG = 18, CGROUP_GETSOCKOPT = 19, CGROUP_SETSOCKOPT = 20, CGROUP_INET4_GETPEERNAME = 21, CGROUP_INET6_GETPEERNAME = 22, CGROUP_UNIX_GETPEERNAME = 23, CGROUP_INET4_GETSOCKNAME = 24, CGROUP_INET6_GETSOCKNAME = 25, CGROUP_UNIX_GETSOCKNAME = 26, CGROUP_INET_SOCK_RELEASE = 27, CGROUP_LSM_START = 28, CGROUP_LSM_END = 37, MAX_CGROUP_BPF_ATTACH_TYPE = 38, }; enum cgroup_filetype { CGROUP_FILE_PROCS = 0, CGROUP_FILE_TASKS = 1, }; enum cgroup_opt_features { OPT_FEATURE_COUNT = 0, }; enum cgroup_subsys_id { cpuset_cgrp_id = 0, cpu_cgrp_id = 1, cpuacct_cgrp_id = 2, io_cgrp_id = 3, memory_cgrp_id = 4, devices_cgrp_id = 5, freezer_cgrp_id = 6, net_cls_cgrp_id = 7, perf_event_cgrp_id = 8, hugetlb_cgrp_id = 9, CGROUP_SUBSYS_COUNT = 10, }; enum chacha_constants { CHACHA_CONSTANT_EXPA = 1634760805, CHACHA_CONSTANT_ND_3 = 857760878, CHACHA_CONSTANT_2_BY = 2036477234, CHACHA_CONSTANT_TE_K = 1797285236, }; enum chipset_type { NOT_SUPPORTED = 0, SUPPORTED = 1, }; enum cleanup_prefix_rt_t { CLEANUP_PREFIX_RT_NOP = 0, CLEANUP_PREFIX_RT_DEL = 1, CLEANUP_PREFIX_RT_EXPIRE = 2, }; enum clear_refs_types { CLEAR_REFS_ALL = 1, CLEAR_REFS_ANON = 2, CLEAR_REFS_MAPPED = 3, CLEAR_REFS_SOFT_DIRTY = 4, CLEAR_REFS_MM_HIWATER_RSS = 5, CLEAR_REFS_LAST = 6, }; enum clock_event_state { CLOCK_EVT_STATE_DETACHED = 0, CLOCK_EVT_STATE_SHUTDOWN = 1, CLOCK_EVT_STATE_PERIODIC = 2, CLOCK_EVT_STATE_ONESHOT = 3, CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, }; enum clocksource_ids { CSID_GENERIC 
= 0, CSID_ARM_ARCH_COUNTER = 1, CSID_X86_TSC_EARLY = 2, CSID_X86_TSC = 3, CSID_X86_KVM_CLK = 4, CSID_X86_ART = 5, CSID_MAX = 6, }; enum cmis_cdb_fw_write_mechanism { CMIS_CDB_FW_WRITE_MECHANISM_LPL = 1, CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 17, }; enum compact_priority { COMPACT_PRIO_SYNC_FULL = 0, MIN_COMPACT_PRIORITY = 0, COMPACT_PRIO_SYNC_LIGHT = 1, MIN_COMPACT_COSTLY_PRIORITY = 1, DEF_COMPACT_PRIORITY = 1, COMPACT_PRIO_ASYNC = 2, INIT_COMPACT_PRIORITY = 2, }; enum compact_result { COMPACT_NOT_SUITABLE_ZONE = 0, COMPACT_SKIPPED = 1, COMPACT_DEFERRED = 2, COMPACT_NO_SUITABLE_PAGE = 3, COMPACT_CONTINUE = 4, COMPACT_COMPLETE = 5, COMPACT_PARTIAL_SKIPPED = 6, COMPACT_CONTENDED = 7, COMPACT_SUCCESS = 8, }; enum con_flush_mode { CONSOLE_FLUSH_PENDING = 0, CONSOLE_REPLAY_ALL = 1, }; enum con_msg_format_flags { MSG_FORMAT_DEFAULT = 0, MSG_FORMAT_SYSLOG = 1, }; enum con_scroll { SM_UP = 0, SM_DOWN = 1, }; enum cons_flags { CON_PRINTBUFFER = 1, CON_CONSDEV = 2, CON_ENABLED = 4, CON_BOOT = 8, CON_ANYTIME = 16, CON_BRL = 32, CON_EXTENDED = 64, CON_SUSPENDED = 128, CON_NBCON = 256, }; enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3, }; enum cp_error_code { CP_EC = 32767, CP_RET = 1, CP_IRET = 2, CP_ENDBR = 3, CP_RSTRORSSP = 4, CP_SETSSBSY = 5, CP_ENCL = 32768, }; enum cpa_warn { CPA_CONFLICT = 0, CPA_PROTECT = 1, CPA_DETECT = 2, }; enum cpio_fields { C_MAGIC = 0, C_INO = 1, C_MODE = 2, C_UID = 3, C_GID = 4, C_NLINK = 5, C_MTIME = 6, C_FILESIZE = 7, C_MAJ = 8, C_MIN = 9, C_RMAJ = 10, C_RMIN = 11, C_NAMESIZE = 12, C_CHKSUM = 13, C_NFIELDS = 14, }; enum cppc_regs { HIGHEST_PERF = 0, NOMINAL_PERF = 1, LOW_NON_LINEAR_PERF = 2, LOWEST_PERF = 3, GUARANTEED_PERF = 4, DESIRED_PERF = 5, MIN_PERF = 6, MAX_PERF = 7, PERF_REDUC_TOLERANCE = 8, TIME_WINDOW = 9, CTR_WRAP_TIME = 10, REFERENCE_CTR = 11, DELIVERED_CTR = 12, PERF_LIMITED = 13, ENABLE = 14, AUTO_SEL_ENABLE = 15, AUTO_ACT_WINDOW = 16, ENERGY_PERF = 17, REFERENCE_PERF = 18, LOWEST_FREQ = 19, NOMINAL_FREQ = 20, }; enum cpu_idle_type { __CPU_NOT_IDLE = 0, CPU_IDLE = 1, CPU_NEWLY_IDLE = 2, CPU_MAX_IDLE_TYPES = 3, }; enum cpu_mitigations { CPU_MITIGATIONS_OFF = 0, CPU_MITIGATIONS_AUTO = 1, CPU_MITIGATIONS_AUTO_NOSMT = 2, }; enum cpu_usage_stat { CPUTIME_USER = 0, CPUTIME_NICE = 1, CPUTIME_SYSTEM = 2, CPUTIME_SOFTIRQ = 3, CPUTIME_IRQ = 4, CPUTIME_IDLE = 5, CPUTIME_IOWAIT = 6, CPUTIME_STEAL = 7, CPUTIME_GUEST = 8, CPUTIME_GUEST_NICE = 9, NR_STATS = 10, }; enum cpuacct_stat_index { CPUACCT_STAT_USER = 0, CPUACCT_STAT_SYSTEM = 1, CPUACCT_STAT_NSTATS = 2, }; enum cpufreq_table_sorting { CPUFREQ_TABLE_UNSORTED = 0, CPUFREQ_TABLE_SORTED_ASCENDING = 1, CPUFREQ_TABLE_SORTED_DESCENDING = 2, }; enum cpuhp_smt_control { CPU_SMT_ENABLED = 0, CPU_SMT_DISABLED = 1, CPU_SMT_FORCE_DISABLED = 2, CPU_SMT_NOT_SUPPORTED = 3, CPU_SMT_NOT_IMPLEMENTED = 4, }; enum cpuhp_state { CPUHP_INVALID = -1, CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS = 1, CPUHP_PERF_PREPARE = 2, CPUHP_PERF_X86_PREPARE = 3, CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, CPUHP_PERF_POWER = 5, CPUHP_PERF_SUPERH = 6, CPUHP_X86_HPET_DEAD = 7, CPUHP_X86_MCE_DEAD = 8, CPUHP_VIRT_NET_DEAD = 9, CPUHP_IBMVNIC_DEAD = 10, CPUHP_SLUB_DEAD = 11, CPUHP_DEBUG_OBJ_DEAD = 12, CPUHP_MM_WRITEBACK_DEAD = 13, CPUHP_MM_VMSTAT_DEAD = 14, CPUHP_SOFTIRQ_DEAD = 15, CPUHP_NET_MVNETA_DEAD = 16, CPUHP_CPUIDLE_DEAD = 17, CPUHP_ARM64_FPSIMD_DEAD = 18, CPUHP_ARM_OMAP_WAKE_DEAD = 19, CPUHP_IRQ_POLL_DEAD = 20, CPUHP_BLOCK_SOFTIRQ_DEAD = 21, CPUHP_BIO_DEAD = 22, CPUHP_ACPI_CPUDRV_DEAD = 23, CPUHP_S390_PFAULT_DEAD = 24, CPUHP_BLK_MQ_DEAD = 25, 
CPUHP_FS_BUFF_DEAD = 26, CPUHP_PRINTK_DEAD = 27, CPUHP_MM_MEMCQ_DEAD = 28, CPUHP_PERCPU_CNT_DEAD = 29, CPUHP_RADIX_DEAD = 30, CPUHP_PAGE_ALLOC = 31, CPUHP_NET_DEV_DEAD = 32, CPUHP_PCI_XGENE_DEAD = 33, CPUHP_IOMMU_IOVA_DEAD = 34, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, CPUHP_PADATA_DEAD = 36, CPUHP_AP_DTPM_CPU_DEAD = 37, CPUHP_RANDOM_PREPARE = 38, CPUHP_WORKQUEUE_PREP = 39, CPUHP_POWER_NUMA_PREPARE = 40, CPUHP_HRTIMERS_PREPARE = 41, CPUHP_X2APIC_PREPARE = 42, CPUHP_SMPCFD_PREPARE = 43, CPUHP_RELAY_PREPARE = 44, CPUHP_MD_RAID5_PREPARE = 45, CPUHP_RCUTREE_PREP = 46, CPUHP_CPUIDLE_COUPLED_PREPARE = 47, CPUHP_POWERPC_PMAC_PREPARE = 48, CPUHP_POWERPC_MMU_CTX_PREPARE = 49, CPUHP_XEN_PREPARE = 50, CPUHP_XEN_EVTCHN_PREPARE = 51, CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, CPUHP_SH_SH3X_PREPARE = 53, CPUHP_TOPOLOGY_PREPARE = 54, CPUHP_NET_IUCV_PREPARE = 55, CPUHP_ARM_BL_PREPARE = 56, CPUHP_TRACE_RB_PREPARE = 57, CPUHP_MM_ZS_PREPARE = 58, CPUHP_MM_ZSWP_POOL_PREPARE = 59, CPUHP_KVM_PPC_BOOK3S_PREPARE = 60, CPUHP_ZCOMP_PREPARE = 61, CPUHP_TIMERS_PREPARE = 62, CPUHP_TMIGR_PREPARE = 63, CPUHP_MIPS_SOC_PREPARE = 64, CPUHP_BP_PREPARE_DYN = 65, CPUHP_BP_PREPARE_DYN_END = 85, CPUHP_BP_KICK_AP = 86, CPUHP_BRINGUP_CPU = 87, CPUHP_AP_IDLE_DEAD = 88, CPUHP_AP_OFFLINE = 89, CPUHP_AP_CACHECTRL_STARTING = 90, CPUHP_AP_SCHED_STARTING = 91, CPUHP_AP_RCUTREE_DYING = 92, CPUHP_AP_CPU_PM_STARTING = 93, CPUHP_AP_IRQ_GIC_STARTING = 94, CPUHP_AP_IRQ_HIP04_STARTING = 95, CPUHP_AP_IRQ_APPLE_AIC_STARTING = 96, CPUHP_AP_IRQ_ARMADA_XP_STARTING = 97, CPUHP_AP_IRQ_BCM2836_STARTING = 98, CPUHP_AP_IRQ_MIPS_GIC_STARTING = 99, CPUHP_AP_IRQ_EIOINTC_STARTING = 100, CPUHP_AP_IRQ_AVECINTC_STARTING = 101, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 102, CPUHP_AP_IRQ_RISCV_IMSIC_STARTING = 103, CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING = 104, CPUHP_AP_ARM_MVEBU_COHERENCY = 105, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 106, CPUHP_AP_PERF_X86_STARTING = 107, CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 108, CPUHP_AP_PERF_XTENSA_STARTING = 109, CPUHP_AP_ARM_VFP_STARTING = 110, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 111, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 112, CPUHP_AP_PERF_ARM_ACPI_STARTING = 113, CPUHP_AP_PERF_ARM_STARTING = 114, CPUHP_AP_PERF_RISCV_STARTING = 115, CPUHP_AP_ARM_L2X0_STARTING = 116, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 117, CPUHP_AP_ARM_ARCH_TIMER_STARTING = 118, CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING = 119, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 120, CPUHP_AP_JCORE_TIMER_STARTING = 121, CPUHP_AP_ARM_TWD_STARTING = 122, CPUHP_AP_QCOM_TIMER_STARTING = 123, CPUHP_AP_TEGRA_TIMER_STARTING = 124, CPUHP_AP_ARMADA_TIMER_STARTING = 125, CPUHP_AP_MIPS_GIC_TIMER_STARTING = 126, CPUHP_AP_ARC_TIMER_STARTING = 127, CPUHP_AP_REALTEK_TIMER_STARTING = 128, CPUHP_AP_RISCV_TIMER_STARTING = 129, CPUHP_AP_CLINT_TIMER_STARTING = 130, CPUHP_AP_CSKY_TIMER_STARTING = 131, CPUHP_AP_TI_GP_TIMER_STARTING = 132, CPUHP_AP_HYPERV_TIMER_STARTING = 133, CPUHP_AP_DUMMY_TIMER_STARTING = 134, CPUHP_AP_ARM_XEN_STARTING = 135, CPUHP_AP_ARM_XEN_RUNSTATE_STARTING = 136, CPUHP_AP_ARM_CORESIGHT_STARTING = 137, CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 138, CPUHP_AP_ARM64_ISNDEP_STARTING = 139, CPUHP_AP_SMPCFD_DYING = 140, CPUHP_AP_HRTIMERS_DYING = 141, CPUHP_AP_TICK_DYING = 142, CPUHP_AP_X86_TBOOT_DYING = 143, CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 144, CPUHP_AP_ONLINE = 145, CPUHP_TEARDOWN_CPU = 146, CPUHP_AP_ONLINE_IDLE = 147, CPUHP_AP_HYPERV_ONLINE = 148, CPUHP_AP_KVM_ONLINE = 149, CPUHP_AP_SCHED_WAIT_EMPTY = 150, CPUHP_AP_SMPBOOT_THREADS = 151, CPUHP_AP_IRQ_AFFINITY_ONLINE = 152, 
CPUHP_AP_BLK_MQ_ONLINE = 153, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 154, CPUHP_AP_X86_INTEL_EPB_ONLINE = 155, CPUHP_AP_PERF_ONLINE = 156, CPUHP_AP_PERF_X86_ONLINE = 157, CPUHP_AP_PERF_X86_UNCORE_ONLINE = 158, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 159, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 160, CPUHP_AP_PERF_X86_RAPL_ONLINE = 161, CPUHP_AP_PERF_S390_CF_ONLINE = 162, CPUHP_AP_PERF_S390_SF_ONLINE = 163, CPUHP_AP_PERF_ARM_CCI_ONLINE = 164, CPUHP_AP_PERF_ARM_CCN_ONLINE = 165, CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE = 166, CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 167, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 168, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 169, CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 170, CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 171, CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE = 172, CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE = 173, CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE = 179, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 180, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 181, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 182, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 183, CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 184, CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 185, CPUHP_AP_PERF_CSKY_ONLINE = 186, CPUHP_AP_TMIGR_ONLINE = 187, CPUHP_AP_WATCHDOG_ONLINE = 188, CPUHP_AP_WORKQUEUE_ONLINE = 189, CPUHP_AP_RANDOM_ONLINE = 190, CPUHP_AP_RCUTREE_ONLINE = 191, CPUHP_AP_BASE_CACHEINFO_ONLINE = 192, CPUHP_AP_ONLINE_DYN = 193, CPUHP_AP_ONLINE_DYN_END = 233, CPUHP_AP_X86_HPET_ONLINE = 234, CPUHP_AP_X86_KVM_CLK_ONLINE = 235, CPUHP_AP_ACTIVE = 236, CPUHP_ONLINE = 237, }; enum cpuhp_sync_state { SYNC_STATE_DEAD = 0, SYNC_STATE_KICKED = 1, SYNC_STATE_SHOULD_DIE = 2, SYNC_STATE_ALIVE = 3, SYNC_STATE_SHOULD_ONLINE = 4, SYNC_STATE_ONLINE = 5, }; enum cpuid_leafs { CPUID_1_EDX = 0, CPUID_8000_0001_EDX = 1, CPUID_8086_0001_EDX = 2, CPUID_LNX_1 = 3, CPUID_1_ECX = 4, CPUID_C000_0001_EDX = 5, CPUID_8000_0001_ECX = 6, CPUID_LNX_2 = 7, CPUID_LNX_3 = 8, CPUID_7_0_EBX = 9, CPUID_D_1_EAX = 10, CPUID_LNX_4 = 11, CPUID_7_1_EAX = 12, CPUID_8000_0008_EBX = 13, CPUID_6_EAX = 14, CPUID_8000_000A_EDX = 15, CPUID_7_ECX = 16, CPUID_8000_0007_EBX = 17, CPUID_7_EDX = 18, CPUID_8000_001F_EAX = 19, CPUID_8000_0021_EAX = 20, CPUID_LNX_5 = 21, NR_CPUID_WORDS = 22, }; enum cpuid_regs_idx { CPUID_EAX = 0, CPUID_EBX = 1, CPUID_ECX = 2, CPUID_EDX = 3, }; enum crb_cancel { CRB_CANCEL_INVOKE = 1, }; enum crb_ctrl_req { CRB_CTRL_REQ_CMD_READY = 1, CRB_CTRL_REQ_GO_IDLE = 2, }; enum crb_ctrl_sts { CRB_CTRL_STS_ERROR = 1, CRB_CTRL_STS_TPM_IDLE = 2, }; enum crb_defaults { CRB_ACPI_START_REVISION_ID = 1, CRB_ACPI_START_INDEX = 1, }; enum crb_loc_ctrl { CRB_LOC_CTRL_REQUEST_ACCESS = 1, CRB_LOC_CTRL_RELINQUISH = 2, }; enum crb_loc_state { CRB_LOC_STATE_LOC_ASSIGNED = 2, CRB_LOC_STATE_TPM_REG_VALID_STS = 128, }; enum crb_start { CRB_START_INVOKE = 1, }; enum crb_status { CRB_DRV_STS_COMPLETE = 1, }; enum criteria { CR_POWER2_ALIGNED = 0, CR_GOAL_LEN_FAST = 1, CR_BEST_AVAIL_LEN = 2, CR_GOAL_LEN_SLOW = 3, CR_ANY_FREE = 4, EXT4_MB_NUM_CRS = 5, }; enum ct_dccp_roles { CT_DCCP_ROLE_CLIENT = 0, CT_DCCP_ROLE_SERVER = 1, __CT_DCCP_ROLE_MAX = 2, }; enum ct_dccp_states { CT_DCCP_NONE = 0, CT_DCCP_REQUEST = 1, CT_DCCP_RESPOND = 2, CT_DCCP_PARTOPEN = 3, CT_DCCP_OPEN = 4, CT_DCCP_CLOSEREQ = 5, CT_DCCP_CLOSING = 6, CT_DCCP_TIMEWAIT = 7, CT_DCCP_IGNORE = 8, CT_DCCP_INVALID = 9, __CT_DCCP_MAX = 10, }; enum cti_port_type 
{ CTI_PORT_TYPE_NONE = 0, CTI_PORT_TYPE_RS232 = 1, CTI_PORT_TYPE_RS422_485 = 2, CTI_PORT_TYPE_RS232_422_485_HW = 3, CTI_PORT_TYPE_RS232_422_485_SW = 4, CTI_PORT_TYPE_RS232_422_485_4B = 5, CTI_PORT_TYPE_RS232_422_485_2B = 6, CTI_PORT_TYPE_MAX = 7, }; enum ctx_state { CT_STATE_DISABLED = -1, CT_STATE_KERNEL = 0, CT_STATE_IDLE = 1, CT_STATE_USER = 2, CT_STATE_GUEST = 3, CT_STATE_MAX = 4, }; enum d_real_type { D_REAL_DATA = 0, D_REAL_METADATA = 1, }; enum d_walk_ret { D_WALK_CONTINUE = 0, D_WALK_QUIT = 1, D_WALK_NORETRY = 2, D_WALK_SKIP = 3, }; enum data_formats { DATA_FMT_DIGEST = 0, DATA_FMT_DIGEST_WITH_ALGO = 1, DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO = 2, DATA_FMT_STRING = 3, DATA_FMT_HEX = 4, DATA_FMT_UINT = 5, }; enum dcb_general_attr_values { DCB_ATTR_VALUE_UNDEFINED = 255, }; enum dcbevent_notif_type { DCB_APP_EVENT = 1, }; enum dcbnl_app_attrs { DCB_APP_ATTR_UNDEFINED = 0, DCB_APP_ATTR_IDTYPE = 1, DCB_APP_ATTR_ID = 2, DCB_APP_ATTR_PRIORITY = 3, __DCB_APP_ATTR_ENUM_MAX = 4, DCB_APP_ATTR_MAX = 3, }; enum dcbnl_attrs { DCB_ATTR_UNDEFINED = 0, DCB_ATTR_IFNAME = 1, DCB_ATTR_STATE = 2, DCB_ATTR_PFC_STATE = 3, DCB_ATTR_PFC_CFG = 4, DCB_ATTR_NUM_TC = 5, DCB_ATTR_PG_CFG = 6, DCB_ATTR_SET_ALL = 7, DCB_ATTR_PERM_HWADDR = 8, DCB_ATTR_CAP = 9, DCB_ATTR_NUMTCS = 10, DCB_ATTR_BCN = 11, DCB_ATTR_APP = 12, DCB_ATTR_IEEE = 13, DCB_ATTR_DCBX = 14, DCB_ATTR_FEATCFG = 15, DCB_ATTR_CEE = 16, __DCB_ATTR_ENUM_MAX = 17, DCB_ATTR_MAX = 16, }; enum dcbnl_bcn_attrs { DCB_BCN_ATTR_UNDEFINED = 0, DCB_BCN_ATTR_RP_0 = 1, DCB_BCN_ATTR_RP_1 = 2, DCB_BCN_ATTR_RP_2 = 3, DCB_BCN_ATTR_RP_3 = 4, DCB_BCN_ATTR_RP_4 = 5, DCB_BCN_ATTR_RP_5 = 6, DCB_BCN_ATTR_RP_6 = 7, DCB_BCN_ATTR_RP_7 = 8, DCB_BCN_ATTR_RP_ALL = 9, DCB_BCN_ATTR_BCNA_0 = 10, DCB_BCN_ATTR_BCNA_1 = 11, DCB_BCN_ATTR_ALPHA = 12, DCB_BCN_ATTR_BETA = 13, DCB_BCN_ATTR_GD = 14, DCB_BCN_ATTR_GI = 15, DCB_BCN_ATTR_TMAX = 16, DCB_BCN_ATTR_TD = 17, DCB_BCN_ATTR_RMIN = 18, DCB_BCN_ATTR_W = 19, DCB_BCN_ATTR_RD = 20, DCB_BCN_ATTR_RU = 21, DCB_BCN_ATTR_WRTT = 22, DCB_BCN_ATTR_RI = 23, DCB_BCN_ATTR_C = 24, DCB_BCN_ATTR_ALL = 25, __DCB_BCN_ATTR_ENUM_MAX = 26, DCB_BCN_ATTR_MAX = 25, }; enum dcbnl_cap_attrs { DCB_CAP_ATTR_UNDEFINED = 0, DCB_CAP_ATTR_ALL = 1, DCB_CAP_ATTR_PG = 2, DCB_CAP_ATTR_PFC = 3, DCB_CAP_ATTR_UP2TC = 4, DCB_CAP_ATTR_PG_TCS = 5, DCB_CAP_ATTR_PFC_TCS = 6, DCB_CAP_ATTR_GSP = 7, DCB_CAP_ATTR_BCN = 8, DCB_CAP_ATTR_DCBX = 9, __DCB_CAP_ATTR_ENUM_MAX = 10, DCB_CAP_ATTR_MAX = 9, }; enum dcbnl_commands { DCB_CMD_UNDEFINED = 0, DCB_CMD_GSTATE = 1, DCB_CMD_SSTATE = 2, DCB_CMD_PGTX_GCFG = 3, DCB_CMD_PGTX_SCFG = 4, DCB_CMD_PGRX_GCFG = 5, DCB_CMD_PGRX_SCFG = 6, DCB_CMD_PFC_GCFG = 7, DCB_CMD_PFC_SCFG = 8, DCB_CMD_SET_ALL = 9, DCB_CMD_GPERM_HWADDR = 10, DCB_CMD_GCAP = 11, DCB_CMD_GNUMTCS = 12, DCB_CMD_SNUMTCS = 13, DCB_CMD_PFC_GSTATE = 14, DCB_CMD_PFC_SSTATE = 15, DCB_CMD_BCN_GCFG = 16, DCB_CMD_BCN_SCFG = 17, DCB_CMD_GAPP = 18, DCB_CMD_SAPP = 19, DCB_CMD_IEEE_SET = 20, DCB_CMD_IEEE_GET = 21, DCB_CMD_GDCBX = 22, DCB_CMD_SDCBX = 23, DCB_CMD_GFEATCFG = 24, DCB_CMD_SFEATCFG = 25, DCB_CMD_CEE_GET = 26, DCB_CMD_IEEE_DEL = 27, __DCB_CMD_ENUM_MAX = 28, DCB_CMD_MAX = 27, }; enum dcbnl_featcfg_attrs { DCB_FEATCFG_ATTR_UNDEFINED = 0, DCB_FEATCFG_ATTR_ALL = 1, DCB_FEATCFG_ATTR_PG = 2, DCB_FEATCFG_ATTR_PFC = 3, DCB_FEATCFG_ATTR_APP = 4, __DCB_FEATCFG_ATTR_ENUM_MAX = 5, DCB_FEATCFG_ATTR_MAX = 4, }; enum dcbnl_numtcs_attrs { DCB_NUMTCS_ATTR_UNDEFINED = 0, DCB_NUMTCS_ATTR_ALL = 1, DCB_NUMTCS_ATTR_PG = 2, DCB_NUMTCS_ATTR_PFC = 3, __DCB_NUMTCS_ATTR_ENUM_MAX = 4, DCB_NUMTCS_ATTR_MAX = 3, }; enum 
dcbnl_pfc_up_attrs { DCB_PFC_UP_ATTR_UNDEFINED = 0, DCB_PFC_UP_ATTR_0 = 1, DCB_PFC_UP_ATTR_1 = 2, DCB_PFC_UP_ATTR_2 = 3, DCB_PFC_UP_ATTR_3 = 4, DCB_PFC_UP_ATTR_4 = 5, DCB_PFC_UP_ATTR_5 = 6, DCB_PFC_UP_ATTR_6 = 7, DCB_PFC_UP_ATTR_7 = 8, DCB_PFC_UP_ATTR_ALL = 9, __DCB_PFC_UP_ATTR_ENUM_MAX = 10, DCB_PFC_UP_ATTR_MAX = 9, }; enum dcbnl_pg_attrs { DCB_PG_ATTR_UNDEFINED = 0, DCB_PG_ATTR_TC_0 = 1, DCB_PG_ATTR_TC_1 = 2, DCB_PG_ATTR_TC_2 = 3, DCB_PG_ATTR_TC_3 = 4, DCB_PG_ATTR_TC_4 = 5, DCB_PG_ATTR_TC_5 = 6, DCB_PG_ATTR_TC_6 = 7, DCB_PG_ATTR_TC_7 = 8, DCB_PG_ATTR_TC_MAX = 9, DCB_PG_ATTR_TC_ALL = 10, DCB_PG_ATTR_BW_ID_0 = 11, DCB_PG_ATTR_BW_ID_1 = 12, DCB_PG_ATTR_BW_ID_2 = 13, DCB_PG_ATTR_BW_ID_3 = 14, DCB_PG_ATTR_BW_ID_4 = 15, DCB_PG_ATTR_BW_ID_5 = 16, DCB_PG_ATTR_BW_ID_6 = 17, DCB_PG_ATTR_BW_ID_7 = 18, DCB_PG_ATTR_BW_ID_MAX = 19, DCB_PG_ATTR_BW_ID_ALL = 20, __DCB_PG_ATTR_ENUM_MAX = 21, DCB_PG_ATTR_MAX = 20, }; enum dcbnl_tc_attrs { DCB_TC_ATTR_PARAM_UNDEFINED = 0, DCB_TC_ATTR_PARAM_PGID = 1, DCB_TC_ATTR_PARAM_UP_MAPPING = 2, DCB_TC_ATTR_PARAM_STRICT_PRIO = 3, DCB_TC_ATTR_PARAM_BW_PCT = 4, DCB_TC_ATTR_PARAM_ALL = 5, __DCB_TC_ATTR_PARAM_ENUM_MAX = 6, DCB_TC_ATTR_PARAM_MAX = 5, }; enum dccp_pkt_type { DCCP_PKT_REQUEST = 0, DCCP_PKT_RESPONSE = 1, DCCP_PKT_DATA = 2, DCCP_PKT_ACK = 3, DCCP_PKT_DATAACK = 4, DCCP_PKT_CLOSEREQ = 5, DCCP_PKT_CLOSE = 6, DCCP_PKT_RESET = 7, DCCP_PKT_SYNC = 8, DCCP_PKT_SYNCACK = 9, DCCP_PKT_INVALID = 10, }; enum dccp_state { DCCP_OPEN = 1, DCCP_REQUESTING = 2, DCCP_LISTEN = 10, DCCP_RESPOND = 3, DCCP_ACTIVE_CLOSEREQ = 4, DCCP_PASSIVE_CLOSE = 8, DCCP_CLOSING = 11, DCCP_TIME_WAIT = 6, DCCP_CLOSED = 7, DCCP_NEW_SYN_RECV = 12, DCCP_PARTOPEN = 14, DCCP_PASSIVE_CLOSEREQ = 15, DCCP_MAX_STATES = 16, }; enum dd_data_dir { DD_READ = 0, DD_WRITE = 1, }; enum dd_prio { DD_RT_PRIO = 0, DD_BE_PRIO = 1, DD_IDLE_PRIO = 2, DD_PRIO_MAX = 2, }; enum dentry_d_lock_class { DENTRY_D_LOCK_NORMAL = 0, DENTRY_D_LOCK_NESTED = 1, }; enum depot_counter_id { DEPOT_COUNTER_REFD_ALLOCS = 0, DEPOT_COUNTER_REFD_FREES = 1, DEPOT_COUNTER_REFD_INUSE = 2, DEPOT_COUNTER_FREELIST_SIZE = 3, DEPOT_COUNTER_PERSIST_COUNT = 4, DEPOT_COUNTER_PERSIST_BYTES = 5, DEPOT_COUNTER_COUNT = 6, }; enum desc_state { desc_miss = -1, desc_reserved = 0, desc_committed = 1, desc_finalized = 2, desc_reusable = 3, }; enum dev_dma_attr { DEV_DMA_NOT_SUPPORTED = 0, DEV_DMA_NON_COHERENT = 1, DEV_DMA_COHERENT = 2, }; enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_MIN_FREQUENCY = 3, DEV_PM_QOS_MAX_FREQUENCY = 4, DEV_PM_QOS_FLAGS = 5, }; enum dev_prop_type { DEV_PROP_U8 = 0, DEV_PROP_U16 = 1, DEV_PROP_U32 = 2, DEV_PROP_U64 = 3, DEV_PROP_STRING = 4, DEV_PROP_REF = 5, }; enum devcg_behavior { DEVCG_DEFAULT_NONE = 0, DEVCG_DEFAULT_ALLOW = 1, DEVCG_DEFAULT_DENY = 2, }; enum device_link_state { DL_STATE_NONE = -1, DL_STATE_DORMANT = 0, DL_STATE_AVAILABLE = 1, DL_STATE_CONSUMER_PROBE = 2, DL_STATE_ACTIVE = 3, DL_STATE_SUPPLIER_UNBIND = 4, }; enum device_physical_location_horizontal_position { DEVICE_HORI_POS_LEFT = 0, DEVICE_HORI_POS_CENTER = 1, DEVICE_HORI_POS_RIGHT = 2, }; enum device_physical_location_panel { DEVICE_PANEL_TOP = 0, DEVICE_PANEL_BOTTOM = 1, DEVICE_PANEL_LEFT = 2, DEVICE_PANEL_RIGHT = 3, DEVICE_PANEL_FRONT = 4, DEVICE_PANEL_BACK = 5, DEVICE_PANEL_UNKNOWN = 6, }; enum device_physical_location_vertical_position { DEVICE_VERT_POS_UPPER = 0, DEVICE_VERT_POS_CENTER = 1, DEVICE_VERT_POS_LOWER = 2, }; enum device_removable { DEVICE_REMOVABLE_NOT_SUPPORTED = 0, DEVICE_REMOVABLE_UNKNOWN = 
1, DEVICE_FIXED = 2, DEVICE_REMOVABLE = 3, }; enum devkmsg_log_bits { __DEVKMSG_LOG_BIT_ON = 0, __DEVKMSG_LOG_BIT_OFF = 1, __DEVKMSG_LOG_BIT_LOCK = 2, }; enum devkmsg_log_masks { DEVKMSG_LOG_MASK_ON = 1, DEVKMSG_LOG_MASK_OFF = 2, DEVKMSG_LOG_MASK_LOCK = 4, }; enum devlink_attr { DEVLINK_ATTR_UNSPEC = 0, DEVLINK_ATTR_BUS_NAME = 1, DEVLINK_ATTR_DEV_NAME = 2, DEVLINK_ATTR_PORT_INDEX = 3, DEVLINK_ATTR_PORT_TYPE = 4, DEVLINK_ATTR_PORT_DESIRED_TYPE = 5, DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6, DEVLINK_ATTR_PORT_NETDEV_NAME = 7, DEVLINK_ATTR_PORT_IBDEV_NAME = 8, DEVLINK_ATTR_PORT_SPLIT_COUNT = 9, DEVLINK_ATTR_PORT_SPLIT_GROUP = 10, DEVLINK_ATTR_SB_INDEX = 11, DEVLINK_ATTR_SB_SIZE = 12, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 13, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 14, DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 15, DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 16, DEVLINK_ATTR_SB_POOL_INDEX = 17, DEVLINK_ATTR_SB_POOL_TYPE = 18, DEVLINK_ATTR_SB_POOL_SIZE = 19, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 20, DEVLINK_ATTR_SB_THRESHOLD = 21, DEVLINK_ATTR_SB_TC_INDEX = 22, DEVLINK_ATTR_SB_OCC_CUR = 23, DEVLINK_ATTR_SB_OCC_MAX = 24, DEVLINK_ATTR_ESWITCH_MODE = 25, DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26, DEVLINK_ATTR_DPIPE_TABLES = 27, DEVLINK_ATTR_DPIPE_TABLE = 28, DEVLINK_ATTR_DPIPE_TABLE_NAME = 29, DEVLINK_ATTR_DPIPE_TABLE_SIZE = 30, DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 31, DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 32, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 33, DEVLINK_ATTR_DPIPE_ENTRIES = 34, DEVLINK_ATTR_DPIPE_ENTRY = 35, DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 36, DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 37, DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 38, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 39, DEVLINK_ATTR_DPIPE_MATCH = 40, DEVLINK_ATTR_DPIPE_MATCH_VALUE = 41, DEVLINK_ATTR_DPIPE_MATCH_TYPE = 42, DEVLINK_ATTR_DPIPE_ACTION = 43, DEVLINK_ATTR_DPIPE_ACTION_VALUE = 44, DEVLINK_ATTR_DPIPE_ACTION_TYPE = 45, DEVLINK_ATTR_DPIPE_VALUE = 46, DEVLINK_ATTR_DPIPE_VALUE_MASK = 47, DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 48, DEVLINK_ATTR_DPIPE_HEADERS = 49, DEVLINK_ATTR_DPIPE_HEADER = 50, DEVLINK_ATTR_DPIPE_HEADER_NAME = 51, DEVLINK_ATTR_DPIPE_HEADER_ID = 52, DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 53, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 54, DEVLINK_ATTR_DPIPE_HEADER_INDEX = 55, DEVLINK_ATTR_DPIPE_FIELD = 56, DEVLINK_ATTR_DPIPE_FIELD_NAME = 57, DEVLINK_ATTR_DPIPE_FIELD_ID = 58, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 59, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 60, DEVLINK_ATTR_PAD = 61, DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62, DEVLINK_ATTR_RESOURCE_LIST = 63, DEVLINK_ATTR_RESOURCE = 64, DEVLINK_ATTR_RESOURCE_NAME = 65, DEVLINK_ATTR_RESOURCE_ID = 66, DEVLINK_ATTR_RESOURCE_SIZE = 67, DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68, DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69, DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70, DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71, DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72, DEVLINK_ATTR_RESOURCE_UNIT = 73, DEVLINK_ATTR_RESOURCE_OCC = 74, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76, DEVLINK_ATTR_PORT_FLAVOUR = 77, DEVLINK_ATTR_PORT_NUMBER = 78, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 79, DEVLINK_ATTR_PARAM = 80, DEVLINK_ATTR_PARAM_NAME = 81, DEVLINK_ATTR_PARAM_GENERIC = 82, DEVLINK_ATTR_PARAM_TYPE = 83, DEVLINK_ATTR_PARAM_VALUES_LIST = 84, DEVLINK_ATTR_PARAM_VALUE = 85, DEVLINK_ATTR_PARAM_VALUE_DATA = 86, DEVLINK_ATTR_PARAM_VALUE_CMODE = 87, DEVLINK_ATTR_REGION_NAME = 88, DEVLINK_ATTR_REGION_SIZE = 89, DEVLINK_ATTR_REGION_SNAPSHOTS = 90, DEVLINK_ATTR_REGION_SNAPSHOT = 91, DEVLINK_ATTR_REGION_SNAPSHOT_ID = 92, DEVLINK_ATTR_REGION_CHUNKS = 93, 
DEVLINK_ATTR_REGION_CHUNK = 94, DEVLINK_ATTR_REGION_CHUNK_DATA = 95, DEVLINK_ATTR_REGION_CHUNK_ADDR = 96, DEVLINK_ATTR_REGION_CHUNK_LEN = 97, DEVLINK_ATTR_INFO_DRIVER_NAME = 98, DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99, DEVLINK_ATTR_INFO_VERSION_FIXED = 100, DEVLINK_ATTR_INFO_VERSION_RUNNING = 101, DEVLINK_ATTR_INFO_VERSION_STORED = 102, DEVLINK_ATTR_INFO_VERSION_NAME = 103, DEVLINK_ATTR_INFO_VERSION_VALUE = 104, DEVLINK_ATTR_SB_POOL_CELL_SIZE = 105, DEVLINK_ATTR_FMSG = 106, DEVLINK_ATTR_FMSG_OBJ_NEST_START = 107, DEVLINK_ATTR_FMSG_PAIR_NEST_START = 108, DEVLINK_ATTR_FMSG_ARR_NEST_START = 109, DEVLINK_ATTR_FMSG_NEST_END = 110, DEVLINK_ATTR_FMSG_OBJ_NAME = 111, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE = 112, DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA = 113, DEVLINK_ATTR_HEALTH_REPORTER = 114, DEVLINK_ATTR_HEALTH_REPORTER_NAME = 115, DEVLINK_ATTR_HEALTH_REPORTER_STATE = 116, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT = 117, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT = 118, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS = 119, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD = 120, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER = 121, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME = 122, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT = 123, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG = 124, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE = 125, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL = 126, DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127, DEVLINK_ATTR_PORT_PCI_VF_NUMBER = 128, DEVLINK_ATTR_STATS = 129, DEVLINK_ATTR_TRAP_NAME = 130, DEVLINK_ATTR_TRAP_ACTION = 131, DEVLINK_ATTR_TRAP_TYPE = 132, DEVLINK_ATTR_TRAP_GENERIC = 133, DEVLINK_ATTR_TRAP_METADATA = 134, DEVLINK_ATTR_TRAP_GROUP_NAME = 135, DEVLINK_ATTR_RELOAD_FAILED = 136, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS = 137, DEVLINK_ATTR_NETNS_FD = 138, DEVLINK_ATTR_NETNS_PID = 139, DEVLINK_ATTR_NETNS_ID = 140, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP = 141, DEVLINK_ATTR_TRAP_POLICER_ID = 142, DEVLINK_ATTR_TRAP_POLICER_RATE = 143, DEVLINK_ATTR_TRAP_POLICER_BURST = 144, DEVLINK_ATTR_PORT_FUNCTION = 145, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER = 146, DEVLINK_ATTR_PORT_LANES = 147, DEVLINK_ATTR_PORT_SPLITTABLE = 148, DEVLINK_ATTR_PORT_EXTERNAL = 149, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT = 151, DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK = 152, DEVLINK_ATTR_RELOAD_ACTION = 153, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED = 154, DEVLINK_ATTR_RELOAD_LIMITS = 155, DEVLINK_ATTR_DEV_STATS = 156, DEVLINK_ATTR_RELOAD_STATS = 157, DEVLINK_ATTR_RELOAD_STATS_ENTRY = 158, DEVLINK_ATTR_RELOAD_STATS_LIMIT = 159, DEVLINK_ATTR_RELOAD_STATS_VALUE = 160, DEVLINK_ATTR_REMOTE_RELOAD_STATS = 161, DEVLINK_ATTR_RELOAD_ACTION_INFO = 162, DEVLINK_ATTR_RELOAD_ACTION_STATS = 163, DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164, DEVLINK_ATTR_RATE_TYPE = 165, DEVLINK_ATTR_RATE_TX_SHARE = 166, DEVLINK_ATTR_RATE_TX_MAX = 167, DEVLINK_ATTR_RATE_NODE_NAME = 168, DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 169, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 170, DEVLINK_ATTR_LINECARD_INDEX = 171, DEVLINK_ATTR_LINECARD_STATE = 172, DEVLINK_ATTR_LINECARD_TYPE = 173, DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 174, DEVLINK_ATTR_NESTED_DEVLINK = 175, DEVLINK_ATTR_SELFTESTS = 176, DEVLINK_ATTR_RATE_TX_PRIORITY = 177, DEVLINK_ATTR_RATE_TX_WEIGHT = 178, DEVLINK_ATTR_REGION_DIRECT = 179, __DEVLINK_ATTR_MAX = 180, DEVLINK_ATTR_MAX = 179, }; enum devlink_attr_selftest_id { DEVLINK_ATTR_SELFTEST_ID_UNSPEC = 0, DEVLINK_ATTR_SELFTEST_ID_FLASH = 1, __DEVLINK_ATTR_SELFTEST_ID_MAX = 2, DEVLINK_ATTR_SELFTEST_ID_MAX = 1, }; enum devlink_attr_selftest_result { 
DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC = 0, DEVLINK_ATTR_SELFTEST_RESULT = 1, DEVLINK_ATTR_SELFTEST_RESULT_ID = 2, DEVLINK_ATTR_SELFTEST_RESULT_STATUS = 3, __DEVLINK_ATTR_SELFTEST_RESULT_MAX = 4, DEVLINK_ATTR_SELFTEST_RESULT_MAX = 3, }; enum devlink_command { DEVLINK_CMD_UNSPEC = 0, DEVLINK_CMD_GET = 1, DEVLINK_CMD_SET = 2, DEVLINK_CMD_NEW = 3, DEVLINK_CMD_DEL = 4, DEVLINK_CMD_PORT_GET = 5, DEVLINK_CMD_PORT_SET = 6, DEVLINK_CMD_PORT_NEW = 7, DEVLINK_CMD_PORT_DEL = 8, DEVLINK_CMD_PORT_SPLIT = 9, DEVLINK_CMD_PORT_UNSPLIT = 10, DEVLINK_CMD_SB_GET = 11, DEVLINK_CMD_SB_SET = 12, DEVLINK_CMD_SB_NEW = 13, DEVLINK_CMD_SB_DEL = 14, DEVLINK_CMD_SB_POOL_GET = 15, DEVLINK_CMD_SB_POOL_SET = 16, DEVLINK_CMD_SB_POOL_NEW = 17, DEVLINK_CMD_SB_POOL_DEL = 18, DEVLINK_CMD_SB_PORT_POOL_GET = 19, DEVLINK_CMD_SB_PORT_POOL_SET = 20, DEVLINK_CMD_SB_PORT_POOL_NEW = 21, DEVLINK_CMD_SB_PORT_POOL_DEL = 22, DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23, DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24, DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25, DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26, DEVLINK_CMD_SB_OCC_SNAPSHOT = 27, DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28, DEVLINK_CMD_ESWITCH_GET = 29, DEVLINK_CMD_ESWITCH_SET = 30, DEVLINK_CMD_DPIPE_TABLE_GET = 31, DEVLINK_CMD_DPIPE_ENTRIES_GET = 32, DEVLINK_CMD_DPIPE_HEADERS_GET = 33, DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34, DEVLINK_CMD_RESOURCE_SET = 35, DEVLINK_CMD_RESOURCE_DUMP = 36, DEVLINK_CMD_RELOAD = 37, DEVLINK_CMD_PARAM_GET = 38, DEVLINK_CMD_PARAM_SET = 39, DEVLINK_CMD_PARAM_NEW = 40, DEVLINK_CMD_PARAM_DEL = 41, DEVLINK_CMD_REGION_GET = 42, DEVLINK_CMD_REGION_SET = 43, DEVLINK_CMD_REGION_NEW = 44, DEVLINK_CMD_REGION_DEL = 45, DEVLINK_CMD_REGION_READ = 46, DEVLINK_CMD_PORT_PARAM_GET = 47, DEVLINK_CMD_PORT_PARAM_SET = 48, DEVLINK_CMD_PORT_PARAM_NEW = 49, DEVLINK_CMD_PORT_PARAM_DEL = 50, DEVLINK_CMD_INFO_GET = 51, DEVLINK_CMD_HEALTH_REPORTER_GET = 52, DEVLINK_CMD_HEALTH_REPORTER_SET = 53, DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 54, DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 55, DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 56, DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 57, DEVLINK_CMD_FLASH_UPDATE = 58, DEVLINK_CMD_FLASH_UPDATE_END = 59, DEVLINK_CMD_FLASH_UPDATE_STATUS = 60, DEVLINK_CMD_TRAP_GET = 61, DEVLINK_CMD_TRAP_SET = 62, DEVLINK_CMD_TRAP_NEW = 63, DEVLINK_CMD_TRAP_DEL = 64, DEVLINK_CMD_TRAP_GROUP_GET = 65, DEVLINK_CMD_TRAP_GROUP_SET = 66, DEVLINK_CMD_TRAP_GROUP_NEW = 67, DEVLINK_CMD_TRAP_GROUP_DEL = 68, DEVLINK_CMD_TRAP_POLICER_GET = 69, DEVLINK_CMD_TRAP_POLICER_SET = 70, DEVLINK_CMD_TRAP_POLICER_NEW = 71, DEVLINK_CMD_TRAP_POLICER_DEL = 72, DEVLINK_CMD_HEALTH_REPORTER_TEST = 73, DEVLINK_CMD_RATE_GET = 74, DEVLINK_CMD_RATE_SET = 75, DEVLINK_CMD_RATE_NEW = 76, DEVLINK_CMD_RATE_DEL = 77, DEVLINK_CMD_LINECARD_GET = 78, DEVLINK_CMD_LINECARD_SET = 79, DEVLINK_CMD_LINECARD_NEW = 80, DEVLINK_CMD_LINECARD_DEL = 81, DEVLINK_CMD_SELFTESTS_GET = 82, DEVLINK_CMD_SELFTESTS_RUN = 83, DEVLINK_CMD_NOTIFY_FILTER_SET = 84, __DEVLINK_CMD_MAX = 85, DEVLINK_CMD_MAX = 84, }; enum devlink_dpipe_action_type { DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0, }; enum devlink_dpipe_field_ethernet_id { DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0, }; enum devlink_dpipe_field_ipv4_id { DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0, }; enum devlink_dpipe_field_ipv6_id { DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0, }; enum devlink_dpipe_field_mapping_type { DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0, DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1, }; enum devlink_dpipe_header_id { DEVLINK_DPIPE_HEADER_ETHERNET = 0, DEVLINK_DPIPE_HEADER_IPV4 = 1, DEVLINK_DPIPE_HEADER_IPV6 = 2, }; 
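/*
 * A minimal usage sketch (an editorial illustration, not part of the
 * generated header): the devlink netlink enums above follow the common
 * UAPI pattern of a private __FOO_MAX sentinel one past the last valid
 * value, with the public FOO_MAX defined as __FOO_MAX - 1 (e.g.
 * __DEVLINK_ATTR_MAX = 180, DEVLINK_ATTR_MAX = 179). A hypothetical
 * bounds check on an attribute type parsed from a netlink message could
 * therefore be written as:
 */
static inline _Bool devlink_attr_type_is_valid(unsigned int type)
{
	/* valid attribute types are 1..DEVLINK_ATTR_MAX inclusive */
	return type > DEVLINK_ATTR_UNSPEC && type <= DEVLINK_ATTR_MAX;
}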
enum devlink_dpipe_match_type { DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0, }; enum devlink_eswitch_encap_mode { DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0, DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1, }; enum devlink_eswitch_mode { DEVLINK_ESWITCH_MODE_LEGACY = 0, DEVLINK_ESWITCH_MODE_SWITCHDEV = 1, }; enum devlink_health_reporter_state { DEVLINK_HEALTH_REPORTER_STATE_HEALTHY = 0, DEVLINK_HEALTH_REPORTER_STATE_ERROR = 1, }; enum devlink_info_version_type { DEVLINK_INFO_VERSION_TYPE_NONE = 0, DEVLINK_INFO_VERSION_TYPE_COMPONENT = 1, }; enum devlink_linecard_state { DEVLINK_LINECARD_STATE_UNSPEC = 0, DEVLINK_LINECARD_STATE_UNPROVISIONED = 1, DEVLINK_LINECARD_STATE_UNPROVISIONING = 2, DEVLINK_LINECARD_STATE_PROVISIONING = 3, DEVLINK_LINECARD_STATE_PROVISIONING_FAILED = 4, DEVLINK_LINECARD_STATE_PROVISIONED = 5, DEVLINK_LINECARD_STATE_ACTIVE = 6, __DEVLINK_LINECARD_STATE_MAX = 7, DEVLINK_LINECARD_STATE_MAX = 6, }; enum devlink_multicast_groups { DEVLINK_MCGRP_CONFIG = 0, }; enum devlink_param_cmode { DEVLINK_PARAM_CMODE_RUNTIME = 0, DEVLINK_PARAM_CMODE_DRIVERINIT = 1, DEVLINK_PARAM_CMODE_PERMANENT = 2, __DEVLINK_PARAM_CMODE_MAX = 3, DEVLINK_PARAM_CMODE_MAX = 2, }; enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET = 0, DEVLINK_PARAM_GENERIC_ID_MAX_MACS = 1, DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV = 2, DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT = 3, DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI = 4, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX = 5, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN = 6, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY = 7, DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE = 8, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE = 9, DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET = 10, DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH = 11, DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA = 12, DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET = 13, DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP = 14, DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE = 15, DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE = 16, __DEVLINK_PARAM_GENERIC_ID_MAX = 17, DEVLINK_PARAM_GENERIC_ID_MAX = 16, }; enum devlink_param_type { DEVLINK_PARAM_TYPE_U8 = 0, DEVLINK_PARAM_TYPE_U16 = 1, DEVLINK_PARAM_TYPE_U32 = 2, DEVLINK_PARAM_TYPE_STRING = 3, DEVLINK_PARAM_TYPE_BOOL = 4, }; enum devlink_port_flavour { DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, DEVLINK_PORT_FLAVOUR_CPU = 1, DEVLINK_PORT_FLAVOUR_DSA = 2, DEVLINK_PORT_FLAVOUR_PCI_PF = 3, DEVLINK_PORT_FLAVOUR_PCI_VF = 4, DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, DEVLINK_PORT_FLAVOUR_UNUSED = 6, DEVLINK_PORT_FLAVOUR_PCI_SF = 7, }; enum devlink_port_fn_attr_cap { DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT = 0, DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT = 1, DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT = 2, DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT = 3, __DEVLINK_PORT_FN_ATTR_CAPS_MAX = 4, }; enum devlink_port_fn_opstate { DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, }; enum devlink_port_fn_state { DEVLINK_PORT_FN_STATE_INACTIVE = 0, DEVLINK_PORT_FN_STATE_ACTIVE = 1, }; enum devlink_port_function_attr { DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1, DEVLINK_PORT_FN_ATTR_STATE = 2, DEVLINK_PORT_FN_ATTR_OPSTATE = 3, DEVLINK_PORT_FN_ATTR_CAPS = 4, DEVLINK_PORT_FN_ATTR_DEVLINK = 5, DEVLINK_PORT_FN_ATTR_MAX_IO_EQS = 6, __DEVLINK_PORT_FUNCTION_ATTR_MAX = 7, DEVLINK_PORT_FUNCTION_ATTR_MAX = 6, }; enum devlink_port_type { DEVLINK_PORT_TYPE_NOTSET = 0, DEVLINK_PORT_TYPE_AUTO = 1, DEVLINK_PORT_TYPE_ETH = 2, DEVLINK_PORT_TYPE_IB = 3, }; enum devlink_rate_type { DEVLINK_RATE_TYPE_LEAF = 0, DEVLINK_RATE_TYPE_NODE = 1, }; enum 
devlink_reload_action { DEVLINK_RELOAD_ACTION_UNSPEC = 0, DEVLINK_RELOAD_ACTION_DRIVER_REINIT = 1, DEVLINK_RELOAD_ACTION_FW_ACTIVATE = 2, __DEVLINK_RELOAD_ACTION_MAX = 3, DEVLINK_RELOAD_ACTION_MAX = 2, }; enum devlink_reload_limit { DEVLINK_RELOAD_LIMIT_UNSPEC = 0, DEVLINK_RELOAD_LIMIT_NO_RESET = 1, __DEVLINK_RELOAD_LIMIT_MAX = 2, DEVLINK_RELOAD_LIMIT_MAX = 1, }; enum devlink_resource_unit { DEVLINK_RESOURCE_UNIT_ENTRY = 0, }; enum devlink_sb_pool_type { DEVLINK_SB_POOL_TYPE_INGRESS = 0, DEVLINK_SB_POOL_TYPE_EGRESS = 1, }; enum devlink_sb_threshold_type { DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0, DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1, }; enum devlink_selftest_status { DEVLINK_SELFTEST_STATUS_SKIP = 0, DEVLINK_SELFTEST_STATUS_PASS = 1, DEVLINK_SELFTEST_STATUS_FAIL = 2, }; enum devlink_trap_action { DEVLINK_TRAP_ACTION_DROP = 0, DEVLINK_TRAP_ACTION_TRAP = 1, DEVLINK_TRAP_ACTION_MIRROR = 2, }; enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_SMAC_MC = 0, DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH = 1, DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER = 2, DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER = 3, DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST = 4, DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER = 5, DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE = 6, DEVLINK_TRAP_GENERIC_ID_TTL_ERROR = 7, DEVLINK_TRAP_GENERIC_ID_TAIL_DROP = 8, DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET = 9, DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC = 10, DEVLINK_TRAP_GENERIC_ID_DIP_LB = 11, DEVLINK_TRAP_GENERIC_ID_SIP_MC = 12, DEVLINK_TRAP_GENERIC_ID_SIP_LB = 13, DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR = 14, DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC = 15, DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE = 16, DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 17, DEVLINK_TRAP_GENERIC_ID_MTU_ERROR = 18, DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH = 19, DEVLINK_TRAP_GENERIC_ID_RPF = 20, DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE = 21, DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS = 22, DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS = 23, DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE = 24, DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR = 25, DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC = 26, DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP = 27, DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP = 28, DEVLINK_TRAP_GENERIC_ID_STP = 29, DEVLINK_TRAP_GENERIC_ID_LACP = 30, DEVLINK_TRAP_GENERIC_ID_LLDP = 31, DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY = 32, DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT = 33, DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT = 34, DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT = 35, DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE = 36, DEVLINK_TRAP_GENERIC_ID_MLD_QUERY = 37, DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT = 38, DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT = 39, DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE = 40, DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP = 41, DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP = 42, DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST = 43, DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE = 44, DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY = 45, DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT = 46, DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT = 47, DEVLINK_TRAP_GENERIC_ID_IPV4_BFD = 48, DEVLINK_TRAP_GENERIC_ID_IPV6_BFD = 49, DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF = 50, DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF = 51, DEVLINK_TRAP_GENERIC_ID_IPV4_BGP = 52, DEVLINK_TRAP_GENERIC_ID_IPV6_BGP = 53, DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP = 54, DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP = 55, DEVLINK_TRAP_GENERIC_ID_IPV4_PIM = 56, DEVLINK_TRAP_GENERIC_ID_IPV6_PIM = 57, DEVLINK_TRAP_GENERIC_ID_UC_LB = 58, DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE = 59, DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE 
= 60, DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE = 61, DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES = 62, DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS = 63, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT = 64, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT = 65, DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT = 66, DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT = 67, DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT = 68, DEVLINK_TRAP_GENERIC_ID_PTP_EVENT = 69, DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL = 70, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE = 71, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP = 72, DEVLINK_TRAP_GENERIC_ID_EARLY_DROP = 73, DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING = 74, DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING = 75, DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING = 76, DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING = 77, DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING = 78, DEVLINK_TRAP_GENERIC_ID_ARP_PARSING = 79, DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING = 80, DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING = 81, DEVLINK_TRAP_GENERIC_ID_GRE_PARSING = 82, DEVLINK_TRAP_GENERIC_ID_UDP_PARSING = 83, DEVLINK_TRAP_GENERIC_ID_TCP_PARSING = 84, DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING = 85, DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING = 86, DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING = 87, DEVLINK_TRAP_GENERIC_ID_GTP_PARSING = 88, DEVLINK_TRAP_GENERIC_ID_ESP_PARSING = 89, DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP = 90, DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER = 91, DEVLINK_TRAP_GENERIC_ID_EAPOL = 92, DEVLINK_TRAP_GENERIC_ID_LOCKED_PORT = 93, __DEVLINK_TRAP_GENERIC_ID_MAX = 94, DEVLINK_TRAP_GENERIC_ID_MAX = 93, }; enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS = 0, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS = 1, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS = 2, DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS = 3, DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS = 4, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS = 5, DEVLINK_TRAP_GROUP_GENERIC_ID_STP = 6, DEVLINK_TRAP_GROUP_GENERIC_ID_LACP = 7, DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP = 8, DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING = 9, DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP = 10, DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY = 11, DEVLINK_TRAP_GROUP_GENERIC_ID_BFD = 12, DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF = 13, DEVLINK_TRAP_GROUP_GENERIC_ID_BGP = 14, DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP = 15, DEVLINK_TRAP_GROUP_GENERIC_ID_PIM = 16, DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB = 17, DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY = 18, DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY = 19, DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6 = 20, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT = 21, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL = 22, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE = 23, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP = 24, DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS = 25, DEVLINK_TRAP_GROUP_GENERIC_ID_EAPOL = 26, __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 27, DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 26, }; enum devlink_trap_type { DEVLINK_TRAP_TYPE_DROP = 0, DEVLINK_TRAP_TYPE_EXCEPTION = 1, DEVLINK_TRAP_TYPE_CONTROL = 2, }; enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_UC = 1, DEVM_IOREMAP_WC = 2, DEVM_IOREMAP_NP = 3, }; enum die_val { DIE_OOPS = 1, DIE_INT3 = 2, DIE_DEBUG = 3, DIE_PANIC = 4, DIE_NMI = 5, DIE_DIE = 6, DIE_KERNELDEBUG = 7, DIE_TRAP = 8, DIE_GPF = 9, DIE_CALL = 10, DIE_PAGE_FAULT = 11, DIE_NMIUNKNOWN = 12, }; enum digest_type { DIGEST_TYPE_IMA = 0, DIGEST_TYPE_VERITY = 1, DIGEST_TYPE__LAST = 2, }; enum dim_cq_period_mode { DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, DIM_CQ_PERIOD_NUM_MODES = 2, }; 
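/*
 * Editor's illustrative sketch, not part of the generated dump: a minimal
 * example of how a consumer of a dump like this might map the
 * enum devlink_trap_action values defined above back to printable names.
 * The helper name is hypothetical and assumes only the enumerators listed
 * earlier in this file.
 */
static inline const char *devlink_trap_action_str(enum devlink_trap_action act)
{
	switch (act) {
	case DEVLINK_TRAP_ACTION_DROP:
		return "drop";		/* packet dropped by the device */
	case DEVLINK_TRAP_ACTION_TRAP:
		return "trap";		/* packet sent to the CPU instead of forwarded */
	case DEVLINK_TRAP_ACTION_MIRROR:
		return "mirror";	/* packet forwarded, with a copy sent to the CPU */
	}
	return "unknown";
}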
enum dim_state { DIM_START_MEASURE = 0, DIM_MEASURE_IN_PROGRESS = 1, DIM_APPLY_NEW_PROFILE = 2, }; enum dim_stats_state { DIM_STATS_WORSE = 0, DIM_STATS_SAME = 1, DIM_STATS_BETTER = 2, }; enum dim_step_result { DIM_STEPPED = 0, DIM_TOO_TIRED = 1, DIM_ON_EDGE = 2, }; enum dim_tune_state { DIM_PARKING_ON_TOP = 0, DIM_PARKING_TIRED = 1, DIM_GOING_RIGHT = 2, DIM_GOING_LEFT = 3, }; enum dl_bw_request { dl_bw_req_check_overflow = 0, dl_bw_req_alloc = 1, dl_bw_req_free = 2, }; enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3, }; enum dl_param { DL_RUNTIME = 0, DL_PERIOD = 1, }; enum dma_ctrl_flags { DMA_PREP_INTERRUPT = 1, DMA_CTRL_ACK = 2, DMA_PREP_PQ_DISABLE_P = 4, DMA_PREP_PQ_DISABLE_Q = 8, DMA_PREP_CONTINUE = 16, DMA_PREP_FENCE = 32, DMA_CTRL_REUSE = 64, DMA_PREP_CMD = 128, DMA_PREP_REPEAT = 256, DMA_PREP_LOAD_EOT = 512, }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, }; enum dma_desc_metadata_mode { DESC_METADATA_NONE = 0, DESC_METADATA_CLIENT = 1, DESC_METADATA_ENGINE = 2, }; enum dma_fence_flag_bits { DMA_FENCE_FLAG_SIGNALED_BIT = 0, DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, DMA_FENCE_FLAG_USER_BITS = 3, }; enum dma_residue_granularity { DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, DMA_RESIDUE_GRANULARITY_SEGMENT = 1, DMA_RESIDUE_GRANULARITY_BURST = 2, }; enum dma_resv_usage { DMA_RESV_USAGE_KERNEL = 0, DMA_RESV_USAGE_WRITE = 1, DMA_RESV_USAGE_READ = 2, DMA_RESV_USAGE_BOOKKEEP = 3, }; enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, DMA_SLAVE_BUSWIDTH_1_BYTE = 1, DMA_SLAVE_BUSWIDTH_2_BYTES = 2, DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64, DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; enum dma_status { DMA_COMPLETE = 0, DMA_IN_PROGRESS = 1, DMA_PAUSED = 2, DMA_ERROR = 3, DMA_OUT_OF_ORDER = 4, }; enum dma_transaction_type { DMA_MEMCPY = 0, DMA_XOR = 1, DMA_PQ = 2, DMA_XOR_VAL = 3, DMA_PQ_VAL = 4, DMA_MEMSET = 5, DMA_MEMSET_SG = 6, DMA_INTERRUPT = 7, DMA_PRIVATE = 8, DMA_ASYNC_TX = 9, DMA_SLAVE = 10, DMA_CYCLIC = 11, DMA_INTERLEAVE = 12, DMA_COMPLETION_NO_ORDER = 13, DMA_REPEAT = 14, DMA_LOAD_EOT = 15, DMA_TX_TYPE_END = 16, }; enum dma_transfer_direction { DMA_MEM_TO_MEM = 0, DMA_MEM_TO_DEV = 1, DMA_DEV_TO_MEM = 2, DMA_DEV_TO_DEV = 3, DMA_TRANS_NONE = 4, }; enum dmaengine_alignment { DMAENGINE_ALIGN_1_BYTE = 0, DMAENGINE_ALIGN_2_BYTES = 1, DMAENGINE_ALIGN_4_BYTES = 2, DMAENGINE_ALIGN_8_BYTES = 3, DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6, DMAENGINE_ALIGN_128_BYTES = 7, DMAENGINE_ALIGN_256_BYTES = 8, }; enum dmaengine_tx_result { DMA_TRANS_NOERROR = 0, DMA_TRANS_READ_FAILED = 1, DMA_TRANS_WRITE_FAILED = 2, DMA_TRANS_ABORTED = 3, }; enum dmi_device_type { DMI_DEV_TYPE_ANY = 0, DMI_DEV_TYPE_OTHER = 1, DMI_DEV_TYPE_UNKNOWN = 2, DMI_DEV_TYPE_VIDEO = 3, DMI_DEV_TYPE_SCSI = 4, DMI_DEV_TYPE_ETHERNET = 5, DMI_DEV_TYPE_TOKENRING = 6, DMI_DEV_TYPE_SOUND = 7, DMI_DEV_TYPE_PATA = 8, DMI_DEV_TYPE_SATA = 9, DMI_DEV_TYPE_SAS = 10, DMI_DEV_TYPE_IPMI = -1, DMI_DEV_TYPE_OEM_STRING = -2, DMI_DEV_TYPE_DEV_ONBOARD = -3, DMI_DEV_TYPE_DEV_SLOT = -4, }; enum dmi_entry_type { DMI_ENTRY_BIOS = 0, DMI_ENTRY_SYSTEM = 1, DMI_ENTRY_BASEBOARD = 2, DMI_ENTRY_CHASSIS = 3, DMI_ENTRY_PROCESSOR = 4, DMI_ENTRY_MEM_CONTROLLER = 5, DMI_ENTRY_MEM_MODULE = 6, DMI_ENTRY_CACHE = 7, 
DMI_ENTRY_PORT_CONNECTOR = 8, DMI_ENTRY_SYSTEM_SLOT = 9, DMI_ENTRY_ONBOARD_DEVICE = 10, DMI_ENTRY_OEMSTRINGS = 11, DMI_ENTRY_SYSCONF = 12, DMI_ENTRY_BIOS_LANG = 13, DMI_ENTRY_GROUP_ASSOC = 14, DMI_ENTRY_SYSTEM_EVENT_LOG = 15, DMI_ENTRY_PHYS_MEM_ARRAY = 16, DMI_ENTRY_MEM_DEVICE = 17, DMI_ENTRY_32_MEM_ERROR = 18, DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19, DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, DMI_ENTRY_BUILTIN_POINTING_DEV = 21, DMI_ENTRY_PORTABLE_BATTERY = 22, DMI_ENTRY_SYSTEM_RESET = 23, DMI_ENTRY_HW_SECURITY = 24, DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, DMI_ENTRY_VOLTAGE_PROBE = 26, DMI_ENTRY_COOLING_DEV = 27, DMI_ENTRY_TEMP_PROBE = 28, DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, DMI_ENTRY_OOB_REMOTE_ACCESS = 30, DMI_ENTRY_BIS_ENTRY = 31, DMI_ENTRY_SYSTEM_BOOT = 32, DMI_ENTRY_MGMT_DEV = 33, DMI_ENTRY_MGMT_DEV_COMPONENT = 34, DMI_ENTRY_MGMT_DEV_THRES = 35, DMI_ENTRY_MEM_CHANNEL = 36, DMI_ENTRY_IPMI_DEV = 37, DMI_ENTRY_SYS_POWER_SUPPLY = 38, DMI_ENTRY_ADDITIONAL = 39, DMI_ENTRY_ONBOARD_DEV_EXT = 40, DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, DMI_ENTRY_INACTIVE = 126, DMI_ENTRY_END_OF_TABLE = 127, }; enum dmi_field { DMI_NONE = 0, DMI_BIOS_VENDOR = 1, DMI_BIOS_VERSION = 2, DMI_BIOS_DATE = 3, DMI_BIOS_RELEASE = 4, DMI_EC_FIRMWARE_RELEASE = 5, DMI_SYS_VENDOR = 6, DMI_PRODUCT_NAME = 7, DMI_PRODUCT_VERSION = 8, DMI_PRODUCT_SERIAL = 9, DMI_PRODUCT_UUID = 10, DMI_PRODUCT_SKU = 11, DMI_PRODUCT_FAMILY = 12, DMI_BOARD_VENDOR = 13, DMI_BOARD_NAME = 14, DMI_BOARD_VERSION = 15, DMI_BOARD_SERIAL = 16, DMI_BOARD_ASSET_TAG = 17, DMI_CHASSIS_VENDOR = 18, DMI_CHASSIS_TYPE = 19, DMI_CHASSIS_VERSION = 20, DMI_CHASSIS_SERIAL = 21, DMI_CHASSIS_ASSET_TAG = 22, DMI_STRING_MAX = 23, DMI_OEM_STRING = 24, }; enum dns_lookup_status { DNS_LOOKUP_NOT_DONE = 0, DNS_LOOKUP_GOOD = 1, DNS_LOOKUP_GOOD_WITH_BAD = 2, DNS_LOOKUP_BAD = 3, DNS_LOOKUP_GOT_NOT_FOUND = 4, DNS_LOOKUP_GOT_LOCAL_FAILURE = 5, DNS_LOOKUP_GOT_TEMP_FAILURE = 6, DNS_LOOKUP_GOT_NS_FAILURE = 7, NR__dns_lookup_status = 8, }; enum dns_payload_content_type { DNS_PAYLOAD_IS_SERVER_LIST = 0, }; enum dpm_order { DPM_ORDER_NONE = 0, DPM_ORDER_DEV_AFTER_PARENT = 1, DPM_ORDER_PARENT_BEFORE_DEV = 2, DPM_ORDER_DEV_LAST = 3, }; enum drbg_prefixes { DRBG_PREFIX0 = 0, DRBG_PREFIX1 = 1, DRBG_PREFIX2 = 2, DRBG_PREFIX3 = 3, }; enum drbg_seed_state { DRBG_SEED_STATE_UNSEEDED = 0, DRBG_SEED_STATE_PARTIAL = 1, DRBG_SEED_STATE_FULL = 2, }; enum dynevent_type { DYNEVENT_TYPE_SYNTH = 1, DYNEVENT_TYPE_KPROBE = 2, DYNEVENT_TYPE_NONE = 3, }; enum e820_type { E820_TYPE_RAM = 1, E820_TYPE_RESERVED = 2, E820_TYPE_ACPI = 3, E820_TYPE_NVS = 4, E820_TYPE_UNUSABLE = 5, E820_TYPE_PMEM = 7, E820_TYPE_PRAM = 12, E820_TYPE_SOFT_RESERVED = 4026531839, E820_TYPE_RESERVED_KERN = 128, }; enum ec_command { ACPI_EC_COMMAND_READ = 128, ACPI_EC_COMMAND_WRITE = 129, ACPI_EC_BURST_ENABLE = 130, ACPI_EC_BURST_DISABLE = 131, ACPI_EC_COMMAND_QUERY = 132, }; enum efi_rts_ids { EFI_NONE = 0, EFI_GET_TIME = 1, EFI_SET_TIME = 2, EFI_GET_WAKEUP_TIME = 3, EFI_SET_WAKEUP_TIME = 4, EFI_GET_VARIABLE = 5, EFI_GET_NEXT_VARIABLE = 6, EFI_SET_VARIABLE = 7, EFI_QUERY_VARIABLE_INFO = 8, EFI_GET_NEXT_HIGH_MONO_COUNT = 9, EFI_RESET_SYSTEM = 10, EFI_UPDATE_CAPSULE = 11, EFI_QUERY_CAPSULE_CAPS = 12, EFI_ACPI_PRM_HANDLER = 13, }; enum efi_secureboot_mode { efi_secureboot_mode_unset = 0, efi_secureboot_mode_unknown = 1, efi_secureboot_mode_disabled = 2, efi_secureboot_mode_enabled = 3, }; enum elv_merge { ELEVATOR_NO_MERGE = 0, ELEVATOR_FRONT_MERGE = 1, ELEVATOR_BACK_MERGE = 2, ELEVATOR_DISCARD_MERGE = 3, }; enum enable_type { undefined = -1, 
user_disabled = 0, auto_disabled = 1, user_enabled = 2, auto_enabled = 3, }; enum energy_perf_value_index { EPB_INDEX_PERFORMANCE = 0, EPB_INDEX_BALANCE_PERFORMANCE = 1, EPB_INDEX_NORMAL = 2, EPB_INDEX_BALANCE_POWERSAVE = 3, EPB_INDEX_POWERSAVE = 4, }; enum energy_perf_value_index___2 { EPP_INDEX_DEFAULT = 0, EPP_INDEX_PERFORMANCE = 1, EPP_INDEX_BALANCE_PERFORMANCE = 2, EPP_INDEX_BALANCE_POWERSAVE = 3, EPP_INDEX_POWERSAVE = 4, }; enum error_detector { ERROR_DETECTOR_KFENCE = 0, ERROR_DETECTOR_KASAN = 1, ERROR_DETECTOR_WARN = 2, }; enum erspan_bso { BSO_NOERROR = 0, BSO_SHORT = 1, BSO_OVERSIZED = 2, BSO_BAD = 3, }; enum erspan_encap_type { ERSPAN_ENCAP_NOVLAN = 0, ERSPAN_ENCAP_ISL = 1, ERSPAN_ENCAP_8021Q = 2, ERSPAN_ENCAP_INFRAME = 3, }; enum ethnl_sock_type { ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH = 0, }; enum ethtool_c33_pse_admin_state { ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED = 2, ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED = 3, }; enum ethtool_c33_pse_ext_state { ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID = 2, ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE = 3, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED = 4, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM = 5, ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED = 6, ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE = 7, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE = 8, ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED = 9, }; enum ethtool_c33_pse_ext_substate_error_condition { ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT = 2, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT = 3, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON = 4, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS = 5, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF = 6, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN = 7, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE = 8, ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP = 9, }; enum ethtool_c33_pse_ext_substate_mr_pse_enable { ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, }; enum ethtool_c33_pse_ext_substate_option_detect_ted { ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR = 2, }; enum ethtool_c33_pse_ext_substate_option_vport_lim { ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE = 2, ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION = 3, }; enum ethtool_c33_pse_ext_substate_ovld_detected { ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, }; enum ethtool_c33_pse_ext_substate_power_not_available { ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET = 2, ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT = 3, ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT = 4, }; enum ethtool_c33_pse_ext_substate_short_detected { ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, }; enum ethtool_c33_pse_pw_d_status { ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED = 2, ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING = 3, ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING = 4, ETHTOOL_C33_PSE_PW_D_STATUS_TEST = 5, 
ETHTOOL_C33_PSE_PW_D_STATUS_FAULT = 6, ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT = 7, }; enum ethtool_cmis_cdb_cmd_id { ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0, ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 64, ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 65, ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 257, ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 259, ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 263, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 265, ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 266, }; enum ethtool_fec_config_bits { ETHTOOL_FEC_NONE_BIT = 0, ETHTOOL_FEC_AUTO_BIT = 1, ETHTOOL_FEC_OFF_BIT = 2, ETHTOOL_FEC_RS_BIT = 3, ETHTOOL_FEC_BASER_BIT = 4, ETHTOOL_FEC_LLRS_BIT = 5, }; enum ethtool_flags { ETH_FLAG_TXVLAN = 128, ETH_FLAG_RXVLAN = 256, ETH_FLAG_LRO = 32768, ETH_FLAG_NTUPLE = 134217728, ETH_FLAG_RXHASH = 268435456, }; enum ethtool_header_flags { ETHTOOL_FLAG_COMPACT_BITSETS = 1, ETHTOOL_FLAG_OMIT_REPLY = 2, ETHTOOL_FLAG_STATS = 4, }; enum ethtool_link_ext_state { ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, ETHTOOL_LINK_EXT_STATE_MODULE = 10, }; enum ethtool_link_ext_substate_autoneg { ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, }; enum ethtool_link_ext_substate_bad_signal_integrity { ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST = 3, ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS = 4, }; enum ethtool_link_ext_substate_cable_issue { ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, }; enum ethtool_link_ext_substate_link_logical_mismatch { ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, }; enum ethtool_link_ext_substate_link_training { ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, }; enum ethtool_link_ext_substate_module { ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1, }; enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, ETHTOOL_LINK_MODE_Autoneg_BIT = 6, ETHTOOL_LINK_MODE_TP_BIT = 7, ETHTOOL_LINK_MODE_AUI_BIT = 8, ETHTOOL_LINK_MODE_MII_BIT = 9, ETHTOOL_LINK_MODE_FIBRE_BIT = 10, ETHTOOL_LINK_MODE_BNC_BIT = 11, ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, 
ETHTOOL_LINK_MODE_Pause_BIT = 13, ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, ETHTOOL_LINK_MODE_Backplane_BIT = 16, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, 
ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93, ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94, ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95, ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, __ETHTOOL_LINK_MODE_MASK_NBITS = 103, }; enum ethtool_mac_stats_src { ETHTOOL_MAC_STATS_SRC_AGGREGATE = 0, ETHTOOL_MAC_STATS_SRC_EMAC = 1, ETHTOOL_MAC_STATS_SRC_PMAC = 2, }; enum ethtool_mm_verify_status { ETHTOOL_MM_VERIFY_STATUS_UNKNOWN = 0, ETHTOOL_MM_VERIFY_STATUS_INITIAL = 1, ETHTOOL_MM_VERIFY_STATUS_VERIFYING = 2, ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED = 3, ETHTOOL_MM_VERIFY_STATUS_FAILED = 4, ETHTOOL_MM_VERIFY_STATUS_DISABLED = 5, }; enum ethtool_module_fw_flash_status { ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS = 2, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED = 3, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR = 4, }; enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_LOW = 1, ETHTOOL_MODULE_POWER_MODE_HIGH = 2, }; enum ethtool_module_power_mode_policy { ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1, ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO = 2, }; enum ethtool_multicast_groups { ETHNL_MCGRP_MONITOR = 0, }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3, }; enum ethtool_podl_pse_admin_state { ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED = 2, ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED = 3, }; enum ethtool_podl_pse_pw_d_status { ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1, ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED = 2, ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING = 3, ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING = 4, ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP = 5, ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE = 6, ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR = 7, }; enum ethtool_reset_flags { ETH_RESET_MGMT = 1, ETH_RESET_IRQ = 2, ETH_RESET_DMA = 4, ETH_RESET_FILTER = 8, ETH_RESET_OFFLOAD = 16, ETH_RESET_MAC = 32, ETH_RESET_PHY = 64, ETH_RESET_RAM = 128, ETH_RESET_AP = 256, ETH_RESET_DEDICATED = 65535, ETH_RESET_ALL = 4294967295, }; enum ethtool_sfeatures_retval_bits { ETHTOOL_F_UNSUPPORTED__BIT = 0, ETHTOOL_F_WISH__BIT = 1, ETHTOOL_F_COMPAT__BIT = 2, }; enum ethtool_stringset { ETH_SS_TEST = 0, ETH_SS_STATS = 1, ETH_SS_PRIV_FLAGS = 2, ETH_SS_NTUPLE_FILTERS = 3, ETH_SS_FEATURES = 4, ETH_SS_RSS_HASH_FUNCS = 5, ETH_SS_TUNABLES = 6, ETH_SS_PHY_STATS = 7, ETH_SS_PHY_TUNABLES = 8, ETH_SS_LINK_MODES = 9, ETH_SS_MSG_CLASSES = 10, ETH_SS_WOL_MODES = 11, ETH_SS_SOF_TIMESTAMPING = 12, ETH_SS_TS_TX_TYPES = 13, ETH_SS_TS_RX_FILTERS = 14, ETH_SS_UDP_TUNNEL_TYPES = 15, ETH_SS_STATS_STD = 16, ETH_SS_STATS_ETH_PHY = 17, ETH_SS_STATS_ETH_MAC = 18, ETH_SS_STATS_ETH_CTRL = 19, ETH_SS_STATS_RMON = 20, ETH_SS_COUNT = 21, }; enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = 1, ETHTOOL_RING_USE_CQE_SIZE = 2, ETHTOOL_RING_USE_TX_PUSH = 4, ETHTOOL_RING_USE_RX_PUSH = 8, ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = 16, ETHTOOL_RING_USE_TCP_DATA_SPLIT = 32, }; enum event_command_flags { EVENT_CMD_FL_POST_TRIGGER = 1, EVENT_CMD_FL_NEEDS_REC = 2, }; enum event_trigger_type { ETT_NONE = 0, ETT_TRACE_ONOFF = 1, ETT_SNAPSHOT = 2, ETT_STACKTRACE = 4, 
ETT_EVENT_ENABLE = 8, ETT_EVENT_HIST = 16, ETT_HIST_ENABLE = 32, ETT_EVENT_EPROBE = 64, }; enum event_type_t { EVENT_FLEXIBLE = 1, EVENT_PINNED = 2, EVENT_TIME = 4, EVENT_FROZEN = 8, EVENT_CPU = 16, EVENT_CGROUP = 32, EVENT_ALL = 3, EVENT_TIME_FROZEN = 12, }; enum evm_ima_xattr_type { IMA_XATTR_DIGEST = 1, EVM_XATTR_HMAC = 2, EVM_IMA_XATTR_DIGSIG = 3, IMA_XATTR_DIGEST_NG = 4, EVM_XATTR_PORTABLE_DIGSIG = 5, IMA_VERITY_DIGSIG = 6, IMA_XATTR_LAST = 7, }; enum exact_level { NOT_EXACT = 0, EXACT = 1, RANGE_WITHIN = 2, }; enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2, }; enum exception_stack_ordering { ESTACK_DF = 0, ESTACK_NMI = 1, ESTACK_DB = 2, ESTACK_MCE = 3, ESTACK_VC = 4, ESTACK_VC2 = 5, N_EXCEPTION_STACKS = 6, }; enum execmem_range_flags { EXECMEM_KASAN_SHADOW = 1, }; enum execmem_type { EXECMEM_DEFAULT = 0, EXECMEM_MODULE_TEXT = 0, EXECMEM_KPROBES = 1, EXECMEM_FTRACE = 2, EXECMEM_BPF = 3, EXECMEM_MODULE_DATA = 4, EXECMEM_TYPE_MAX = 5, }; enum exit_fastpath_completion { EXIT_FASTPATH_NONE = 0, EXIT_FASTPATH_REENTER_GUEST = 1, EXIT_FASTPATH_EXIT_HANDLED = 2, EXIT_FASTPATH_EXIT_USERSPACE = 3, }; enum ext4_journal_trigger_type { EXT4_JTR_ORPHAN_FILE = 0, EXT4_JTR_NONE = 1, }; enum ext4_li_mode { EXT4_LI_MODE_PREFETCH_BBITMAP = 0, EXT4_LI_MODE_ITABLE = 1, }; enum extra_reg_type { EXTRA_REG_NONE = -1, EXTRA_REG_RSP_0 = 0, EXTRA_REG_RSP_1 = 1, EXTRA_REG_LBR = 2, EXTRA_REG_LDLAT = 3, EXTRA_REG_FE = 4, EXTRA_REG_SNOOP_0 = 5, EXTRA_REG_SNOOP_1 = 6, EXTRA_REG_MAX = 7, }; enum fail_dup_mod_reason { FAIL_DUP_MOD_BECOMING = 0, FAIL_DUP_MOD_LOAD = 1, }; enum fault_flag { FAULT_FLAG_WRITE = 1, FAULT_FLAG_MKWRITE = 2, FAULT_FLAG_ALLOW_RETRY = 4, FAULT_FLAG_RETRY_NOWAIT = 8, FAULT_FLAG_KILLABLE = 16, FAULT_FLAG_TRIED = 32, FAULT_FLAG_USER = 64, FAULT_FLAG_REMOTE = 128, FAULT_FLAG_INSTRUCTION = 256, FAULT_FLAG_INTERRUPTIBLE = 512, FAULT_FLAG_UNSHARE = 1024, FAULT_FLAG_ORIG_PTE_VALID = 2048, FAULT_FLAG_VMA_LOCK = 4096, }; enum fault_flags { FAULT_NOWARN = 1, }; enum fbq_type { regular = 0, remote = 1, all = 2, }; enum fetch_op { FETCH_OP_NOP = 0, FETCH_OP_REG = 1, FETCH_OP_STACK = 2, FETCH_OP_STACKP = 3, FETCH_OP_RETVAL = 4, FETCH_OP_IMM = 5, FETCH_OP_COMM = 6, FETCH_OP_ARG = 7, FETCH_OP_FOFFS = 8, FETCH_OP_DATA = 9, FETCH_OP_EDATA = 10, FETCH_OP_DEREF = 11, FETCH_OP_UDEREF = 12, FETCH_OP_ST_RAW = 13, FETCH_OP_ST_MEM = 14, FETCH_OP_ST_UMEM = 15, FETCH_OP_ST_STRING = 16, FETCH_OP_ST_USTRING = 17, FETCH_OP_ST_SYMSTR = 18, FETCH_OP_ST_EDATA = 19, FETCH_OP_MOD_BF = 20, FETCH_OP_LP_ARRAY = 21, FETCH_OP_TP_ARG = 22, FETCH_OP_END = 23, FETCH_NOP_SYMBOL = 24, }; enum fib6_walk_state { FWS_S = 0, FWS_L = 1, FWS_R = 2, FWS_C = 3, FWS_U = 4, }; enum fib_event_type { FIB_EVENT_ENTRY_REPLACE = 0, FIB_EVENT_ENTRY_APPEND = 1, FIB_EVENT_ENTRY_ADD = 2, FIB_EVENT_ENTRY_DEL = 3, FIB_EVENT_RULE_ADD = 4, FIB_EVENT_RULE_DEL = 5, FIB_EVENT_NH_ADD = 6, FIB_EVENT_NH_DEL = 7, FIB_EVENT_VIF_ADD = 8, FIB_EVENT_VIF_DEL = 9, }; enum fid_type { FILEID_ROOT = 0, FILEID_INO32_GEN = 1, FILEID_INO32_GEN_PARENT = 2, FILEID_BTRFS_WITHOUT_PARENT = 77, FILEID_BTRFS_WITH_PARENT = 78, FILEID_BTRFS_WITH_PARENT_ROOT = 79, FILEID_UDF_WITHOUT_PARENT = 81, FILEID_UDF_WITH_PARENT = 82, FILEID_NILFS_WITHOUT_PARENT = 97, FILEID_NILFS_WITH_PARENT = 98, FILEID_FAT_WITHOUT_PARENT = 113, FILEID_FAT_WITH_PARENT = 114, FILEID_INO64_GEN = 129, FILEID_INO64_GEN_PARENT = 130, FILEID_LUSTRE = 151, FILEID_BCACHEFS_WITHOUT_PARENT = 177, FILEID_BCACHEFS_WITH_PARENT = 178, FILEID_KERNFS = 254, FILEID_INVALID = 255, }; enum file_state { MEI_FILE_UNINITIALIZED = 0, 
MEI_FILE_INITIALIZING = 1, MEI_FILE_CONNECTING = 2, MEI_FILE_CONNECTED = 3, MEI_FILE_DISCONNECTING = 4, MEI_FILE_DISCONNECT_REPLY = 5, MEI_FILE_DISCONNECT_REQUIRED = 6, MEI_FILE_DISCONNECTED = 7, }; enum file_time_flags { S_ATIME = 1, S_MTIME = 2, S_CTIME = 4, S_VERSION = 8, }; enum filter_op_ids { OP_GLOB = 0, OP_NE = 1, OP_EQ = 2, OP_LE = 3, OP_LT = 4, OP_GE = 5, OP_GT = 6, OP_BAND = 7, OP_MAX = 8, }; enum filter_pred_fn { FILTER_PRED_FN_NOP = 0, FILTER_PRED_FN_64 = 1, FILTER_PRED_FN_64_CPUMASK = 2, FILTER_PRED_FN_S64 = 3, FILTER_PRED_FN_U64 = 4, FILTER_PRED_FN_32 = 5, FILTER_PRED_FN_32_CPUMASK = 6, FILTER_PRED_FN_S32 = 7, FILTER_PRED_FN_U32 = 8, FILTER_PRED_FN_16 = 9, FILTER_PRED_FN_16_CPUMASK = 10, FILTER_PRED_FN_S16 = 11, FILTER_PRED_FN_U16 = 12, FILTER_PRED_FN_8 = 13, FILTER_PRED_FN_8_CPUMASK = 14, FILTER_PRED_FN_S8 = 15, FILTER_PRED_FN_U8 = 16, FILTER_PRED_FN_COMM = 17, FILTER_PRED_FN_STRING = 18, FILTER_PRED_FN_STRLOC = 19, FILTER_PRED_FN_STRRELLOC = 20, FILTER_PRED_FN_PCHAR_USER = 21, FILTER_PRED_FN_PCHAR = 22, FILTER_PRED_FN_CPU = 23, FILTER_PRED_FN_CPU_CPUMASK = 24, FILTER_PRED_FN_CPUMASK = 25, FILTER_PRED_FN_CPUMASK_CPU = 26, FILTER_PRED_FN_FUNCTION = 27, FILTER_PRED_FN_ = 28, FILTER_PRED_TEST_VISITED = 29, }; enum fit_type { NOTHING_FIT = 0, FL_FIT_TYPE = 1, LE_FIT_TYPE = 2, RE_FIT_TYPE = 3, NE_FIT_TYPE = 4, }; enum fixed_addresses { VSYSCALL_PAGE = 511, FIX_DBGP_BASE = 512, FIX_EARLYCON_MEM_BASE = 513, FIX_APIC_BASE = 514, FIX_IO_APIC_BASE_0 = 515, FIX_IO_APIC_BASE_END = 642, __end_of_permanent_fixed_addresses = 643, FIX_BTMAP_END = 1024, FIX_BTMAP_BEGIN = 1535, __end_of_fixed_addresses = 1536, }; enum flow_action_hw_stats { FLOW_ACTION_HW_STATS_IMMEDIATE = 1, FLOW_ACTION_HW_STATS_DELAYED = 2, FLOW_ACTION_HW_STATS_ANY = 3, FLOW_ACTION_HW_STATS_DISABLED = 4, FLOW_ACTION_HW_STATS_DONT_CARE = 7, }; enum flow_action_hw_stats_bit { FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, FLOW_ACTION_HW_STATS_NUM_BITS = 3, }; enum flow_action_id { FLOW_ACTION_ACCEPT = 0, FLOW_ACTION_DROP = 1, FLOW_ACTION_TRAP = 2, FLOW_ACTION_GOTO = 3, FLOW_ACTION_REDIRECT = 4, FLOW_ACTION_MIRRED = 5, FLOW_ACTION_REDIRECT_INGRESS = 6, FLOW_ACTION_MIRRED_INGRESS = 7, FLOW_ACTION_VLAN_PUSH = 8, FLOW_ACTION_VLAN_POP = 9, FLOW_ACTION_VLAN_MANGLE = 10, FLOW_ACTION_TUNNEL_ENCAP = 11, FLOW_ACTION_TUNNEL_DECAP = 12, FLOW_ACTION_MANGLE = 13, FLOW_ACTION_ADD = 14, FLOW_ACTION_CSUM = 15, FLOW_ACTION_MARK = 16, FLOW_ACTION_PTYPE = 17, FLOW_ACTION_PRIORITY = 18, FLOW_ACTION_RX_QUEUE_MAPPING = 19, FLOW_ACTION_WAKE = 20, FLOW_ACTION_QUEUE = 21, FLOW_ACTION_SAMPLE = 22, FLOW_ACTION_POLICE = 23, FLOW_ACTION_CT = 24, FLOW_ACTION_CT_METADATA = 25, FLOW_ACTION_MPLS_PUSH = 26, FLOW_ACTION_MPLS_POP = 27, FLOW_ACTION_MPLS_MANGLE = 28, FLOW_ACTION_GATE = 29, FLOW_ACTION_PPPOE_PUSH = 30, FLOW_ACTION_JUMP = 31, FLOW_ACTION_PIPE = 32, FLOW_ACTION_VLAN_PUSH_ETH = 33, FLOW_ACTION_VLAN_POP_ETH = 34, FLOW_ACTION_CONTINUE = 35, NUM_FLOW_ACTIONS = 36, }; enum flow_action_mangle_base { FLOW_ACT_MANGLE_UNSPEC = 0, FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, }; enum flow_block_binder_type { FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, }; enum flow_block_command { FLOW_BLOCK_BIND = 0, 
FLOW_BLOCK_UNBIND = 1, }; enum flow_cls_command { FLOW_CLS_REPLACE = 0, FLOW_CLS_DESTROY = 1, FLOW_CLS_STATS = 2, FLOW_CLS_TMPLT_CREATE = 3, FLOW_CLS_TMPLT_DESTROY = 4, }; enum flow_dissect_ret { FLOW_DISSECT_RET_OUT_GOOD = 0, FLOW_DISSECT_RET_OUT_BAD = 1, FLOW_DISSECT_RET_PROTO_AGAIN = 2, FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, FLOW_DISSECT_RET_CONTINUE = 4, }; enum flow_dissector_ctrl_flags { FLOW_DIS_IS_FRAGMENT = 1, FLOW_DIS_FIRST_FRAG = 2, FLOW_DIS_F_TUNNEL_CSUM = 4, FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = 8, FLOW_DIS_F_TUNNEL_OAM = 16, FLOW_DIS_F_TUNNEL_CRIT_OPT = 32, FLOW_DIS_ENCAPSULATION = 64, }; enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL = 0, FLOW_DISSECTOR_KEY_BASIC = 1, FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, FLOW_DISSECTOR_KEY_PORTS = 4, FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, FLOW_DISSECTOR_KEY_ICMP = 6, FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, FLOW_DISSECTOR_KEY_TIPC = 8, FLOW_DISSECTOR_KEY_ARP = 9, FLOW_DISSECTOR_KEY_VLAN = 10, FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, FLOW_DISSECTOR_KEY_GRE_KEYID = 12, FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, FLOW_DISSECTOR_KEY_ENC_KEYID = 14, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, FLOW_DISSECTOR_KEY_ENC_PORTS = 18, FLOW_DISSECTOR_KEY_MPLS = 19, FLOW_DISSECTOR_KEY_TCP = 20, FLOW_DISSECTOR_KEY_IP = 21, FLOW_DISSECTOR_KEY_CVLAN = 22, FLOW_DISSECTOR_KEY_ENC_IP = 23, FLOW_DISSECTOR_KEY_ENC_OPTS = 24, FLOW_DISSECTOR_KEY_META = 25, FLOW_DISSECTOR_KEY_CT = 26, FLOW_DISSECTOR_KEY_HASH = 27, FLOW_DISSECTOR_KEY_NUM_OF_VLANS = 28, FLOW_DISSECTOR_KEY_PPPOE = 29, FLOW_DISSECTOR_KEY_L2TPV3 = 30, FLOW_DISSECTOR_KEY_CFM = 31, FLOW_DISSECTOR_KEY_IPSEC = 32, FLOW_DISSECTOR_KEY_MAX = 33, }; enum flow_offload_tuple_dir { FLOW_OFFLOAD_DIR_ORIGINAL = 0, FLOW_OFFLOAD_DIR_REPLY = 1, }; enum flow_offload_type { NF_FLOW_OFFLOAD_UNSPEC = 0, NF_FLOW_OFFLOAD_ROUTE = 1, }; enum flow_offload_xmit_type { FLOW_OFFLOAD_XMIT_UNSPEC = 0, FLOW_OFFLOAD_XMIT_NEIGH = 1, FLOW_OFFLOAD_XMIT_XFRM = 2, FLOW_OFFLOAD_XMIT_DIRECT = 3, FLOW_OFFLOAD_XMIT_TC = 4, }; enum flowlabel_reflect { FLOWLABEL_REFLECT_ESTABLISHED = 1, FLOWLABEL_REFLECT_TCP_RESET = 2, FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, }; enum folio_references { FOLIOREF_RECLAIM = 0, FOLIOREF_RECLAIM_CLEAN = 1, FOLIOREF_KEEP = 2, FOLIOREF_ACTIVATE = 3, }; enum folio_walk_level { FW_LEVEL_PTE = 0, FW_LEVEL_PMD = 1, FW_LEVEL_PUD = 2, }; enum format_type { FORMAT_TYPE_NONE = 0, FORMAT_TYPE_WIDTH = 1, FORMAT_TYPE_PRECISION = 2, FORMAT_TYPE_CHAR = 3, FORMAT_TYPE_STR = 4, FORMAT_TYPE_PTR = 5, FORMAT_TYPE_PERCENT_CHAR = 6, FORMAT_TYPE_INVALID = 7, FORMAT_TYPE_LONG_LONG = 8, FORMAT_TYPE_ULONG = 9, FORMAT_TYPE_LONG = 10, FORMAT_TYPE_UBYTE = 11, FORMAT_TYPE_BYTE = 12, FORMAT_TYPE_USHORT = 13, FORMAT_TYPE_SHORT = 14, FORMAT_TYPE_UINT = 15, FORMAT_TYPE_INT = 16, FORMAT_TYPE_SIZE_T = 17, FORMAT_TYPE_PTRDIFF = 18, }; enum freeze_holder { FREEZE_HOLDER_KERNEL = 1, FREEZE_HOLDER_USERSPACE = 2, FREEZE_MAY_NEST = 4, }; enum freezer_state_flags { CGROUP_FREEZER_ONLINE = 1, CGROUP_FREEZING_SELF = 2, CGROUP_FREEZING_PARENT = 4, CGROUP_FROZEN = 8, CGROUP_FREEZING = 6, }; enum freq_qos_req_type { FREQ_QOS_MIN = 1, FREQ_QOS_MAX = 2, }; enum fs_context_phase { FS_CONTEXT_CREATE_PARAMS = 0, FS_CONTEXT_CREATING = 1, FS_CONTEXT_AWAITING_MOUNT = 2, FS_CONTEXT_AWAITING_RECONF = 3, FS_CONTEXT_RECONF_PARAMS = 4, FS_CONTEXT_RECONFIGURING = 5, FS_CONTEXT_FAILED = 6, }; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT = 0, FS_CONTEXT_FOR_SUBMOUNT = 1, 
FS_CONTEXT_FOR_RECONFIGURE = 2, }; enum fs_value_type { fs_value_is_undefined = 0, fs_value_is_flag = 1, fs_value_is_string = 2, fs_value_is_blob = 3, fs_value_is_filename = 4, fs_value_is_file = 5, }; enum fsconfig_command { FSCONFIG_SET_FLAG = 0, FSCONFIG_SET_STRING = 1, FSCONFIG_SET_BINARY = 2, FSCONFIG_SET_PATH = 3, FSCONFIG_SET_PATH_EMPTY = 4, FSCONFIG_SET_FD = 5, FSCONFIG_CMD_CREATE = 6, FSCONFIG_CMD_RECONFIGURE = 7, FSCONFIG_CMD_CREATE_EXCL = 8, }; enum fsl_mc_pool_type { FSL_MC_POOL_DPMCP = 0, FSL_MC_POOL_DPBP = 1, FSL_MC_POOL_DPCON = 2, FSL_MC_POOL_IRQ = 3, FSL_MC_NUM_POOL_TYPES = 4, }; enum fsnotify_data_type { FSNOTIFY_EVENT_NONE = 0, FSNOTIFY_EVENT_PATH = 1, FSNOTIFY_EVENT_INODE = 2, FSNOTIFY_EVENT_DENTRY = 3, FSNOTIFY_EVENT_ERROR = 4, }; enum fsnotify_group_prio { FSNOTIFY_PRIO_NORMAL = 0, FSNOTIFY_PRIO_CONTENT = 1, FSNOTIFY_PRIO_PRE_CONTENT = 2, __FSNOTIFY_PRIO_NUM = 3, }; enum fsnotify_iter_type { FSNOTIFY_ITER_TYPE_INODE = 0, FSNOTIFY_ITER_TYPE_VFSMOUNT = 1, FSNOTIFY_ITER_TYPE_SB = 2, FSNOTIFY_ITER_TYPE_PARENT = 3, FSNOTIFY_ITER_TYPE_INODE2 = 4, FSNOTIFY_ITER_TYPE_COUNT = 5, }; enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_ANY = -1, FSNOTIFY_OBJ_TYPE_INODE = 0, FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1, FSNOTIFY_OBJ_TYPE_SB = 2, FSNOTIFY_OBJ_TYPE_COUNT = 3, FSNOTIFY_OBJ_TYPE_DETACHED = 3, }; enum ftrace_bug_type { FTRACE_BUG_UNKNOWN = 0, FTRACE_BUG_INIT = 1, FTRACE_BUG_NOP = 2, FTRACE_BUG_CALL = 3, FTRACE_BUG_UPDATE = 4, }; enum ftrace_dump_mode { DUMP_NONE = 0, DUMP_ALL = 1, DUMP_ORIG = 2, DUMP_PARAM = 3, }; enum ftrace_ops_cmd { FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF = 0, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER = 1, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER = 2, }; enum futex_access { FUTEX_READ = 0, FUTEX_WRITE = 1, }; enum fw_opt { FW_OPT_UEVENT = 1, FW_OPT_NOWAIT = 2, FW_OPT_USERHELPER = 4, FW_OPT_NO_WARN = 8, FW_OPT_NOCACHE = 16, FW_OPT_NOFALLBACK_SYSFS = 32, FW_OPT_FALLBACK_PLATFORM = 64, FW_OPT_PARTIAL = 128, }; enum fw_status { FW_STATUS_UNKNOWN = 0, FW_STATUS_LOADING = 1, FW_STATUS_DONE = 2, FW_STATUS_ABORTED = 3, }; enum gds_mitigations { GDS_MITIGATION_OFF = 0, GDS_MITIGATION_UCODE_NEEDED = 1, GDS_MITIGATION_FORCE = 2, GDS_MITIGATION_FULL = 3, GDS_MITIGATION_FULL_LOCKED = 4, GDS_MITIGATION_HYPERVISOR = 5, }; enum genl_validate_flags { GENL_DONT_VALIDATE_STRICT = 1, GENL_DONT_VALIDATE_DUMP = 2, GENL_DONT_VALIDATE_DUMP_STRICT = 4, }; enum gpiod_flags { GPIOD_ASIS = 0, GPIOD_IN = 1, GPIOD_OUT_LOW = 3, GPIOD_OUT_HIGH = 7, GPIOD_OUT_LOW_OPEN_DRAIN = 11, GPIOD_OUT_HIGH_OPEN_DRAIN = 15, }; enum graph_filter_type { GRAPH_FILTER_NOTRACE = 0, GRAPH_FILTER_FUNCTION = 1, }; enum gro_result { GRO_MERGED = 0, GRO_MERGED_FREE = 1, GRO_HELD = 2, GRO_NORMAL = 3, GRO_CONSUMED = 4, }; typedef enum gro_result gro_result_t; enum group_type { group_has_spare = 0, group_fully_busy = 1, group_misfit_task = 2, group_smt_balance = 3, group_asym_packing = 4, group_imbalanced = 5, group_overloaded = 6, }; enum handle_to_path_flags { HANDLE_CHECK_PERMS = 1, HANDLE_CHECK_SUBTREE = 2, }; enum hash_algo { HASH_ALGO_MD4 = 0, HASH_ALGO_MD5 = 1, HASH_ALGO_SHA1 = 2, HASH_ALGO_RIPE_MD_160 = 3, HASH_ALGO_SHA256 = 4, HASH_ALGO_SHA384 = 5, HASH_ALGO_SHA512 = 6, HASH_ALGO_SHA224 = 7, HASH_ALGO_RIPE_MD_128 = 8, HASH_ALGO_RIPE_MD_256 = 9, HASH_ALGO_RIPE_MD_320 = 10, HASH_ALGO_WP_256 = 11, HASH_ALGO_WP_384 = 12, HASH_ALGO_WP_512 = 13, HASH_ALGO_TGR_128 = 14, HASH_ALGO_TGR_160 = 15, HASH_ALGO_TGR_192 = 16, HASH_ALGO_SM3_256 = 17, HASH_ALGO_STREEBOG_256 = 18, HASH_ALGO_STREEBOG_512 = 19, HASH_ALGO_SHA3_256 = 20, 
HASH_ALGO_SHA3_384 = 21, HASH_ALGO_SHA3_512 = 22, HASH_ALGO__LAST = 23, }; enum hbm_host_enum_flags { MEI_HBM_ENUM_F_ALLOW_ADD = 1, MEI_HBM_ENUM_F_IMMEDIATE_ENUM = 2, }; enum hctx_type { HCTX_TYPE_DEFAULT = 0, HCTX_TYPE_READ = 1, HCTX_TYPE_POLL = 2, HCTX_MAX_TYPES = 3, }; enum header_fields { HDR_PCR = 0, HDR_DIGEST = 1, HDR_TEMPLATE_NAME = 2, HDR_TEMPLATE_DATA = 3, HDR__LAST = 4, }; enum hid_class_request { HID_REQ_GET_REPORT = 1, HID_REQ_GET_IDLE = 2, HID_REQ_GET_PROTOCOL = 3, HID_REQ_SET_REPORT = 9, HID_REQ_SET_IDLE = 10, HID_REQ_SET_PROTOCOL = 11, }; enum hid_report_type { HID_INPUT_REPORT = 0, HID_OUTPUT_REPORT = 1, HID_FEATURE_REPORT = 2, HID_REPORT_TYPES = 3, }; enum hid_type { HID_TYPE_OTHER = 0, HID_TYPE_USBMOUSE = 1, HID_TYPE_USBNONE = 2, }; enum hk_flags { HK_FLAG_TIMER = 1, HK_FLAG_RCU = 2, HK_FLAG_MISC = 4, HK_FLAG_SCHED = 8, HK_FLAG_TICK = 16, HK_FLAG_DOMAIN = 32, HK_FLAG_WQ = 64, HK_FLAG_MANAGED_IRQ = 128, HK_FLAG_KTHREAD = 256, }; enum hk_type { HK_TYPE_TIMER = 0, HK_TYPE_RCU = 1, HK_TYPE_MISC = 2, HK_TYPE_SCHED = 3, HK_TYPE_TICK = 4, HK_TYPE_DOMAIN = 5, HK_TYPE_WQ = 6, HK_TYPE_MANAGED_IRQ = 7, HK_TYPE_KTHREAD = 8, HK_TYPE_MAX = 9, }; enum hpet_mode { HPET_MODE_UNUSED = 0, HPET_MODE_LEGACY = 1, HPET_MODE_CLOCKEVT = 2, HPET_MODE_DEVICE = 3, }; enum hpx_type3_cfg_loc { HPX_CFG_PCICFG = 0, HPX_CFG_PCIE_CAP = 1, HPX_CFG_PCIE_CAP_EXT = 2, HPX_CFG_VEND_CAP = 3, HPX_CFG_DVSEC = 4, HPX_CFG_MAX = 5, }; enum hpx_type3_dev_type { HPX_TYPE_ENDPOINT = 1, HPX_TYPE_LEG_END = 2, HPX_TYPE_RC_END = 4, HPX_TYPE_RC_EC = 8, HPX_TYPE_ROOT_PORT = 16, HPX_TYPE_UPSTREAM = 32, HPX_TYPE_DOWNSTREAM = 64, HPX_TYPE_PCI_BRIDGE = 128, HPX_TYPE_PCIE_BRIDGE = 256, }; enum hpx_type3_fn_type { HPX_FN_NORMAL = 1, HPX_FN_SRIOV_PHYS = 2, HPX_FN_SRIOV_VIRT = 4, }; enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC = 0, HRTIMER_BASE_REALTIME = 1, HRTIMER_BASE_BOOTTIME = 2, HRTIMER_BASE_TAI = 3, HRTIMER_BASE_MONOTONIC_SOFT = 4, HRTIMER_BASE_REALTIME_SOFT = 5, HRTIMER_BASE_BOOTTIME_SOFT = 6, HRTIMER_BASE_TAI_SOFT = 7, HRTIMER_MAX_CLOCK_BASES = 8, }; enum hrtimer_mode { HRTIMER_MODE_ABS = 0, HRTIMER_MODE_REL = 1, HRTIMER_MODE_PINNED = 2, HRTIMER_MODE_SOFT = 4, HRTIMER_MODE_HARD = 8, HRTIMER_MODE_ABS_PINNED = 2, HRTIMER_MODE_REL_PINNED = 3, HRTIMER_MODE_ABS_SOFT = 4, HRTIMER_MODE_REL_SOFT = 5, HRTIMER_MODE_ABS_PINNED_SOFT = 6, HRTIMER_MODE_REL_PINNED_SOFT = 7, HRTIMER_MODE_ABS_HARD = 8, HRTIMER_MODE_REL_HARD = 9, HRTIMER_MODE_ABS_PINNED_HARD = 10, HRTIMER_MODE_REL_PINNED_HARD = 11, }; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1, }; enum hugetlb_memory_event { HUGETLB_MAX = 0, HUGETLB_NR_MEMORY_EVENTS = 1, }; enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable = 1, HPG_temporary = 2, HPG_freed = 3, HPG_vmemmap_optimized = 4, HPG_raw_hwp_unreliable = 5, __NR_HPAGEFLAGS = 6, }; enum hugetlb_param { Opt_gid___4 = 0, Opt_min_size = 1, Opt_mode___4 = 2, Opt_nr_inodes = 3, Opt_pagesize = 4, Opt_size = 5, Opt_uid___4 = 6, }; enum hugetlbfs_size_type { NO_SIZE = 0, SIZE_STD = 1, SIZE_PERCENT = 2, }; enum hv_tlb_flush_fifos { HV_L1_TLB_FLUSH_FIFO = 0, HV_L2_TLB_FLUSH_FIFO = 1, HV_NR_TLB_FLUSH_FIFOS = 2, }; enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED = 0, HW_EVENT_ERR_UNCORRECTED = 1, HW_EVENT_ERR_DEFERRED = 2, HW_EVENT_ERR_FATAL = 3, HW_EVENT_ERR_INFO = 4, }; enum hwmon_chip_attributes { hwmon_chip_temp_reset_history = 0, hwmon_chip_in_reset_history = 1, hwmon_chip_curr_reset_history = 2, hwmon_chip_power_reset_history = 3, hwmon_chip_register_tz = 4, hwmon_chip_update_interval = 5, 
hwmon_chip_alarms = 6, hwmon_chip_samples = 7, hwmon_chip_curr_samples = 8, hwmon_chip_in_samples = 9, hwmon_chip_power_samples = 10, hwmon_chip_temp_samples = 11, hwmon_chip_beep_enable = 12, hwmon_chip_pec = 13, }; enum hwmon_curr_attributes { hwmon_curr_enable = 0, hwmon_curr_input = 1, hwmon_curr_min = 2, hwmon_curr_max = 3, hwmon_curr_lcrit = 4, hwmon_curr_crit = 5, hwmon_curr_average = 6, hwmon_curr_lowest = 7, hwmon_curr_highest = 8, hwmon_curr_reset_history = 9, hwmon_curr_label = 10, hwmon_curr_alarm = 11, hwmon_curr_min_alarm = 12, hwmon_curr_max_alarm = 13, hwmon_curr_lcrit_alarm = 14, hwmon_curr_crit_alarm = 15, hwmon_curr_rated_min = 16, hwmon_curr_rated_max = 17, hwmon_curr_beep = 18, }; enum hwmon_energy_attributes { hwmon_energy_enable = 0, hwmon_energy_input = 1, hwmon_energy_label = 2, }; enum hwmon_fan_attributes { hwmon_fan_enable = 0, hwmon_fan_input = 1, hwmon_fan_label = 2, hwmon_fan_min = 3, hwmon_fan_max = 4, hwmon_fan_div = 5, hwmon_fan_pulses = 6, hwmon_fan_target = 7, hwmon_fan_alarm = 8, hwmon_fan_min_alarm = 9, hwmon_fan_max_alarm = 10, hwmon_fan_fault = 11, hwmon_fan_beep = 12, }; enum hwmon_humidity_attributes { hwmon_humidity_enable = 0, hwmon_humidity_input = 1, hwmon_humidity_label = 2, hwmon_humidity_min = 3, hwmon_humidity_min_hyst = 4, hwmon_humidity_max = 5, hwmon_humidity_max_hyst = 6, hwmon_humidity_alarm = 7, hwmon_humidity_fault = 8, hwmon_humidity_rated_min = 9, hwmon_humidity_rated_max = 10, hwmon_humidity_min_alarm = 11, hwmon_humidity_max_alarm = 12, }; enum hwmon_in_attributes { hwmon_in_enable = 0, hwmon_in_input = 1, hwmon_in_min = 2, hwmon_in_max = 3, hwmon_in_lcrit = 4, hwmon_in_crit = 5, hwmon_in_average = 6, hwmon_in_lowest = 7, hwmon_in_highest = 8, hwmon_in_reset_history = 9, hwmon_in_label = 10, hwmon_in_alarm = 11, hwmon_in_min_alarm = 12, hwmon_in_max_alarm = 13, hwmon_in_lcrit_alarm = 14, hwmon_in_crit_alarm = 15, hwmon_in_rated_min = 16, hwmon_in_rated_max = 17, hwmon_in_beep = 18, hwmon_in_fault = 19, }; enum hwmon_intrusion_attributes { hwmon_intrusion_alarm = 0, hwmon_intrusion_beep = 1, }; enum hwmon_power_attributes { hwmon_power_enable = 0, hwmon_power_average = 1, hwmon_power_average_interval = 2, hwmon_power_average_interval_max = 3, hwmon_power_average_interval_min = 4, hwmon_power_average_highest = 5, hwmon_power_average_lowest = 6, hwmon_power_average_max = 7, hwmon_power_average_min = 8, hwmon_power_input = 9, hwmon_power_input_highest = 10, hwmon_power_input_lowest = 11, hwmon_power_reset_history = 12, hwmon_power_accuracy = 13, hwmon_power_cap = 14, hwmon_power_cap_hyst = 15, hwmon_power_cap_max = 16, hwmon_power_cap_min = 17, hwmon_power_min = 18, hwmon_power_max = 19, hwmon_power_crit = 20, hwmon_power_lcrit = 21, hwmon_power_label = 22, hwmon_power_alarm = 23, hwmon_power_cap_alarm = 24, hwmon_power_min_alarm = 25, hwmon_power_max_alarm = 26, hwmon_power_lcrit_alarm = 27, hwmon_power_crit_alarm = 28, hwmon_power_rated_min = 29, hwmon_power_rated_max = 30, }; enum hwmon_pwm_attributes { hwmon_pwm_input = 0, hwmon_pwm_enable = 1, hwmon_pwm_mode = 2, hwmon_pwm_freq = 3, hwmon_pwm_auto_channels_temp = 4, }; enum hwmon_sensor_types { hwmon_chip = 0, hwmon_temp = 1, hwmon_in = 2, hwmon_curr = 3, hwmon_power = 4, hwmon_energy = 5, hwmon_humidity = 6, hwmon_fan = 7, hwmon_pwm = 8, hwmon_intrusion = 9, hwmon_max = 10, }; enum hwmon_temp_attributes { hwmon_temp_enable = 0, hwmon_temp_input = 1, hwmon_temp_type = 2, hwmon_temp_lcrit = 3, hwmon_temp_lcrit_hyst = 4, hwmon_temp_min = 5, hwmon_temp_min_hyst = 6, hwmon_temp_max 
= 7, hwmon_temp_max_hyst = 8, hwmon_temp_crit = 9, hwmon_temp_crit_hyst = 10, hwmon_temp_emergency = 11, hwmon_temp_emergency_hyst = 12, hwmon_temp_alarm = 13, hwmon_temp_lcrit_alarm = 14, hwmon_temp_min_alarm = 15, hwmon_temp_max_alarm = 16, hwmon_temp_crit_alarm = 17, hwmon_temp_emergency_alarm = 18, hwmon_temp_fault = 19, hwmon_temp_offset = 20, hwmon_temp_label = 21, hwmon_temp_lowest = 22, hwmon_temp_highest = 23, hwmon_temp_reset_history = 24, hwmon_temp_rated_min = 25, hwmon_temp_rated_max = 26, hwmon_temp_beep = 27, }; enum hwparam_type { hwparam_ioport = 0, hwparam_iomem = 1, hwparam_ioport_or_iomem = 2, hwparam_irq = 3, hwparam_dma = 4, hwparam_dma_addr = 5, hwparam_other = 6, }; enum hwtstamp_flags { HWTSTAMP_FLAG_BONDED_PHC_INDEX = 1, HWTSTAMP_FLAG_LAST = 1, HWTSTAMP_FLAG_MASK = 1, }; enum hwtstamp_rx_filters { HWTSTAMP_FILTER_NONE = 0, HWTSTAMP_FILTER_ALL = 1, HWTSTAMP_FILTER_SOME = 2, HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, HWTSTAMP_FILTER_PTP_V2_EVENT = 12, HWTSTAMP_FILTER_PTP_V2_SYNC = 13, HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, HWTSTAMP_FILTER_NTP_ALL = 15, __HWTSTAMP_FILTER_CNT = 16, }; enum hwtstamp_source { HWTSTAMP_SOURCE_UNSPEC = 0, HWTSTAMP_SOURCE_NETDEV = 1, HWTSTAMP_SOURCE_PHYLIB = 2, }; enum hwtstamp_tx_types { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1, HWTSTAMP_TX_ONESTEP_SYNC = 2, HWTSTAMP_TX_ONESTEP_P2P = 3, __HWTSTAMP_TX_CNT = 4, }; enum hybrid_cpu_type { HYBRID_INTEL_NONE = 0, HYBRID_INTEL_ATOM = 32, HYBRID_INTEL_CORE = 64, }; enum hybrid_pmu_type { not_hybrid = 0, hybrid_small = 1, hybrid_big = 2, hybrid_big_small = 3, }; enum i8042_controller_reset_mode { I8042_RESET_NEVER = 0, I8042_RESET_ALWAYS = 1, I8042_RESET_ON_S2RAM = 2, }; enum ib_atomic_cap { IB_ATOMIC_NONE = 0, IB_ATOMIC_HCA = 1, IB_ATOMIC_GLOB = 2, }; enum ib_cq_notify_flags { IB_CQ_SOLICITED = 1, IB_CQ_NEXT_COMP = 2, IB_CQ_SOLICITED_MASK = 3, IB_CQ_REPORT_MISSED_EVENTS = 4, }; enum ib_event_type { IB_EVENT_CQ_ERR = 0, IB_EVENT_QP_FATAL = 1, IB_EVENT_QP_REQ_ERR = 2, IB_EVENT_QP_ACCESS_ERR = 3, IB_EVENT_COMM_EST = 4, IB_EVENT_SQ_DRAINED = 5, IB_EVENT_PATH_MIG = 6, IB_EVENT_PATH_MIG_ERR = 7, IB_EVENT_DEVICE_FATAL = 8, IB_EVENT_PORT_ACTIVE = 9, IB_EVENT_PORT_ERR = 10, IB_EVENT_LID_CHANGE = 11, IB_EVENT_PKEY_CHANGE = 12, IB_EVENT_SM_CHANGE = 13, IB_EVENT_SRQ_ERR = 14, IB_EVENT_SRQ_LIMIT_REACHED = 15, IB_EVENT_QP_LAST_WQE_REACHED = 16, IB_EVENT_CLIENT_REREGISTER = 17, IB_EVENT_GID_CHANGE = 18, IB_EVENT_WQ_FATAL = 19, }; enum ib_flow_action_type { IB_FLOW_ACTION_UNSPECIFIED = 0, IB_FLOW_ACTION_ESP = 1, }; enum ib_flow_attr_type { IB_FLOW_ATTR_NORMAL = 0, IB_FLOW_ATTR_ALL_DEFAULT = 1, IB_FLOW_ATTR_MC_DEFAULT = 2, IB_FLOW_ATTR_SNIFFER = 3, }; enum ib_flow_spec_type { IB_FLOW_SPEC_ETH = 32, IB_FLOW_SPEC_IB = 34, IB_FLOW_SPEC_IPV4 = 48, IB_FLOW_SPEC_IPV6 = 49, IB_FLOW_SPEC_ESP = 52, IB_FLOW_SPEC_TCP = 64, IB_FLOW_SPEC_UDP = 65, IB_FLOW_SPEC_VXLAN_TUNNEL = 80, IB_FLOW_SPEC_GRE = 81, IB_FLOW_SPEC_MPLS = 96, IB_FLOW_SPEC_INNER = 256, IB_FLOW_SPEC_ACTION_TAG = 4096, IB_FLOW_SPEC_ACTION_DROP = 4097, IB_FLOW_SPEC_ACTION_HANDLE = 4098, IB_FLOW_SPEC_ACTION_COUNT = 4099, }; enum ib_gid_type { IB_GID_TYPE_IB = 0, IB_GID_TYPE_ROCE = 1, IB_GID_TYPE_ROCE_UDP_ENCAP = 2, IB_GID_TYPE_SIZE = 3, }; enum ib_mig_state { 
IB_MIG_MIGRATED = 0, IB_MIG_REARM = 1, IB_MIG_ARMED = 2, }; enum ib_mr_type { IB_MR_TYPE_MEM_REG = 0, IB_MR_TYPE_SG_GAPS = 1, IB_MR_TYPE_DM = 2, IB_MR_TYPE_USER = 3, IB_MR_TYPE_DMA = 4, IB_MR_TYPE_INTEGRITY = 5, }; enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5, }; enum ib_mw_type { IB_MW_TYPE_1 = 1, IB_MW_TYPE_2 = 2, }; enum ib_poll_context { IB_POLL_SOFTIRQ = 0, IB_POLL_WORKQUEUE = 1, IB_POLL_UNBOUND_WORKQUEUE = 2, IB_POLL_LAST_POOL_TYPE = 2, IB_POLL_DIRECT = 3, }; enum ib_port_state { IB_PORT_NOP = 0, IB_PORT_DOWN = 1, IB_PORT_INIT = 2, IB_PORT_ARMED = 3, IB_PORT_ACTIVE = 4, IB_PORT_ACTIVE_DEFER = 5, }; enum ib_qp_state { IB_QPS_RESET = 0, IB_QPS_INIT = 1, IB_QPS_RTR = 2, IB_QPS_RTS = 3, IB_QPS_SQD = 4, IB_QPS_SQE = 5, IB_QPS_ERR = 6, }; enum ib_qp_type { IB_QPT_SMI = 0, IB_QPT_GSI = 1, IB_QPT_RC = 2, IB_QPT_UC = 3, IB_QPT_UD = 4, IB_QPT_RAW_IPV6 = 5, IB_QPT_RAW_ETHERTYPE = 6, IB_QPT_RAW_PACKET = 8, IB_QPT_XRC_INI = 9, IB_QPT_XRC_TGT = 10, IB_QPT_MAX = 11, IB_QPT_DRIVER = 255, IB_QPT_RESERVED1 = 4096, IB_QPT_RESERVED2 = 4097, IB_QPT_RESERVED3 = 4098, IB_QPT_RESERVED4 = 4099, IB_QPT_RESERVED5 = 4100, IB_QPT_RESERVED6 = 4101, IB_QPT_RESERVED7 = 4102, IB_QPT_RESERVED8 = 4103, IB_QPT_RESERVED9 = 4104, IB_QPT_RESERVED10 = 4105, }; enum ib_sig_err_type { IB_SIG_BAD_GUARD = 0, IB_SIG_BAD_REFTAG = 1, IB_SIG_BAD_APPTAG = 2, }; enum ib_sig_type { IB_SIGNAL_ALL_WR = 0, IB_SIGNAL_REQ_WR = 1, }; enum ib_signature_type { IB_SIG_TYPE_NONE = 0, IB_SIG_TYPE_T10_DIF = 1, }; enum ib_srq_attr_mask { IB_SRQ_MAX_WR = 1, IB_SRQ_LIMIT = 2, }; enum ib_srq_type { IB_SRQT_BASIC = 0, IB_SRQT_XRC = 1, IB_SRQT_TM = 2, }; enum ib_t10_dif_bg_type { IB_T10DIF_CRC = 0, IB_T10DIF_CSUM = 1, }; enum ib_uverbs_access_flags { IB_UVERBS_ACCESS_LOCAL_WRITE = 1, IB_UVERBS_ACCESS_REMOTE_WRITE = 2, IB_UVERBS_ACCESS_REMOTE_READ = 4, IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, IB_UVERBS_ACCESS_MW_BIND = 16, IB_UVERBS_ACCESS_ZERO_BASED = 32, IB_UVERBS_ACCESS_ON_DEMAND = 64, IB_UVERBS_ACCESS_HUGETLB = 128, IB_UVERBS_ACCESS_FLUSH_GLOBAL = 256, IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 512, IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, }; enum ib_uverbs_advise_mr_advice { IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH = 0, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE = 1, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = 2, }; enum ib_uverbs_create_qp_mask { IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, }; enum ib_uverbs_device_cap_flags { IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1ULL, IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 2ULL, IB_UVERBS_DEVICE_BAD_QKEY_CNTR = 4ULL, IB_UVERBS_DEVICE_RAW_MULTI = 8ULL, IB_UVERBS_DEVICE_AUTO_PATH_MIG = 16ULL, IB_UVERBS_DEVICE_CHANGE_PHY_PORT = 32ULL, IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE = 64ULL, IB_UVERBS_DEVICE_CURR_QP_STATE_MOD = 128ULL, IB_UVERBS_DEVICE_SHUTDOWN_PORT = 256ULL, IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT = 1024ULL, IB_UVERBS_DEVICE_SYS_IMAGE_GUID = 2048ULL, IB_UVERBS_DEVICE_RC_RNR_NAK_GEN = 4096ULL, IB_UVERBS_DEVICE_SRQ_RESIZE = 8192ULL, IB_UVERBS_DEVICE_N_NOTIFY_CQ = 16384ULL, IB_UVERBS_DEVICE_MEM_WINDOW = 131072ULL, IB_UVERBS_DEVICE_UD_IP_CSUM = 262144ULL, IB_UVERBS_DEVICE_XRC = 1048576ULL, IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS = 2097152ULL, IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A = 8388608ULL, IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B = 16777216ULL, IB_UVERBS_DEVICE_RC_IP_CSUM = 33554432ULL, IB_UVERBS_DEVICE_RAW_IP_CSUM = 67108864ULL, IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING = 536870912ULL, IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 17179869184ULL, 
IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 68719476736ULL, IB_UVERBS_DEVICE_FLUSH_GLOBAL = 274877906944ULL, IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 549755813888ULL, IB_UVERBS_DEVICE_ATOMIC_WRITE = 1099511627776ULL, }; enum ib_uverbs_gid_type { IB_UVERBS_GID_TYPE_IB = 0, IB_UVERBS_GID_TYPE_ROCE_V1 = 1, IB_UVERBS_GID_TYPE_ROCE_V2 = 2, }; enum ib_uverbs_qp_create_flags { IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, }; enum ib_uverbs_qp_type { IB_UVERBS_QPT_RC = 2, IB_UVERBS_QPT_UC = 3, IB_UVERBS_QPT_UD = 4, IB_UVERBS_QPT_RAW_PACKET = 8, IB_UVERBS_QPT_XRC_INI = 9, IB_UVERBS_QPT_XRC_TGT = 10, IB_UVERBS_QPT_DRIVER = 255, }; enum ib_uverbs_raw_packet_caps { IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING = 1, IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS = 2, IB_UVERBS_RAW_PACKET_CAP_IP_CSUM = 4, IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP = 8, }; enum ib_uverbs_srq_type { IB_UVERBS_SRQT_BASIC = 0, IB_UVERBS_SRQT_XRC = 1, IB_UVERBS_SRQT_TM = 2, }; enum ib_uverbs_wc_opcode { IB_UVERBS_WC_SEND = 0, IB_UVERBS_WC_RDMA_WRITE = 1, IB_UVERBS_WC_RDMA_READ = 2, IB_UVERBS_WC_COMP_SWAP = 3, IB_UVERBS_WC_FETCH_ADD = 4, IB_UVERBS_WC_BIND_MW = 5, IB_UVERBS_WC_LOCAL_INV = 6, IB_UVERBS_WC_TSO = 7, IB_UVERBS_WC_FLUSH = 8, IB_UVERBS_WC_ATOMIC_WRITE = 9, }; enum ib_uverbs_wq_flags { IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, }; enum ib_uverbs_wq_type { IB_UVERBS_WQT_RQ = 0, }; enum ib_uverbs_wr_opcode { IB_UVERBS_WR_RDMA_WRITE = 0, IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, IB_UVERBS_WR_SEND = 2, IB_UVERBS_WR_SEND_WITH_IMM = 3, IB_UVERBS_WR_RDMA_READ = 4, IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, IB_UVERBS_WR_LOCAL_INV = 7, IB_UVERBS_WR_BIND_MW = 8, IB_UVERBS_WR_SEND_WITH_INV = 9, IB_UVERBS_WR_TSO = 10, IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, IB_UVERBS_WR_FLUSH = 14, IB_UVERBS_WR_ATOMIC_WRITE = 15, }; enum ib_uverbs_write_cmds { IB_USER_VERBS_CMD_GET_CONTEXT = 0, IB_USER_VERBS_CMD_QUERY_DEVICE = 1, IB_USER_VERBS_CMD_QUERY_PORT = 2, IB_USER_VERBS_CMD_ALLOC_PD = 3, IB_USER_VERBS_CMD_DEALLOC_PD = 4, IB_USER_VERBS_CMD_CREATE_AH = 5, IB_USER_VERBS_CMD_MODIFY_AH = 6, IB_USER_VERBS_CMD_QUERY_AH = 7, IB_USER_VERBS_CMD_DESTROY_AH = 8, IB_USER_VERBS_CMD_REG_MR = 9, IB_USER_VERBS_CMD_REG_SMR = 10, IB_USER_VERBS_CMD_REREG_MR = 11, IB_USER_VERBS_CMD_QUERY_MR = 12, IB_USER_VERBS_CMD_DEREG_MR = 13, IB_USER_VERBS_CMD_ALLOC_MW = 14, IB_USER_VERBS_CMD_BIND_MW = 15, IB_USER_VERBS_CMD_DEALLOC_MW = 16, IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, IB_USER_VERBS_CMD_CREATE_CQ = 18, IB_USER_VERBS_CMD_RESIZE_CQ = 19, IB_USER_VERBS_CMD_DESTROY_CQ = 20, IB_USER_VERBS_CMD_POLL_CQ = 21, IB_USER_VERBS_CMD_PEEK_CQ = 22, IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, IB_USER_VERBS_CMD_CREATE_QP = 24, IB_USER_VERBS_CMD_QUERY_QP = 25, IB_USER_VERBS_CMD_MODIFY_QP = 26, IB_USER_VERBS_CMD_DESTROY_QP = 27, IB_USER_VERBS_CMD_POST_SEND = 28, IB_USER_VERBS_CMD_POST_RECV = 29, IB_USER_VERBS_CMD_ATTACH_MCAST = 30, IB_USER_VERBS_CMD_DETACH_MCAST = 31, IB_USER_VERBS_CMD_CREATE_SRQ = 32, IB_USER_VERBS_CMD_MODIFY_SRQ = 33, IB_USER_VERBS_CMD_QUERY_SRQ = 34, IB_USER_VERBS_CMD_DESTROY_SRQ = 35, IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, IB_USER_VERBS_CMD_OPEN_XRCD = 37, IB_USER_VERBS_CMD_CLOSE_XRCD = 38, 
IB_USER_VERBS_CMD_CREATE_XSRQ = 39, IB_USER_VERBS_CMD_OPEN_QP = 40, }; enum ib_wc_opcode { IB_WC_SEND = 0, IB_WC_RDMA_WRITE = 1, IB_WC_RDMA_READ = 2, IB_WC_COMP_SWAP = 3, IB_WC_FETCH_ADD = 4, IB_WC_BIND_MW = 5, IB_WC_LOCAL_INV = 6, IB_WC_LSO = 7, IB_WC_ATOMIC_WRITE = 9, IB_WC_REG_MR = 10, IB_WC_MASKED_COMP_SWAP = 11, IB_WC_MASKED_FETCH_ADD = 12, IB_WC_FLUSH = 8, IB_WC_RECV = 128, IB_WC_RECV_RDMA_WITH_IMM = 129, }; enum ib_wc_status { IB_WC_SUCCESS = 0, IB_WC_LOC_LEN_ERR = 1, IB_WC_LOC_QP_OP_ERR = 2, IB_WC_LOC_EEC_OP_ERR = 3, IB_WC_LOC_PROT_ERR = 4, IB_WC_WR_FLUSH_ERR = 5, IB_WC_MW_BIND_ERR = 6, IB_WC_BAD_RESP_ERR = 7, IB_WC_LOC_ACCESS_ERR = 8, IB_WC_REM_INV_REQ_ERR = 9, IB_WC_REM_ACCESS_ERR = 10, IB_WC_REM_OP_ERR = 11, IB_WC_RETRY_EXC_ERR = 12, IB_WC_RNR_RETRY_EXC_ERR = 13, IB_WC_LOC_RDD_VIOL_ERR = 14, IB_WC_REM_INV_RD_REQ_ERR = 15, IB_WC_REM_ABORT_ERR = 16, IB_WC_INV_EECN_ERR = 17, IB_WC_INV_EEC_STATE_ERR = 18, IB_WC_FATAL_ERR = 19, IB_WC_RESP_TIMEOUT_ERR = 20, IB_WC_GENERAL_ERR = 21, }; enum ib_wq_state { IB_WQS_RESET = 0, IB_WQS_RDY = 1, IB_WQS_ERR = 2, }; enum ib_wq_type { IB_WQT_RQ = 0, }; enum ib_wr_opcode { IB_WR_RDMA_WRITE = 0, IB_WR_RDMA_WRITE_WITH_IMM = 1, IB_WR_SEND = 2, IB_WR_SEND_WITH_IMM = 3, IB_WR_RDMA_READ = 4, IB_WR_ATOMIC_CMP_AND_SWP = 5, IB_WR_ATOMIC_FETCH_AND_ADD = 6, IB_WR_BIND_MW = 8, IB_WR_LSO = 10, IB_WR_SEND_WITH_INV = 9, IB_WR_RDMA_READ_WITH_INV = 11, IB_WR_LOCAL_INV = 7, IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, IB_WR_FLUSH = 14, IB_WR_ATOMIC_WRITE = 15, IB_WR_REG_MR = 32, IB_WR_REG_MR_INTEGRITY = 33, IB_WR_RESERVED1 = 240, IB_WR_RESERVED2 = 241, IB_WR_RESERVED3 = 242, IB_WR_RESERVED4 = 243, IB_WR_RESERVED5 = 244, IB_WR_RESERVED6 = 245, IB_WR_RESERVED7 = 246, IB_WR_RESERVED8 = 247, IB_WR_RESERVED9 = 248, IB_WR_RESERVED10 = 249, }; enum ibs_states { IBS_ENABLED = 0, IBS_STARTED = 1, IBS_STOPPING = 2, IBS_STOPPED = 3, IBS_MAX_STATES = 4, }; enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT = 1, IDLE_NOMWAIT = 2, IDLE_POLL = 3, }; enum ieee_attrs { DCB_ATTR_IEEE_UNSPEC = 0, DCB_ATTR_IEEE_ETS = 1, DCB_ATTR_IEEE_PFC = 2, DCB_ATTR_IEEE_APP_TABLE = 3, DCB_ATTR_IEEE_PEER_ETS = 4, DCB_ATTR_IEEE_PEER_PFC = 5, DCB_ATTR_IEEE_PEER_APP = 6, DCB_ATTR_IEEE_MAXRATE = 7, DCB_ATTR_IEEE_QCN = 8, DCB_ATTR_IEEE_QCN_STATS = 9, DCB_ATTR_DCB_BUFFER = 10, DCB_ATTR_DCB_APP_TRUST_TABLE = 11, DCB_ATTR_DCB_REWR_TABLE = 12, __DCB_ATTR_IEEE_MAX = 13, }; enum ieee_attrs_app { DCB_ATTR_IEEE_APP_UNSPEC = 0, DCB_ATTR_IEEE_APP = 1, DCB_ATTR_DCB_APP = 2, __DCB_ATTR_IEEE_APP_MAX = 3, }; enum ifla_geneve_df { GENEVE_DF_UNSET = 0, GENEVE_DF_SET = 1, GENEVE_DF_INHERIT = 2, __GENEVE_DF_END = 3, GENEVE_DF_MAX = 2, }; enum ifla_vxlan_df { VXLAN_DF_UNSET = 0, VXLAN_DF_SET = 1, VXLAN_DF_INHERIT = 2, __VXLAN_DF_END = 3, VXLAN_DF_MAX = 2, }; enum ifla_vxlan_label_policy { VXLAN_LABEL_FIXED = 0, VXLAN_LABEL_INHERIT = 1, __VXLAN_LABEL_END = 2, VXLAN_LABEL_MAX = 1, }; enum ima_fs_flags { IMA_FS_BUSY = 0, }; enum ima_hooks { NONE___2 = 0, FILE_CHECK = 1, MMAP_CHECK = 2, MMAP_CHECK_REQPROT = 3, BPRM_CHECK = 4, CREDS_CHECK = 5, POST_SETATTR = 6, MODULE_CHECK = 7, FIRMWARE_CHECK = 8, KEXEC_KERNEL_CHECK = 9, KEXEC_INITRAMFS_CHECK = 10, POLICY_CHECK = 11, KEXEC_CMDLINE = 12, KEY_CHECK = 13, CRITICAL_DATA = 14, SETXATTR_CHECK = 15, MAX_CHECK = 16, }; enum ima_show_type { IMA_SHOW_BINARY = 0, IMA_SHOW_BINARY_NO_FIELD_LEN = 1, IMA_SHOW_BINARY_OLD_STRING_FMT = 2, IMA_SHOW_ASCII = 3, }; enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64 = 0, IN6_ADDR_GEN_MODE_NONE = 1, 
IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, IN6_ADDR_GEN_MODE_RANDOM = 3, };
enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, ICSK_ACK_PUSHED2 = 8, ICSK_ACK_NOW = 16, ICSK_ACK_NOMEM = 32, };
enum inode_i_mutex_lock_class { I_MUTEX_NORMAL = 0, I_MUTEX_PARENT = 1, I_MUTEX_CHILD = 2, I_MUTEX_XATTR = 3, I_MUTEX_NONDIR2 = 4, I_MUTEX_PARENT2 = 5, };
enum input_clock_type { INPUT_CLK_REAL = 0, INPUT_CLK_MONO = 1, INPUT_CLK_BOOT = 2, INPUT_CLK_MAX = 3, };
enum insn_mmio_type { INSN_MMIO_DECODE_FAILED = 0, INSN_MMIO_WRITE = 1, INSN_MMIO_WRITE_IMM = 2, INSN_MMIO_READ = 3, INSN_MMIO_READ_ZERO_EXTEND = 4, INSN_MMIO_READ_SIGN_EXTEND = 5, INSN_MMIO_MOVS = 6, };
enum insn_mode { INSN_MODE_32 = 0, INSN_MODE_64 = 1, INSN_MODE_KERN = 2, INSN_NUM_MODES = 3, };
enum insn_type { CALL = 0, NOP = 1, JMP = 2, RET = 3, JCC = 4, };
enum integrity_status { INTEGRITY_PASS = 0, INTEGRITY_PASS_IMMUTABLE = 1, INTEGRITY_FAIL = 2, INTEGRITY_FAIL_IMMUTABLE = 3, INTEGRITY_NOLABEL = 4, INTEGRITY_NOXATTRS = 5, INTEGRITY_UNKNOWN = 6, };
enum intel_excl_state_type { INTEL_EXCL_UNUSED = 0, INTEL_EXCL_SHARED = 1, INTEL_EXCL_EXCLUSIVE = 2, };
enum io_uring_cmd_flags { IO_URING_F_COMPLETE_DEFER = 1, IO_URING_F_UNLOCKED = 2, IO_URING_F_MULTISHOT = 4, IO_URING_F_IOWQ = 8, IO_URING_F_NONBLOCK = -2147483648, IO_URING_F_SQE128 = 256, IO_URING_F_CQE32 = 512, IO_URING_F_IOPOLL = 1024, IO_URING_F_CANCEL = 2048, IO_URING_F_COMPAT = 4096, };
enum io_uring_msg_ring_flags { IORING_MSG_DATA = 0, IORING_MSG_SEND_FD = 1, };
enum io_uring_op { IORING_OP_NOP = 0, IORING_OP_READV = 1, IORING_OP_WRITEV = 2, IORING_OP_FSYNC = 3, IORING_OP_READ_FIXED = 4, IORING_OP_WRITE_FIXED = 5, IORING_OP_POLL_ADD = 6, IORING_OP_POLL_REMOVE = 7, IORING_OP_SYNC_FILE_RANGE = 8, IORING_OP_SENDMSG = 9, IORING_OP_RECVMSG = 10, IORING_OP_TIMEOUT = 11, IORING_OP_TIMEOUT_REMOVE = 12, IORING_OP_ACCEPT = 13, IORING_OP_ASYNC_CANCEL = 14, IORING_OP_LINK_TIMEOUT = 15, IORING_OP_CONNECT = 16, IORING_OP_FALLOCATE = 17, IORING_OP_OPENAT = 18, IORING_OP_CLOSE = 19, IORING_OP_FILES_UPDATE = 20, IORING_OP_STATX = 21, IORING_OP_READ = 22, IORING_OP_WRITE = 23, IORING_OP_FADVISE = 24, IORING_OP_MADVISE = 25, IORING_OP_SEND = 26, IORING_OP_RECV = 27, IORING_OP_OPENAT2 = 28, IORING_OP_EPOLL_CTL = 29, IORING_OP_SPLICE = 30, IORING_OP_PROVIDE_BUFFERS = 31, IORING_OP_REMOVE_BUFFERS = 32, IORING_OP_TEE = 33, IORING_OP_SHUTDOWN = 34, IORING_OP_RENAMEAT = 35, IORING_OP_UNLINKAT = 36, IORING_OP_MKDIRAT = 37, IORING_OP_SYMLINKAT = 38, IORING_OP_LINKAT = 39, IORING_OP_MSG_RING = 40, IORING_OP_FSETXATTR = 41, IORING_OP_SETXATTR = 42, IORING_OP_FGETXATTR = 43, IORING_OP_GETXATTR = 44, IORING_OP_SOCKET = 45, IORING_OP_URING_CMD = 46, IORING_OP_SEND_ZC = 47, IORING_OP_SENDMSG_ZC = 48, IORING_OP_READ_MULTISHOT = 49, IORING_OP_WAITID = 50, IORING_OP_FUTEX_WAIT = 51, IORING_OP_FUTEX_WAKE = 52, IORING_OP_FUTEX_WAITV = 53, IORING_OP_FIXED_FD_INSTALL = 54, IORING_OP_FTRUNCATE = 55, IORING_OP_BIND = 56, IORING_OP_LISTEN = 57, IORING_OP_LAST = 58, };
enum io_uring_register_op { IORING_REGISTER_BUFFERS = 0, IORING_UNREGISTER_BUFFERS = 1, IORING_REGISTER_FILES = 2, IORING_UNREGISTER_FILES = 3, IORING_REGISTER_EVENTFD = 4, IORING_UNREGISTER_EVENTFD = 5, IORING_REGISTER_FILES_UPDATE = 6, IORING_REGISTER_EVENTFD_ASYNC = 7, IORING_REGISTER_PROBE = 8, IORING_REGISTER_PERSONALITY = 9, IORING_UNREGISTER_PERSONALITY = 10, IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, IORING_REGISTER_FILES2 = 13, IORING_REGISTER_FILES_UPDATE2 = 14, IORING_REGISTER_BUFFERS2 = 15,
IORING_REGISTER_BUFFERS_UPDATE = 16, IORING_REGISTER_IOWQ_AFF = 17, IORING_UNREGISTER_IOWQ_AFF = 18, IORING_REGISTER_IOWQ_MAX_WORKERS = 19, IORING_REGISTER_RING_FDS = 20, IORING_UNREGISTER_RING_FDS = 21, IORING_REGISTER_PBUF_RING = 22, IORING_UNREGISTER_PBUF_RING = 23, IORING_REGISTER_SYNC_CANCEL = 24, IORING_REGISTER_FILE_ALLOC_RANGE = 25, IORING_REGISTER_PBUF_STATUS = 26, IORING_REGISTER_NAPI = 27, IORING_UNREGISTER_NAPI = 28, IORING_REGISTER_CLOCK = 29, IORING_REGISTER_CLONE_BUFFERS = 30, IORING_REGISTER_LAST = 31, IORING_REGISTER_USE_REGISTERED_RING = 2147483648, };
enum io_uring_register_pbuf_ring_flags { IOU_PBUF_RING_MMAP = 1, IOU_PBUF_RING_INC = 2, };
enum io_uring_register_restriction_op { IORING_RESTRICTION_REGISTER_OP = 0, IORING_RESTRICTION_SQE_OP = 1, IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, IORING_RESTRICTION_LAST = 4, };
enum io_uring_socket_op { SOCKET_URING_OP_SIOCINQ = 0, SOCKET_URING_OP_SIOCOUTQ = 1, SOCKET_URING_OP_GETSOCKOPT = 2, SOCKET_URING_OP_SETSOCKOPT = 3, };
enum io_uring_sqe_flags_bit { IOSQE_FIXED_FILE_BIT = 0, IOSQE_IO_DRAIN_BIT = 1, IOSQE_IO_LINK_BIT = 2, IOSQE_IO_HARDLINK_BIT = 3, IOSQE_ASYNC_BIT = 4, IOSQE_BUFFER_SELECT_BIT = 5, IOSQE_CQE_SKIP_SUCCESS_BIT = 6, };
enum io_wq_cancel { IO_WQ_CANCEL_OK = 0, IO_WQ_CANCEL_RUNNING = 1, IO_WQ_CANCEL_NOTFOUND = 2, };
enum io_wq_type { IO_WQ_BOUND = 0, IO_WQ_UNBOUND = 1, };
enum ioam6_event_attr { IOAM6_EVENT_ATTR_UNSPEC = 0, IOAM6_EVENT_ATTR_TRACE_NAMESPACE = 1, IOAM6_EVENT_ATTR_TRACE_NODELEN = 2, IOAM6_EVENT_ATTR_TRACE_TYPE = 3, IOAM6_EVENT_ATTR_TRACE_DATA = 4, __IOAM6_EVENT_ATTR_MAX = 5, };
enum ioam6_event_type { IOAM6_EVENT_UNSPEC = 0, IOAM6_EVENT_TRACE = 1, };
enum ioapic_domain_type { IOAPIC_DOMAIN_INVALID = 0, IOAPIC_DOMAIN_LEGACY = 1, IOAPIC_DOMAIN_STRICT = 2, IOAPIC_DOMAIN_DYNAMIC = 3, };
enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY = 0, IOMMU_CAP_NOEXEC = 1, IOMMU_CAP_PRE_BOOT_PROTECTION = 2, IOMMU_CAP_ENFORCE_CACHE_COHERENCY = 3, IOMMU_CAP_DEFERRED_FLUSH = 4, IOMMU_CAP_DIRTY_TRACKING = 5, };
enum iommu_dev_features { IOMMU_DEV_FEAT_SVA = 0, IOMMU_DEV_FEAT_IOPF = 1, };
enum iommu_dma_cookie_type { IOMMU_DMA_IOVA_COOKIE = 0, IOMMU_DMA_MSI_COOKIE = 1, };
enum iommu_dma_queue_type { IOMMU_DMA_OPTS_PER_CPU_QUEUE = 0, IOMMU_DMA_OPTS_SINGLE_QUEUE = 1, };
enum iommu_resv_type { IOMMU_RESV_DIRECT = 0, IOMMU_RESV_DIRECT_RELAXABLE = 1, IOMMU_RESV_RESERVED = 2, IOMMU_RESV_MSI = 3, IOMMU_RESV_SW_MSI = 4, };
enum ip6_defrag_users { IP6_DEFRAG_LOCAL_DELIVER = 0, IP6_DEFRAG_CONNTRACK_IN = 1, __IP6_DEFRAG_CONNTRACK_IN = 65536, IP6_DEFRAG_CONNTRACK_OUT = 65537, __IP6_DEFRAG_CONNTRACK_OUT = 131072, IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, };
enum ip_conntrack_dir { IP_CT_DIR_ORIGINAL = 0, IP_CT_DIR_REPLY = 1, IP_CT_DIR_MAX = 2, };
enum ip_conntrack_events { IPCT_NEW = 0, IPCT_RELATED = 1, IPCT_DESTROY = 2, IPCT_REPLY = 3, IPCT_ASSURED = 4, IPCT_PROTOINFO = 5, IPCT_HELPER = 6, IPCT_MARK = 7, IPCT_SEQADJ = 8, IPCT_NATSEQADJ = 8, IPCT_SECMARK = 9, IPCT_LABEL = 10, IPCT_SYNPROXY = 11, __IPCT_MAX = 12, };
enum ip_conntrack_expect_events { IPEXP_NEW = 0, IPEXP_DESTROY = 1, };
enum ip_conntrack_info { IP_CT_ESTABLISHED = 0, IP_CT_RELATED = 1, IP_CT_NEW = 2, IP_CT_IS_REPLY = 3, IP_CT_ESTABLISHED_REPLY = 3, IP_CT_RELATED_REPLY = 4, IP_CT_NUMBER = 5, IP_CT_UNTRACKED = 7, };
enum ip_conntrack_status { IPS_EXPECTED_BIT = 0, IPS_EXPECTED = 1, IPS_SEEN_REPLY_BIT = 1, IPS_SEEN_REPLY = 2, IPS_ASSURED_BIT = 2, IPS_ASSURED = 4, IPS_CONFIRMED_BIT = 3,
IPS_CONFIRMED = 8, IPS_SRC_NAT_BIT = 4, IPS_SRC_NAT = 16, IPS_DST_NAT_BIT = 5, IPS_DST_NAT = 32, IPS_NAT_MASK = 48, IPS_SEQ_ADJUST_BIT = 6, IPS_SEQ_ADJUST = 64, IPS_SRC_NAT_DONE_BIT = 7, IPS_SRC_NAT_DONE = 128, IPS_DST_NAT_DONE_BIT = 8, IPS_DST_NAT_DONE = 256, IPS_NAT_DONE_MASK = 384, IPS_DYING_BIT = 9, IPS_DYING = 512, IPS_FIXED_TIMEOUT_BIT = 10, IPS_FIXED_TIMEOUT = 1024, IPS_TEMPLATE_BIT = 11, IPS_TEMPLATE = 2048, IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = 4096, IPS_NAT_CLASH_BIT = 12, IPS_NAT_CLASH = 4096, IPS_HELPER_BIT = 13, IPS_HELPER = 8192, IPS_OFFLOAD_BIT = 14, IPS_OFFLOAD = 16384, IPS_HW_OFFLOAD_BIT = 15, IPS_HW_OFFLOAD = 32768, IPS_UNCHANGEABLE_MASK = 56313, __IPS_MAX_BIT = 16, };
enum ip_defrag_users { IP_DEFRAG_LOCAL_DELIVER = 0, IP_DEFRAG_CALL_RA_CHAIN = 1, IP_DEFRAG_CONNTRACK_IN = 2, __IP_DEFRAG_CONNTRACK_IN_END = 65537, IP_DEFRAG_CONNTRACK_OUT = 65538, __IP_DEFRAG_CONNTRACK_OUT_END = 131073, IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, IP_DEFRAG_VS_IN = 196610, IP_DEFRAG_VS_OUT = 196611, IP_DEFRAG_VS_FWD = 196612, IP_DEFRAG_AF_PACKET = 196613, IP_DEFRAG_MACVLAN = 196614, };
enum irq_alloc_type { X86_IRQ_ALLOC_TYPE_IOAPIC = 1, X86_IRQ_ALLOC_TYPE_HPET = 2, X86_IRQ_ALLOC_TYPE_PCI_MSI = 3, X86_IRQ_ALLOC_TYPE_PCI_MSIX = 4, X86_IRQ_ALLOC_TYPE_DMAR = 5, X86_IRQ_ALLOC_TYPE_AMDVI = 6, X86_IRQ_ALLOC_TYPE_UV = 7, };
enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED = 1, DOMAIN_BUS_GENERIC_MSI = 2, DOMAIN_BUS_PCI_MSI = 3, DOMAIN_BUS_PLATFORM_MSI = 4, DOMAIN_BUS_NEXUS = 5, DOMAIN_BUS_IPI = 6, DOMAIN_BUS_FSL_MC_MSI = 7, DOMAIN_BUS_TI_SCI_INTA_MSI = 8, DOMAIN_BUS_WAKEUP = 9, DOMAIN_BUS_VMD_MSI = 10, DOMAIN_BUS_PCI_DEVICE_MSI = 11, DOMAIN_BUS_PCI_DEVICE_MSIX = 12, DOMAIN_BUS_DMAR = 13, DOMAIN_BUS_AMDVI = 14, DOMAIN_BUS_DEVICE_MSI = 15, DOMAIN_BUS_WIRED_TO_MSI = 16, };
enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1, IRQ_GC_INIT_NESTED_LOCK = 2, IRQ_GC_MASK_CACHE_PER_TYPE = 4, IRQ_GC_NO_MASK = 8, IRQ_GC_BE_IO = 16, };
enum irqchip_irq_state { IRQCHIP_STATE_PENDING = 0, IRQCHIP_STATE_ACTIVE = 1, IRQCHIP_STATE_MASKED = 2, IRQCHIP_STATE_LINE_LEVEL = 3, };
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2, };
typedef enum irqreturn irqreturn_t;
enum iter_type { ITER_UBUF = 0, ITER_IOVEC = 1, ITER_BVEC = 2, ITER_KVEC = 3, ITER_FOLIOQ = 4, ITER_XARRAY = 5, ITER_DISCARD = 6, };
enum jbd2_shrink_type { JBD2_SHRINK_DESTROY = 0, JBD2_SHRINK_BUSY_STOP = 1, JBD2_SHRINK_BUSY_SKIP = 2, };
enum jbd_state_bits { BH_JBD = 16, BH_JWrite = 17, BH_Freed = 18, BH_Revoked = 19, BH_RevokeValid = 20, BH_JBDDirty = 21, BH_JournalHead = 22, BH_Shadow = 23, BH_Verified = 24, BH_JBDPrivateStart = 25, };
enum jump_label_type { JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1, };
enum kcore_type { KCORE_TEXT = 0, KCORE_VMALLOC = 1, KCORE_RAM = 2, KCORE_VMEMMAP = 3, KCORE_USER = 4, };
enum kernel_gp_hint { GP_NO_HINT = 0, GP_NON_CANONICAL = 1, GP_CANONICAL = 2, };
enum kernel_load_data_id { LOADING_UNKNOWN = 0, LOADING_FIRMWARE = 1, LOADING_MODULE = 2, LOADING_KEXEC_IMAGE = 3, LOADING_KEXEC_INITRAMFS = 4, LOADING_POLICY = 5, LOADING_X509_CERTIFICATE = 6, LOADING_MAX_ID = 7, };
enum kernel_pkey_operation { kernel_pkey_encrypt = 0, kernel_pkey_decrypt = 1, kernel_pkey_sign = 2, kernel_pkey_verify = 3, };
enum kernel_read_file_id { READING_UNKNOWN = 0, READING_FIRMWARE = 1, READING_MODULE = 2, READING_KEXEC_IMAGE = 3, READING_KEXEC_INITRAMFS = 4, READING_POLICY = 5, READING_X509_CERTIFICATE = 6, READING_MAX_ID = 7, };
enum kernfs_node_flag { KERNFS_ACTIVATED = 16,
KERNFS_NS = 32, KERNFS_HAS_SEQ_SHOW = 64, KERNFS_HAS_MMAP = 128, KERNFS_LOCKDEP = 256, KERNFS_HIDDEN = 512, KERNFS_SUICIDAL = 1024, KERNFS_SUICIDED = 2048, KERNFS_EMPTY_DIR = 4096, KERNFS_HAS_RELEASE = 8192, KERNFS_REMOVING = 16384, };
enum kernfs_node_type { KERNFS_DIR = 1, KERNFS_FILE = 2, KERNFS_LINK = 4, };
enum kernfs_root_flag { KERNFS_ROOT_CREATE_DEACTIVATED = 1, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, KERNFS_ROOT_SUPPORT_EXPORTOP = 4, KERNFS_ROOT_SUPPORT_USER_XATTR = 8, };
enum key_being_used_for { VERIFYING_MODULE_SIGNATURE = 0, VERIFYING_FIRMWARE_SIGNATURE = 1, VERIFYING_KEXEC_PE_SIGNATURE = 2, VERIFYING_KEY_SIGNATURE = 3, VERIFYING_KEY_SELF_SIGNATURE = 4, VERIFYING_UNSPECIFIED_SIGNATURE = 5, NR__KEY_BEING_USED_FOR = 6, };
enum key_lookup_flag { KEY_LOOKUP_CREATE = 1, KEY_LOOKUP_PARTIAL = 2, KEY_LOOKUP_ALL = 3, };
enum key_need_perm { KEY_NEED_UNSPECIFIED = 0, KEY_NEED_VIEW = 1, KEY_NEED_READ = 2, KEY_NEED_WRITE = 3, KEY_NEED_SEARCH = 4, KEY_NEED_LINK = 5, KEY_NEED_SETATTR = 6, KEY_NEED_UNLINK = 7, KEY_SYSADMIN_OVERRIDE = 8, KEY_AUTHTOKEN_OVERRIDE = 9, KEY_DEFER_PERM_CHECK = 10, };
enum key_notification_subtype { NOTIFY_KEY_INSTANTIATED = 0, NOTIFY_KEY_UPDATED = 1, NOTIFY_KEY_LINKED = 2, NOTIFY_KEY_UNLINKED = 3, NOTIFY_KEY_CLEARED = 4, NOTIFY_KEY_REVOKED = 5, NOTIFY_KEY_INVALIDATED = 6, NOTIFY_KEY_SETATTR = 7, };
enum key_state { KEY_IS_UNINSTANTIATED = 0, KEY_IS_POSITIVE = 1, };
enum kfunc_ptr_arg_type { KF_ARG_PTR_TO_CTX = 0, KF_ARG_PTR_TO_ALLOC_BTF_ID = 1, KF_ARG_PTR_TO_REFCOUNTED_KPTR = 2, KF_ARG_PTR_TO_DYNPTR = 3, KF_ARG_PTR_TO_ITER = 4, KF_ARG_PTR_TO_LIST_HEAD = 5, KF_ARG_PTR_TO_LIST_NODE = 6, KF_ARG_PTR_TO_BTF_ID = 7, KF_ARG_PTR_TO_MEM = 8, KF_ARG_PTR_TO_MEM_SIZE = 9, KF_ARG_PTR_TO_CALLBACK = 10, KF_ARG_PTR_TO_RB_ROOT = 11, KF_ARG_PTR_TO_RB_NODE = 12, KF_ARG_PTR_TO_NULL = 13, KF_ARG_PTR_TO_CONST_STR = 14, KF_ARG_PTR_TO_MAP = 15, KF_ARG_PTR_TO_WORKQUEUE = 16, };
enum kmalloc_cache_type { KMALLOC_NORMAL = 0, KMALLOC_RANDOM_START = 0, KMALLOC_RANDOM_END = 0, KMALLOC_RECLAIM = 1, KMALLOC_DMA = 2, KMALLOC_CGROUP = 3, NR_KMALLOC_TYPES = 4, };
enum kmsg_dump_reason { KMSG_DUMP_UNDEF = 0, KMSG_DUMP_PANIC = 1, KMSG_DUMP_OOPS = 2, KMSG_DUMP_EMERG = 3, KMSG_DUMP_SHUTDOWN = 4, KMSG_DUMP_MAX = 5, };
enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2, };
enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_BIND = 6, KOBJ_UNBIND = 7, };
enum kprobe_slot_state { SLOT_CLEAN = 0, SLOT_DIRTY = 1, SLOT_USED = 2, };
enum ksm_advisor_type { KSM_ADVISOR_NONE = 0, KSM_ADVISOR_SCAN_TIME = 1, };
enum ksm_get_folio_flags { KSM_GET_FOLIO_NOLOCK = 0, KSM_GET_FOLIO_LOCK = 1, KSM_GET_FOLIO_TRYLOCK = 2, };
enum kvm_apic_logical_mode { KVM_APIC_MODE_SW_DISABLED = 0, KVM_APIC_MODE_XAPIC_CLUSTER = 1, KVM_APIC_MODE_XAPIC_FLAT = 2, KVM_APIC_MODE_X2APIC = 3, KVM_APIC_MODE_MAP_DISABLED = 4, };
enum kvm_bus { KVM_MMIO_BUS = 0, KVM_PIO_BUS = 1, KVM_VIRTIO_CCW_NOTIFY_BUS = 2, KVM_FAST_MMIO_BUS = 3, KVM_NR_BUSES = 4, };
enum kvm_irqchip_mode { KVM_IRQCHIP_NONE = 0, KVM_IRQCHIP_KERNEL = 1, KVM_IRQCHIP_SPLIT = 2, };
enum kvm_reg { VCPU_REGS_RAX = 0, VCPU_REGS_RCX = 1, VCPU_REGS_RDX = 2, VCPU_REGS_RBX = 3, VCPU_REGS_RSP = 4, VCPU_REGS_RBP = 5, VCPU_REGS_RSI = 6, VCPU_REGS_RDI = 7, VCPU_REGS_R8 = 8, VCPU_REGS_R9 = 9, VCPU_REGS_R10 = 10, VCPU_REGS_R11 = 11, VCPU_REGS_R12 = 12, VCPU_REGS_R13 = 13, VCPU_REGS_R14 = 14, VCPU_REGS_R15 = 15, VCPU_REGS_RIP = 16, NR_VCPU_REGS = 17, VCPU_EXREG_PDPTR = 17, VCPU_EXREG_CR0 = 18,
VCPU_EXREG_CR3 = 19, VCPU_EXREG_CR4 = 20, VCPU_EXREG_RFLAGS = 21, VCPU_EXREG_SEGMENTS = 22, VCPU_EXREG_EXIT_INFO_1 = 23, VCPU_EXREG_EXIT_INFO_2 = 24, };
enum kvm_stat_kind { KVM_STAT_VM = 0, KVM_STAT_VCPU = 1, };
enum l1d_flush_mitigations { L1D_FLUSH_OFF = 0, L1D_FLUSH_ON = 1, };
enum l1tf_mitigations { L1TF_MITIGATION_OFF = 0, L1TF_MITIGATION_FLUSH_NOWARN = 1, L1TF_MITIGATION_FLUSH = 2, L1TF_MITIGATION_FLUSH_NOSMT = 3, L1TF_MITIGATION_FULL = 4, L1TF_MITIGATION_FULL_FORCE = 5, };
enum l2tp_debug_flags { L2TP_MSG_DEBUG = 1, L2TP_MSG_CONTROL = 2, L2TP_MSG_SEQ = 4, L2TP_MSG_DATA = 8, };
enum l3mdev_type { L3MDEV_TYPE_UNSPEC = 0, L3MDEV_TYPE_VRF = 1, __L3MDEV_TYPE_MAX = 2, };
enum label_initialized { LABEL_INVALID = 0, LABEL_INITIALIZED = 1, LABEL_PENDING = 2, };
enum led_brightness { LED_OFF = 0, LED_ON = 1, LED_HALF = 127, LED_FULL = 255, };
enum legacy_fs_param { LEGACY_FS_UNSET_PARAMS = 0, LEGACY_FS_MONOLITHIC_PARAMS = 1, LEGACY_FS_INDIVIDUAL_PARAMS = 2, };
enum linux_mptcp_mib_field { MPTCP_MIB_NUM = 0, MPTCP_MIB_MPCAPABLEPASSIVE = 1, MPTCP_MIB_MPCAPABLEACTIVE = 2, MPTCP_MIB_MPCAPABLEACTIVEACK = 3, MPTCP_MIB_MPCAPABLEPASSIVEACK = 4, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK = 5, MPTCP_MIB_MPCAPABLEACTIVEFALLBACK = 6, MPTCP_MIB_MPCAPABLEACTIVEDROP = 7, MPTCP_MIB_MPCAPABLEACTIVEDISABLED = 8, MPTCP_MIB_TOKENFALLBACKINIT = 9, MPTCP_MIB_RETRANSSEGS = 10, MPTCP_MIB_JOINNOTOKEN = 11, MPTCP_MIB_JOINSYNRX = 12, MPTCP_MIB_JOINSYNBACKUPRX = 13, MPTCP_MIB_JOINSYNACKRX = 14, MPTCP_MIB_JOINSYNACKBACKUPRX = 15, MPTCP_MIB_JOINSYNACKMAC = 16, MPTCP_MIB_JOINACKRX = 17, MPTCP_MIB_JOINACKMAC = 18, MPTCP_MIB_JOINSYNTX = 19, MPTCP_MIB_JOINSYNTXCREATSKERR = 20, MPTCP_MIB_JOINSYNTXBINDERR = 21, MPTCP_MIB_JOINSYNTXCONNECTERR = 22, MPTCP_MIB_DSSNOMATCH = 23, MPTCP_MIB_INFINITEMAPTX = 24, MPTCP_MIB_INFINITEMAPRX = 25, MPTCP_MIB_DSSTCPMISMATCH = 26, MPTCP_MIB_DATACSUMERR = 27, MPTCP_MIB_OFOQUEUETAIL = 28, MPTCP_MIB_OFOQUEUE = 29, MPTCP_MIB_OFOMERGE = 30, MPTCP_MIB_NODSSWINDOW = 31, MPTCP_MIB_DUPDATA = 32, MPTCP_MIB_ADDADDR = 33, MPTCP_MIB_ADDADDRTX = 34, MPTCP_MIB_ADDADDRTXDROP = 35, MPTCP_MIB_ECHOADD = 36, MPTCP_MIB_ECHOADDTX = 37, MPTCP_MIB_ECHOADDTXDROP = 38, MPTCP_MIB_PORTADD = 39, MPTCP_MIB_ADDADDRDROP = 40, MPTCP_MIB_JOINPORTSYNRX = 41, MPTCP_MIB_JOINPORTSYNACKRX = 42, MPTCP_MIB_JOINPORTACKRX = 43, MPTCP_MIB_MISMATCHPORTSYNRX = 44, MPTCP_MIB_MISMATCHPORTACKRX = 45, MPTCP_MIB_RMADDR = 46, MPTCP_MIB_RMADDRDROP = 47, MPTCP_MIB_RMADDRTX = 48, MPTCP_MIB_RMADDRTXDROP = 49, MPTCP_MIB_RMSUBFLOW = 50, MPTCP_MIB_MPPRIOTX = 51, MPTCP_MIB_MPPRIORX = 52, MPTCP_MIB_MPFAILTX = 53, MPTCP_MIB_MPFAILRX = 54, MPTCP_MIB_MPFASTCLOSETX = 55, MPTCP_MIB_MPFASTCLOSERX = 56, MPTCP_MIB_MPRSTTX = 57, MPTCP_MIB_MPRSTRX = 58, MPTCP_MIB_RCVPRUNED = 59, MPTCP_MIB_SUBFLOWSTALE = 60, MPTCP_MIB_SUBFLOWRECOVER = 61, MPTCP_MIB_SNDWNDSHARED = 62, MPTCP_MIB_RCVWNDSHARED = 63, MPTCP_MIB_RCVWNDCONFLICTUPDATE = 64, MPTCP_MIB_RCVWNDCONFLICT = 65, MPTCP_MIB_CURRESTAB = 66, MPTCP_MIB_BLACKHOLE = 67, __MPTCP_MIB_MAX = 68, };
enum lock_usage_bit { LOCK_USED_IN_HARDIRQ = 0, LOCK_USED_IN_HARDIRQ_READ = 1, LOCK_ENABLED_HARDIRQ = 2, LOCK_ENABLED_HARDIRQ_READ = 3, LOCK_USED_IN_SOFTIRQ = 4, LOCK_USED_IN_SOFTIRQ_READ = 5, LOCK_ENABLED_SOFTIRQ = 6, LOCK_ENABLED_SOFTIRQ_READ = 7, LOCK_USED = 8, LOCK_USED_READ = 9, LOCK_USAGE_STATES = 10, };
enum lockdep_lock_type { LD_LOCK_NORMAL = 0, LD_LOCK_PERCPU = 1, LD_LOCK_WAIT_OVERRIDE = 2, LD_LOCK_MAX = 3, };
enum lockdep_ok { LOCKDEP_STILL_OK = 0, LOCKDEP_NOW_UNRELIABLE = 1, };
enum lockdep_wait_type { LD_WAIT_INV = 0, LD_WAIT_FREE = 1, LD_WAIT_SPIN = 2, LD_WAIT_CONFIG = 2, LD_WAIT_SLEEP = 3, LD_WAIT_MAX = 4, };
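/*
 * Kernel lockdown LSM reasons follow below: values up to
 * LOCKDOWN_INTEGRITY_MAX are blocked in "integrity" mode, and the
 * remainder (up to LOCKDOWN_CONFIDENTIALITY_MAX) additionally in
 * "confidentiality" mode. LOCKDOWN_BPF_READ_KERNEL falls in the
 * confidentiality group, so on a locked-down kernel BPF programs that
 * read kernel memory are typically refused, while XDP programs that do
 * not read kernel memory are unaffected.
 */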
enum lockdown_reason { LOCKDOWN_NONE = 0, LOCKDOWN_MODULE_SIGNATURE = 1, LOCKDOWN_DEV_MEM = 2, LOCKDOWN_EFI_TEST = 3, LOCKDOWN_KEXEC = 4, LOCKDOWN_HIBERNATION = 5, LOCKDOWN_PCI_ACCESS = 6, LOCKDOWN_IOPORT = 7, LOCKDOWN_MSR = 8, LOCKDOWN_ACPI_TABLES = 9, LOCKDOWN_DEVICE_TREE = 10, LOCKDOWN_PCMCIA_CIS = 11, LOCKDOWN_TIOCSSERIAL = 12, LOCKDOWN_MODULE_PARAMETERS = 13, LOCKDOWN_MMIOTRACE = 14, LOCKDOWN_DEBUGFS = 15, LOCKDOWN_XMON_WR = 16, LOCKDOWN_BPF_WRITE_USER = 17, LOCKDOWN_DBG_WRITE_KERNEL = 18, LOCKDOWN_RTAS_ERROR_INJECTION = 19, LOCKDOWN_INTEGRITY_MAX = 20, LOCKDOWN_KCORE = 21, LOCKDOWN_KPROBES = 22, LOCKDOWN_BPF_READ_KERNEL = 23, LOCKDOWN_DBG_READ_KERNEL = 24, LOCKDOWN_PERF = 25, LOCKDOWN_TRACEFS = 26, LOCKDOWN_XMON_RW = 27, LOCKDOWN_XFRM_SECRET = 28, LOCKDOWN_CONFIDENTIALITY_MAX = 29, };
enum lru_list { LRU_INACTIVE_ANON = 0, LRU_ACTIVE_ANON = 1, LRU_INACTIVE_FILE = 2, LRU_ACTIVE_FILE = 3, LRU_UNEVICTABLE = 4, NR_LRU_LISTS = 5, };
enum lru_status { LRU_REMOVED = 0, LRU_REMOVED_RETRY = 1, LRU_ROTATE = 2, LRU_SKIP = 3, LRU_RETRY = 4, LRU_STOP = 5, };
enum lruvec_flags { LRUVEC_CGROUP_CONGESTED = 0, LRUVEC_NODE_CONGESTED = 1, };
enum lsm_event { LSM_POLICY_CHANGE = 0, };
enum lsm_integrity_type { LSM_INT_DMVERITY_SIG_VALID = 0, LSM_INT_DMVERITY_ROOTHASH = 1, LSM_INT_FSVERITY_BUILTINSIG_VALID = 2, };
enum lsm_order { LSM_ORDER_FIRST = -1, LSM_ORDER_MUTABLE = 0, LSM_ORDER_LAST = 1, };
enum lsm_rule_types { LSM_OBJ_USER = 0, LSM_OBJ_ROLE = 1, LSM_OBJ_TYPE = 2, LSM_SUBJ_USER = 3, LSM_SUBJ_ROLE = 4, LSM_SUBJ_TYPE = 5, };
enum lw_bits { LW_URGENT = 0, };
enum lwtunnel_encap_types { LWTUNNEL_ENCAP_NONE = 0, LWTUNNEL_ENCAP_MPLS = 1, LWTUNNEL_ENCAP_IP = 2, LWTUNNEL_ENCAP_ILA = 3, LWTUNNEL_ENCAP_IP6 = 4, LWTUNNEL_ENCAP_SEG6 = 5, LWTUNNEL_ENCAP_BPF = 6, LWTUNNEL_ENCAP_SEG6_LOCAL = 7, LWTUNNEL_ENCAP_RPL = 8, LWTUNNEL_ENCAP_IOAM6 = 9, LWTUNNEL_ENCAP_XFRM = 10, __LWTUNNEL_ENCAP_MAX = 11, };
enum lwtunnel_ip6_t { LWTUNNEL_IP6_UNSPEC = 0, LWTUNNEL_IP6_ID = 1, LWTUNNEL_IP6_DST = 2, LWTUNNEL_IP6_SRC = 3, LWTUNNEL_IP6_HOPLIMIT = 4, LWTUNNEL_IP6_TC = 5, LWTUNNEL_IP6_FLAGS = 6, LWTUNNEL_IP6_PAD = 7, LWTUNNEL_IP6_OPTS = 8, __LWTUNNEL_IP6_MAX = 9, };
enum lwtunnel_ip_t { LWTUNNEL_IP_UNSPEC = 0, LWTUNNEL_IP_ID = 1, LWTUNNEL_IP_DST = 2, LWTUNNEL_IP_SRC = 3, LWTUNNEL_IP_TTL = 4, LWTUNNEL_IP_TOS = 5, LWTUNNEL_IP_FLAGS = 6, LWTUNNEL_IP_PAD = 7, LWTUNNEL_IP_OPTS = 8, __LWTUNNEL_IP_MAX = 9, };
enum lzma2_seq { SEQ_CONTROL = 0, SEQ_UNCOMPRESSED_1 = 1, SEQ_UNCOMPRESSED_2 = 2, SEQ_COMPRESSED_0 = 3, SEQ_COMPRESSED_1 = 4, SEQ_PROPERTIES = 5, SEQ_LZMA_PREPARE = 6, SEQ_LZMA_RUN = 7, SEQ_COPY = 8, };
enum lzma_state { STATE_LIT_LIT = 0, STATE_MATCH_LIT_LIT = 1, STATE_REP_LIT_LIT = 2, STATE_SHORTREP_LIT_LIT = 3, STATE_MATCH_LIT = 4, STATE_REP_LIT = 5, STATE_SHORTREP_LIT = 6, STATE_LIT_MATCH = 7, STATE_LIT_LONGREP = 8, STATE_LIT_SHORTREP = 9, STATE_NONLIT_MATCH = 10, STATE_NONLIT_REP = 11, };
enum maple_status { ma_active = 0, ma_start = 1, ma_root = 2, ma_none = 3, ma_pause = 4, ma_overflow = 5, ma_underflow = 6, ma_error = 7, };
enum maple_type { maple_dense = 0, maple_leaf_64 = 1, maple_range_64 = 2, maple_arange_64 = 3, };
enum mapping_flags { AS_EIO = 0, AS_ENOSPC = 1, AS_MM_ALL_LOCKS = 2, AS_UNEVICTABLE = 3, AS_EXITING = 4, AS_NO_WRITEBACK_TAGS = 5, AS_RELEASE_ALWAYS = 6, AS_STABLE_WRITES = 7, AS_INACCESSIBLE = 8, AS_FOLIO_ORDER_BITS = 5, AS_FOLIO_ORDER_MIN = 16, AS_FOLIO_ORDER_MAX = 21, };
enum mapping_status { MAPPING_OK = 0,
MAPPING_INVALID = 1, MAPPING_EMPTY = 2, MAPPING_DATA_FIN = 3, MAPPING_DUMMY = 4, MAPPING_BAD_CSUM = 5, };
enum mca_msr { MCA_CTL = 0, MCA_STATUS = 1, MCA_ADDR = 2, MCA_MISC = 3, };
enum mce_notifier_prios { MCE_PRIO_LOWEST = 0, MCE_PRIO_MCELOG = 1, MCE_PRIO_EDAC = 2, MCE_PRIO_NFIT = 3, MCE_PRIO_EXTLOG = 4, MCE_PRIO_UC = 5, MCE_PRIO_EARLY = 6, MCE_PRIO_CEC = 7, MCE_PRIO_HIGHEST = 7, };
enum mcp_flags { MCP_TIMESTAMP = 1, MCP_UC = 2, MCP_DONTLOG = 4, MCP_QUEUE_LOG = 8, };
enum mds_mitigations { MDS_MITIGATION_OFF = 0, MDS_MITIGATION_FULL = 1, MDS_MITIGATION_VMWERV = 2, };
enum mei_cb_file_ops { MEI_FOP_READ = 0, MEI_FOP_WRITE = 1, MEI_FOP_CONNECT = 2, MEI_FOP_DISCONNECT = 3, MEI_FOP_DISCONNECT_RSP = 4, MEI_FOP_NOTIFY_START = 5, MEI_FOP_NOTIFY_STOP = 6, MEI_FOP_DMA_MAP = 7, MEI_FOP_DMA_UNMAP = 8, };
enum mei_cfg_idx { MEI_ME_UNDEF_CFG = 0, MEI_ME_ICH_CFG = 1, MEI_ME_ICH10_CFG = 2, MEI_ME_PCH6_CFG = 3, MEI_ME_PCH7_CFG = 4, MEI_ME_PCH_CPT_PBG_CFG = 5, MEI_ME_PCH8_CFG = 6, MEI_ME_PCH8_ITOUCH_CFG = 7, MEI_ME_PCH8_SPS_4_CFG = 8, MEI_ME_PCH12_CFG = 9, MEI_ME_PCH12_SPS_4_CFG = 10, MEI_ME_PCH12_SPS_CFG = 11, MEI_ME_PCH12_SPS_ITOUCH_CFG = 12, MEI_ME_PCH15_CFG = 13, MEI_ME_PCH15_SPS_CFG = 14, MEI_ME_GSC_CFG = 15, MEI_ME_GSCFI_CFG = 16, MEI_ME_NUM_CFG = 17, };
enum mei_cl_connect_status { MEI_CL_CONN_SUCCESS = 0, MEI_CL_CONN_NOT_FOUND = 1, MEI_CL_CONN_ALREADY_STARTED = 2, MEI_CL_CONN_OUT_OF_RESOURCES = 3, MEI_CL_CONN_MESSAGE_SMALL = 4, MEI_CL_CONN_NOT_ALLOWED = 5, };
enum mei_cl_disconnect_status { MEI_CL_DISCONN_SUCCESS = 0, };
enum mei_cl_io_mode { MEI_CL_IO_TX_BLOCKING = 1, MEI_CL_IO_TX_INTERNAL = 2, MEI_CL_IO_RX_NONBLOCK = 4, MEI_CL_IO_SGL = 8, };
enum mei_dev_pxp_mode { MEI_DEV_PXP_DEFAULT = 0, MEI_DEV_PXP_INIT = 1, MEI_DEV_PXP_SETUP = 2, MEI_DEV_PXP_READY = 3, };
enum mei_dev_reset_to_pxp { MEI_DEV_RESET_TO_PXP_DEFAULT = 0, MEI_DEV_RESET_TO_PXP_PERFORMED = 1, MEI_DEV_RESET_TO_PXP_DONE = 2, };
enum mei_dev_state { MEI_DEV_INITIALIZING = 0, MEI_DEV_INIT_CLIENTS = 1, MEI_DEV_ENABLED = 2, MEI_DEV_RESETTING = 3, MEI_DEV_DISABLED = 4, MEI_DEV_POWERING_DOWN = 5, MEI_DEV_POWER_DOWN = 6, MEI_DEV_POWER_UP = 7, };
enum mei_ext_hdr_type { MEI_EXT_HDR_NONE = 0, MEI_EXT_HDR_VTAG = 1, MEI_EXT_HDR_GSC = 2, };
enum mei_file_transaction_states { MEI_IDLE = 0, MEI_WRITING = 1, MEI_WRITE_COMPLETE = 2, };
enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_STARTING = 1, MEI_HBM_CAP_SETUP = 2, MEI_HBM_DR_SETUP = 3, MEI_HBM_ENUM_CLIENTS = 4, MEI_HBM_CLIENT_PROPERTIES = 5, MEI_HBM_STARTED = 6, MEI_HBM_STOPPED = 7, };
enum mei_hbm_status { MEI_HBMS_SUCCESS = 0, MEI_HBMS_CLIENT_NOT_FOUND = 1, MEI_HBMS_ALREADY_EXISTS = 2, MEI_HBMS_REJECTED = 3, MEI_HBMS_INVALID_PARAMETER = 4, MEI_HBMS_NOT_ALLOWED = 5, MEI_HBMS_ALREADY_STARTED = 6, MEI_HBMS_NOT_STARTED = 7, MEI_HBMS_MAX = 8, };
enum mei_pg_event { MEI_PG_EVENT_IDLE = 0, MEI_PG_EVENT_WAIT = 1, MEI_PG_EVENT_RECEIVED = 2, MEI_PG_EVENT_INTR_WAIT = 3, MEI_PG_EVENT_INTR_RECEIVED = 4, };
enum mei_pg_state { MEI_PG_OFF = 0, MEI_PG_ON = 1, };
enum mei_stop_reason_types { DRIVER_STOP_REQUEST = 0, DEVICE_D1_ENTRY = 1, DEVICE_D2_ENTRY = 2, DEVICE_D3_ENTRY = 3, SYSTEM_S1_ENTRY = 4, SYSTEM_S2_ENTRY = 5, SYSTEM_S3_ENTRY = 6, SYSTEM_S4_ENTRY = 7, SYSTEM_S5_ENTRY = 8, };
enum membarrier_cmd { MEMBARRIER_CMD_QUERY = 0, MEMBARRIER_CMD_GLOBAL = 1, MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32,
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, MEMBARRIER_CMD_GET_REGISTRATIONS = 512, MEMBARRIER_CMD_SHARED = 1, };
enum membarrier_cmd_flag { MEMBARRIER_CMD_FLAG_CPU = 1, };
enum memblock_flags { MEMBLOCK_NONE = 0, MEMBLOCK_HOTPLUG = 1, MEMBLOCK_MIRROR = 2, MEMBLOCK_NOMAP = 4, MEMBLOCK_DRIVER_MANAGED = 8, MEMBLOCK_RSRV_NOINIT = 16, };
enum memcg_memory_event { MEMCG_LOW = 0, MEMCG_HIGH = 1, MEMCG_MAX = 2, MEMCG_OOM = 3, MEMCG_OOM_KILL = 4, MEMCG_OOM_GROUP_KILL = 5, MEMCG_SWAP_HIGH = 6, MEMCG_SWAP_MAX = 7, MEMCG_SWAP_FAIL = 8, MEMCG_NR_MEMORY_EVENTS = 9, };
enum memcg_stat_item { MEMCG_SWAP = 47, MEMCG_SOCK = 48, MEMCG_PERCPU_B = 49, MEMCG_VMALLOC = 50, MEMCG_KMEM = 51, MEMCG_ZSWAP_B = 52, MEMCG_ZSWAPPED = 53, MEMCG_NR_STAT = 54, };
enum meminit_context { MEMINIT_EARLY = 0, MEMINIT_HOTPLUG = 1, };
enum memory_type { MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_COHERENT = 2, MEMORY_DEVICE_FS_DAX = 3, MEMORY_DEVICE_GENERIC = 4, MEMORY_DEVICE_PCI_P2PDMA = 5, };
enum metadata_type { METADATA_IP_TUNNEL = 0, METADATA_HW_PORT_MUX = 1, METADATA_MACSEC = 2, METADATA_XFRM = 3, };
enum mf_action_page_type { MF_MSG_KERNEL = 0, MF_MSG_KERNEL_HIGH_ORDER = 1, MF_MSG_DIFFERENT_COMPOUND = 2, MF_MSG_HUGE = 3, MF_MSG_FREE_HUGE = 4, MF_MSG_GET_HWPOISON = 5, MF_MSG_UNMAP_FAILED = 6, MF_MSG_DIRTY_SWAPCACHE = 7, MF_MSG_CLEAN_SWAPCACHE = 8, MF_MSG_DIRTY_MLOCKED_LRU = 9, MF_MSG_CLEAN_MLOCKED_LRU = 10, MF_MSG_DIRTY_UNEVICTABLE_LRU = 11, MF_MSG_CLEAN_UNEVICTABLE_LRU = 12, MF_MSG_DIRTY_LRU = 13, MF_MSG_CLEAN_LRU = 14, MF_MSG_TRUNCATED_LRU = 15, MF_MSG_BUDDY = 16, MF_MSG_DAX = 17, MF_MSG_UNSPLIT_THP = 18, MF_MSG_ALREADY_POISONED = 19, MF_MSG_UNKNOWN = 20, };
enum mf_flags { MF_COUNT_INCREASED = 1, MF_ACTION_REQUIRED = 2, MF_MUST_KILL = 4, MF_SOFT_OFFLINE = 8, MF_UNPOISON = 16, MF_SW_SIMULATED = 32, MF_NO_RETRY = 64, MF_MEM_PRE_REMOVE = 128, };
enum mf_result { MF_IGNORED = 0, MF_FAILED = 1, MF_DELAYED = 2, MF_RECOVERED = 3, };
enum mfill_atomic_mode { MFILL_ATOMIC_COPY = 0, MFILL_ATOMIC_ZEROPAGE = 1, MFILL_ATOMIC_CONTINUE = 2, MFILL_ATOMIC_POISON = 3, NR_MFILL_ATOMIC_MODES = 4, };
enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2, };
enum migrate_reason { MR_COMPACTION = 0, MR_MEMORY_FAILURE = 1, MR_MEMORY_HOTPLUG = 2, MR_SYSCALL = 3, MR_MEMPOLICY_MBIND = 4, MR_NUMA_MISPLACED = 5, MR_CONTIG_RANGE = 6, MR_LONGTERM_PIN = 7, MR_DEMOTION = 8, MR_DAMON = 9, MR_TYPES = 10, };
enum migratetype { MIGRATE_UNMOVABLE = 0, MIGRATE_MOVABLE = 1, MIGRATE_RECLAIMABLE = 2, MIGRATE_PCPTYPES = 3, MIGRATE_HIGHATOMIC = 3, MIGRATE_CMA = 4, MIGRATE_ISOLATE = 5, MIGRATE_TYPES = 6, };
enum migration_type { migrate_load = 0, migrate_util = 1, migrate_task = 2, migrate_misfit = 3, };
enum mm_cid_state { MM_CID_UNSET = 4294967295, MM_CID_LAZY_PUT = 2147483648, };
enum mminit_level { MMINIT_WARNING = 0, MMINIT_VERIFY = 1, MMINIT_TRACE = 2, };
enum mmio_mitigations { MMIO_MITIGATION_OFF = 0, MMIO_MITIGATION_UCODE_NEEDED = 1, MMIO_MITIGATION_VERW = 2, };
enum mnt_tree_flags_t { MNT_TREE_MOVE = 1, MNT_TREE_BENEATH = 2, };
enum mod_license { NOT_GPL_ONLY = 0, GPL_ONLY = 1, };
enum mod_mem_type { MOD_TEXT = 0, MOD_DATA = 1, MOD_RODATA = 2, MOD_RO_AFTER_INIT = 3, MOD_INIT_TEXT = 4, MOD_INIT_DATA = 5, MOD_INIT_RODATA = 6, MOD_MEM_NUM_TYPES = 7, MOD_INVALID = -1, };
enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3, };
enum mp_irq_source_types { mp_INT = 0, mp_NMI = 1, mp_SMI = 2, mp_ExtINT = 3, };
enum mpls_payload_type { MPT_UNSPEC = 0, MPT_IPV4 = 4, MPT_IPV6 = 6, };
enum mpls_ttl_propagation { MPLS_TTL_PROP_DEFAULT = 0, MPLS_TTL_PROP_ENABLED = 1, MPLS_TTL_PROP_DISABLED = 2, };
enum mptcp_addr_signal_status { MPTCP_ADD_ADDR_SIGNAL = 0, MPTCP_ADD_ADDR_ECHO = 1, MPTCP_RM_ADDR_SIGNAL = 2, };
enum mptcp_event_attr { MPTCP_ATTR_UNSPEC = 0, MPTCP_ATTR_TOKEN = 1, MPTCP_ATTR_FAMILY = 2, MPTCP_ATTR_LOC_ID = 3, MPTCP_ATTR_REM_ID = 4, MPTCP_ATTR_SADDR4 = 5, MPTCP_ATTR_SADDR6 = 6, MPTCP_ATTR_DADDR4 = 7, MPTCP_ATTR_DADDR6 = 8, MPTCP_ATTR_SPORT = 9, MPTCP_ATTR_DPORT = 10, MPTCP_ATTR_BACKUP = 11, MPTCP_ATTR_ERROR = 12, MPTCP_ATTR_FLAGS = 13, MPTCP_ATTR_TIMEOUT = 14, MPTCP_ATTR_IF_IDX = 15, MPTCP_ATTR_RESET_REASON = 16, MPTCP_ATTR_RESET_FLAGS = 17, MPTCP_ATTR_SERVER_SIDE = 18, __MPTCP_ATTR_MAX = 19, };
enum mptcp_event_type { MPTCP_EVENT_UNSPEC = 0, MPTCP_EVENT_CREATED = 1, MPTCP_EVENT_ESTABLISHED = 2, MPTCP_EVENT_CLOSED = 3, MPTCP_EVENT_ANNOUNCED = 6, MPTCP_EVENT_REMOVED = 7, MPTCP_EVENT_SUB_ESTABLISHED = 10, MPTCP_EVENT_SUB_CLOSED = 11, MPTCP_EVENT_SUB_PRIORITY = 13, MPTCP_EVENT_LISTENER_CREATED = 15, MPTCP_EVENT_LISTENER_CLOSED = 16, };
enum mptcp_pm_status { MPTCP_PM_ADD_ADDR_RECEIVED = 0, MPTCP_PM_ADD_ADDR_SEND_ACK = 1, MPTCP_PM_RM_ADDR_RECEIVED = 2, MPTCP_PM_ESTABLISHED = 3, MPTCP_PM_SUBFLOW_ESTABLISHED = 4, MPTCP_PM_ALREADY_ESTABLISHED = 5, MPTCP_PM_MPC_ENDPOINT_ACCOUNTED = 6, };
enum mptcp_pm_type { MPTCP_PM_TYPE_KERNEL = 0, MPTCP_PM_TYPE_USERSPACE = 1, __MPTCP_PM_TYPE_NR = 2, __MPTCP_PM_TYPE_MAX = 1, };
enum mq_rq_state { MQ_RQ_IDLE = 0, MQ_RQ_IN_FLIGHT = 1, MQ_RQ_COMPLETE = 2, };
enum msdos_sys_ind { DOS_EXTENDED_PARTITION = 5, LINUX_EXTENDED_PARTITION = 133, WIN98_EXTENDED_PARTITION = 15, LINUX_DATA_PARTITION = 131, LINUX_LVM_PARTITION = 142, LINUX_RAID_PARTITION___3 = 253, SOLARIS_X86_PARTITION = 130, NEW_SOLARIS_X86_PARTITION = 191, DM6_AUX1PARTITION = 81, DM6_AUX3PARTITION = 83, DM6_PARTITION = 84, EZD_PARTITION = 85, FREEBSD_PARTITION = 165, OPENBSD_PARTITION = 166, NETBSD_PARTITION = 169, BSDI_PARTITION = 183, MINIX_PARTITION = 129, UNIXWARE_PARTITION = 99, };
enum msi_desc_filter { MSI_DESC_ALL = 0, MSI_DESC_NOTASSOCIATED = 1, MSI_DESC_ASSOCIATED = 2, };
enum msi_domain_ids { MSI_DEFAULT_DOMAIN = 0, MSI_MAX_DEVICE_IRQDOMAINS = 1, };
enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC = 0, MTHP_STAT_ANON_FAULT_FALLBACK = 1, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE = 2, MTHP_STAT_SWPOUT = 3, MTHP_STAT_SWPOUT_FALLBACK = 4, MTHP_STAT_SHMEM_ALLOC = 5, MTHP_STAT_SHMEM_FALLBACK = 6, MTHP_STAT_SHMEM_FALLBACK_CHARGE = 7, MTHP_STAT_SPLIT = 8, MTHP_STAT_SPLIT_FAILED = 9, MTHP_STAT_SPLIT_DEFERRED = 10, MTHP_STAT_NR_ANON = 11, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED = 12, __MTHP_STAT_COUNT = 13, };
enum multi_stop_state { MULTI_STOP_NONE = 0, MULTI_STOP_PREPARE = 1, MULTI_STOP_DISABLE_IRQ = 2, MULTI_STOP_RUN = 3, MULTI_STOP_EXIT = 4, };
enum nbcon_prio { NBCON_PRIO_NONE = 0, NBCON_PRIO_NORMAL = 1, NBCON_PRIO_EMERGENCY = 2, NBCON_PRIO_PANIC = 3, NBCON_PRIO_MAX = 4, };
enum net_device_flags { IFF_UP = 1, IFF_BROADCAST = 2, IFF_DEBUG = 4, IFF_LOOPBACK = 8, IFF_POINTOPOINT = 16, IFF_NOTRAILERS = 32, IFF_RUNNING = 64, IFF_NOARP = 128, IFF_PROMISC = 256, IFF_ALLMULTI = 512, IFF_MASTER = 1024, IFF_SLAVE = 2048, IFF_MULTICAST = 4096, IFF_PORTSEL = 8192, IFF_AUTOMEDIA = 16384, IFF_DYNAMIC = 32768, IFF_LOWER_UP = 65536, IFF_DORMANT = 131072, IFF_ECHO = 262144, };
enum net_device_path_type { DEV_PATH_ETHERNET = 0, DEV_PATH_VLAN = 1, DEV_PATH_BRIDGE = 2, DEV_PATH_PPPOE = 3, DEV_PATH_DSA = 4, DEV_PATH_MTK_WDMA = 5, };
enum net_xmit_qdisc_t { __NET_XMIT_STOLEN = 65536, __NET_XMIT_BYPASS = 131072, };
enum netdev_cmd { NETDEV_UP = 1, NETDEV_DOWN = 2, NETDEV_REBOOT = 3, NETDEV_CHANGE = 4, NETDEV_REGISTER = 5, NETDEV_UNREGISTER = 6, NETDEV_CHANGEMTU = 7, NETDEV_CHANGEADDR = 8, NETDEV_PRE_CHANGEADDR = 9, NETDEV_GOING_DOWN = 10, NETDEV_CHANGENAME = 11, NETDEV_FEAT_CHANGE = 12, NETDEV_BONDING_FAILOVER = 13, NETDEV_PRE_UP = 14, NETDEV_PRE_TYPE_CHANGE = 15, NETDEV_POST_TYPE_CHANGE = 16, NETDEV_POST_INIT = 17, NETDEV_PRE_UNINIT = 18, NETDEV_RELEASE = 19, NETDEV_NOTIFY_PEERS = 20, NETDEV_JOIN = 21, NETDEV_CHANGEUPPER = 22, NETDEV_RESEND_IGMP = 23, NETDEV_PRECHANGEMTU = 24, NETDEV_CHANGEINFODATA = 25, NETDEV_BONDING_INFO = 26, NETDEV_PRECHANGEUPPER = 27, NETDEV_CHANGELOWERSTATE = 28, NETDEV_UDP_TUNNEL_PUSH_INFO = 29, NETDEV_UDP_TUNNEL_DROP_INFO = 30, NETDEV_CHANGE_TX_QUEUE_LEN = 31, NETDEV_CVLAN_FILTER_PUSH_INFO = 32, NETDEV_CVLAN_FILTER_DROP_INFO = 33, NETDEV_SVLAN_FILTER_PUSH_INFO = 34, NETDEV_SVLAN_FILTER_DROP_INFO = 35, NETDEV_OFFLOAD_XSTATS_ENABLE = 36, NETDEV_OFFLOAD_XSTATS_DISABLE = 37, NETDEV_OFFLOAD_XSTATS_REPORT_USED = 38, NETDEV_OFFLOAD_XSTATS_REPORT_DELTA = 39, NETDEV_XDP_FEAT_CHANGE = 40, };
enum netdev_lag_hash { NETDEV_LAG_HASH_NONE = 0, NETDEV_LAG_HASH_L2 = 1, NETDEV_LAG_HASH_L34 = 2, NETDEV_LAG_HASH_L23 = 3, NETDEV_LAG_HASH_E23 = 4, NETDEV_LAG_HASH_E34 = 5, NETDEV_LAG_HASH_VLAN_SRCMAC = 6, NETDEV_LAG_HASH_UNKNOWN = 7, };
enum netdev_lag_tx_type { NETDEV_LAG_TX_TYPE_UNKNOWN = 0, NETDEV_LAG_TX_TYPE_RANDOM = 1, NETDEV_LAG_TX_TYPE_BROADCAST = 2, NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, NETDEV_LAG_TX_TYPE_HASH = 5, };
enum netdev_ml_priv_type { ML_PRIV_NONE = 0, ML_PRIV_CAN = 1, };
enum netdev_offload_xstats_type { NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, };
enum netdev_priv_flags { IFF_802_1Q_VLAN = 1, IFF_EBRIDGE = 2, IFF_BONDING = 4, IFF_ISATAP = 8, IFF_WAN_HDLC = 16, IFF_XMIT_DST_RELEASE = 32, IFF_DONT_BRIDGE = 64, IFF_DISABLE_NETPOLL = 128, IFF_MACVLAN_PORT = 256, IFF_BRIDGE_PORT = 512, IFF_OVS_DATAPATH = 1024, IFF_TX_SKB_SHARING = 2048, IFF_UNICAST_FLT = 4096, IFF_TEAM_PORT = 8192, IFF_SUPP_NOFCS = 16384, IFF_LIVE_ADDR_CHANGE = 32768, IFF_MACVLAN = 65536, IFF_XMIT_DST_RELEASE_PERM = 131072, IFF_L3MDEV_MASTER = 262144, IFF_NO_QUEUE = 524288, IFF_OPENVSWITCH = 1048576, IFF_L3MDEV_SLAVE = 2097152, IFF_TEAM = 4194304, IFF_RXFH_CONFIGURED = 8388608, IFF_PHONY_HEADROOM = 16777216, IFF_MACSEC = 33554432, IFF_NO_RX_HANDLER = 67108864, IFF_FAILOVER = 134217728, IFF_FAILOVER_SLAVE = 268435456, IFF_L3MDEV_RX_HANDLER = 536870912, IFF_NO_ADDRCONF = 1073741824, IFF_TX_SKB_NO_LINEAR = 2147483648, };
enum netdev_qstats_scope { NETDEV_QSTATS_SCOPE_QUEUE = 1, };
enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF = 0, __QUEUE_STATE_STACK_XOFF = 1, __QUEUE_STATE_FROZEN = 2, };
enum netdev_queue_type { NETDEV_QUEUE_TYPE_RX = 0, NETDEV_QUEUE_TYPE_TX = 1, };
enum netdev_reg_state { NETREG_UNINITIALIZED = 0, NETREG_REGISTERED = 1, NETREG_UNREGISTERING = 2, NETREG_UNREGISTERED = 3, NETREG_RELEASED = 4, NETREG_DUMMY = 5, };
enum netdev_stat_type { NETDEV_PCPU_STAT_NONE = 0, NETDEV_PCPU_STAT_LSTATS = 1, NETDEV_PCPU_STAT_TSTATS = 2, NETDEV_PCPU_STAT_DSTATS = 3, };
enum netdev_state_t { __LINK_STATE_START = 0, __LINK_STATE_PRESENT = 1, __LINK_STATE_NOCARRIER = 2, __LINK_STATE_LINKWATCH_PENDING = 3, __LINK_STATE_DORMANT = 4, __LINK_STATE_TESTING = 5, };
enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, };
typedef enum netdev_tx netdev_tx_t;
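/*
 * The netdev_xdp_act, netdev_xdp_rx_metadata and netdev_xsk_flags enums
 * below are per-device XDP and AF_XDP capability bits that drivers
 * advertise through the kernel's netdev netlink family;
 * NETDEV_XDP_ACT_MASK (127) is simply the OR of all NETDEV_XDP_ACT_*
 * feature bits. As a hedged sketch (xdp_features here is only a
 * placeholder for the value queried from the kernel), a loader could
 * check
 *   (xdp_features & (NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT))
 * before attaching a program that depends on XDP_REDIRECT support.
 */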
enum netdev_xdp_act { NETDEV_XDP_ACT_BASIC = 1, NETDEV_XDP_ACT_REDIRECT = 2, NETDEV_XDP_ACT_NDO_XMIT = 4, NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, NETDEV_XDP_ACT_MASK = 127, };
enum netdev_xdp_rx_metadata { NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, NETDEV_XDP_RX_METADATA_HASH = 2, NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, };
enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, };
enum netevent_notif_type { NETEVENT_NEIGH_UPDATE = 1, NETEVENT_REDIRECT = 2, NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, };
enum netkit_action { NETKIT_NEXT = -1, NETKIT_PASS = 0, NETKIT_DROP = 2, NETKIT_REDIRECT = 7, };
enum netkit_mode { NETKIT_L2 = 0, NETKIT_L3 = 1, };
enum netlink_attribute_type { NL_ATTR_TYPE_INVALID = 0, NL_ATTR_TYPE_FLAG = 1, NL_ATTR_TYPE_U8 = 2, NL_ATTR_TYPE_U16 = 3, NL_ATTR_TYPE_U32 = 4, NL_ATTR_TYPE_U64 = 5, NL_ATTR_TYPE_S8 = 6, NL_ATTR_TYPE_S16 = 7, NL_ATTR_TYPE_S32 = 8, NL_ATTR_TYPE_S64 = 9, NL_ATTR_TYPE_BINARY = 10, NL_ATTR_TYPE_STRING = 11, NL_ATTR_TYPE_NUL_STRING = 12, NL_ATTR_TYPE_NESTED = 13, NL_ATTR_TYPE_NESTED_ARRAY = 14, NL_ATTR_TYPE_BITFIELD32 = 15, NL_ATTR_TYPE_SINT = 16, NL_ATTR_TYPE_UINT = 17, };
enum netlink_policy_type_attr { NL_POLICY_TYPE_ATTR_UNSPEC = 0, NL_POLICY_TYPE_ATTR_TYPE = 1, NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, NL_POLICY_TYPE_ATTR_PAD = 11, NL_POLICY_TYPE_ATTR_MASK = 12, __NL_POLICY_TYPE_ATTR_MAX = 13, NL_POLICY_TYPE_ATTR_MAX = 12, };
enum netlink_skb_flags { NETLINK_SKB_DST = 8, };
enum netlink_validation { NL_VALIDATE_LIBERAL = 0, NL_VALIDATE_TRAILING = 1, NL_VALIDATE_MAXTYPE = 2, NL_VALIDATE_UNSPEC = 4, NL_VALIDATE_STRICT_ATTRS = 8, NL_VALIDATE_NESTED = 16, };
enum netns_bpf_attach_type { NETNS_BPF_INVALID = -1, NETNS_BPF_FLOW_DISSECTOR = 0, NETNS_BPF_SK_LOOKUP = 1, MAX_NETNS_BPF_ATTACH_TYPE = 2, };
enum new_flow { NEW_FLOW = 0, OLD_FLOW = 1, };
enum nexthop_event_type { NEXTHOP_EVENT_DEL = 0, NEXTHOP_EVENT_REPLACE = 1, NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, NEXTHOP_EVENT_BUCKET_REPLACE = 3, NEXTHOP_EVENT_HW_STATS_REPORT_DELTA = 4, };
enum nf_ct_ecache_state { NFCT_ECACHE_DESTROY_FAIL = 0, NFCT_ECACHE_DESTROY_SENT = 1, };
enum nf_ct_ext_id { NF_CT_EXT_HELPER = 0, NF_CT_EXT_NAT = 1, NF_CT_EXT_SEQADJ = 2, NF_CT_EXT_ACCT = 3, NF_CT_EXT_SYNPROXY = 4, NF_CT_EXT_NUM = 5, };
enum nf_ct_helper_flags { NF_CT_HELPER_F_USERSPACE = 1, NF_CT_HELPER_F_CONFIGURED = 2, };
enum nf_ct_sysctl_index { NF_SYSCTL_CT_MAX = 0, NF_SYSCTL_CT_COUNT = 1, NF_SYSCTL_CT_BUCKETS = 2, NF_SYSCTL_CT_CHECKSUM = 3, NF_SYSCTL_CT_LOG_INVALID = 4, NF_SYSCTL_CT_EXPECT_MAX = 5, NF_SYSCTL_CT_ACCT = 6, NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC = 7, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT = 8, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV = 9, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED = 10, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT = 11, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT = 12, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK = 13, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT = 14, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE = 15, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS = 16,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK = 17, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD = 18, NF_SYSCTL_CT_PROTO_TCP_LOOSE = 19, NF_SYSCTL_CT_PROTO_TCP_LIBERAL = 20, NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST = 21, NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS = 22, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP = 23, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM = 24, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD = 25, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP = 26, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6 = 27, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED = 28, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT = 29, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED = 30, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED = 31, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT = 32, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD = 33, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT = 34, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT = 35, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST = 36, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND = 37, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN = 38, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN = 39, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ = 40, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING = 41, NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT = 42, NF_SYSCTL_CT_PROTO_DCCP_LOOSE = 43, NF_SYSCTL_CT_LAST_SYSCTL = 44, };
enum nf_ct_tcp_action { NFCT_TCP_IGNORE = 0, NFCT_TCP_INVALID = 1, NFCT_TCP_ACCEPT = 2, };
enum nf_dev_hooks { NF_NETDEV_INGRESS = 0, NF_NETDEV_EGRESS = 1, NF_NETDEV_NUMHOOKS = 2, };
enum nf_flow_flags { NF_FLOW_SNAT = 0, NF_FLOW_DNAT = 1, NF_FLOW_TEARDOWN = 2, NF_FLOW_HW = 3, NF_FLOW_HW_DYING = 4, NF_FLOW_HW_DEAD = 5, NF_FLOW_HW_PENDING = 6, NF_FLOW_HW_BIDIRECTIONAL = 7, NF_FLOW_HW_ESTABLISHED = 8, };
enum nf_flowtable_flags { NF_FLOWTABLE_HW_OFFLOAD = 1, NF_FLOWTABLE_COUNTER = 2, };
enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED = 0, NF_HOOK_OP_NF_TABLES = 1, NF_HOOK_OP_BPF = 2, };
enum nf_inet_hooks { NF_INET_PRE_ROUTING = 0, NF_INET_LOCAL_IN = 1, NF_INET_FORWARD = 2, NF_INET_LOCAL_OUT = 3, NF_INET_POST_ROUTING = 4, NF_INET_NUMHOOKS = 5, NF_INET_INGRESS = 5, };
enum nf_ip6_hook_priorities { NF_IP6_PRI_FIRST = -2147483648, NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, NF_IP6_PRI_CONNTRACK_DEFRAG = -400, NF_IP6_PRI_RAW = -300, NF_IP6_PRI_SELINUX_FIRST = -225, NF_IP6_PRI_CONNTRACK = -200, NF_IP6_PRI_MANGLE = -150, NF_IP6_PRI_NAT_DST = -100, NF_IP6_PRI_FILTER = 0, NF_IP6_PRI_SECURITY = 50, NF_IP6_PRI_NAT_SRC = 100, NF_IP6_PRI_SELINUX_LAST = 225, NF_IP6_PRI_CONNTRACK_HELPER = 300, NF_IP6_PRI_LAST = 2147483647, };
enum nf_ip_hook_priorities { NF_IP_PRI_FIRST = -2147483648, NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, NF_IP_PRI_CONNTRACK_DEFRAG = -400, NF_IP_PRI_RAW = -300, NF_IP_PRI_SELINUX_FIRST = -225, NF_IP_PRI_CONNTRACK = -200, NF_IP_PRI_MANGLE = -150, NF_IP_PRI_NAT_DST = -100, NF_IP_PRI_FILTER = 0, NF_IP_PRI_SECURITY = 50, NF_IP_PRI_NAT_SRC = 100, NF_IP_PRI_SELINUX_LAST = 225, NF_IP_PRI_CONNTRACK_HELPER = 300, NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, NF_IP_PRI_LAST = 2147483647, };
enum nf_log_type { NF_LOG_TYPE_LOG = 0, NF_LOG_TYPE_ULOG = 1, NF_LOG_TYPE_MAX = 2, };
enum nf_nat_manip_type { NF_NAT_MANIP_SRC = 0, NF_NAT_MANIP_DST = 1, };
enum nf_tables_msg_types { NFT_MSG_NEWTABLE = 0, NFT_MSG_GETTABLE = 1, NFT_MSG_DELTABLE = 2, NFT_MSG_NEWCHAIN = 3, NFT_MSG_GETCHAIN = 4, NFT_MSG_DELCHAIN = 5, NFT_MSG_NEWRULE = 6, NFT_MSG_GETRULE = 7, NFT_MSG_DELRULE = 8, NFT_MSG_NEWSET = 9, NFT_MSG_GETSET = 10, NFT_MSG_DELSET = 11, NFT_MSG_NEWSETELEM = 12, NFT_MSG_GETSETELEM = 13, NFT_MSG_DELSETELEM = 14, NFT_MSG_NEWGEN = 15, NFT_MSG_GETGEN = 16, NFT_MSG_TRACE = 17, NFT_MSG_NEWOBJ = 18,
NFT_MSG_GETOBJ = 19, NFT_MSG_DELOBJ = 20, NFT_MSG_GETOBJ_RESET = 21, NFT_MSG_NEWFLOWTABLE = 22, NFT_MSG_GETFLOWTABLE = 23, NFT_MSG_DELFLOWTABLE = 24, NFT_MSG_GETRULE_RESET = 25, NFT_MSG_DESTROYTABLE = 26, NFT_MSG_DESTROYCHAIN = 27, NFT_MSG_DESTROYRULE = 28, NFT_MSG_DESTROYSET = 29, NFT_MSG_DESTROYSETELEM = 30, NFT_MSG_DESTROYOBJ = 31, NFT_MSG_DESTROYFLOWTABLE = 32, NFT_MSG_GETSETELEM_RESET = 33, NFT_MSG_MAX = 34, };
enum nfnetlink_groups { NFNLGRP_NONE = 0, NFNLGRP_CONNTRACK_NEW = 1, NFNLGRP_CONNTRACK_UPDATE = 2, NFNLGRP_CONNTRACK_DESTROY = 3, NFNLGRP_CONNTRACK_EXP_NEW = 4, NFNLGRP_CONNTRACK_EXP_UPDATE = 5, NFNLGRP_CONNTRACK_EXP_DESTROY = 6, NFNLGRP_NFTABLES = 7, NFNLGRP_ACCT_QUOTA = 8, NFNLGRP_NFTRACE = 9, __NFNLGRP_MAX = 10, };
enum nfnl_abort_action { NFNL_ABORT_NONE = 0, NFNL_ABORT_AUTOLOAD = 1, NFNL_ABORT_VALIDATE = 2, };
enum nfnl_batch_attributes { NFNL_BATCH_UNSPEC = 0, NFNL_BATCH_GENID = 1, __NFNL_BATCH_MAX = 2, };
enum nfnl_callback_type { NFNL_CB_UNSPEC = 0, NFNL_CB_MUTEX = 1, NFNL_CB_RCU = 2, NFNL_CB_BATCH = 3, };
enum nfqnl_attr_config { NFQA_CFG_UNSPEC = 0, NFQA_CFG_CMD = 1, NFQA_CFG_PARAMS = 2, NFQA_CFG_QUEUE_MAXLEN = 3, NFQA_CFG_MASK = 4, NFQA_CFG_FLAGS = 5, __NFQA_CFG_MAX = 6, };
enum nfqnl_attr_type { NFQA_UNSPEC = 0, NFQA_PACKET_HDR = 1, NFQA_VERDICT_HDR = 2, NFQA_MARK = 3, NFQA_TIMESTAMP = 4, NFQA_IFINDEX_INDEV = 5, NFQA_IFINDEX_OUTDEV = 6, NFQA_IFINDEX_PHYSINDEV = 7, NFQA_IFINDEX_PHYSOUTDEV = 8, NFQA_HWADDR = 9, NFQA_PAYLOAD = 10, NFQA_CT = 11, NFQA_CT_INFO = 12, NFQA_CAP_LEN = 13, NFQA_SKB_INFO = 14, NFQA_EXP = 15, NFQA_UID = 16, NFQA_GID = 17, NFQA_SECCTX = 18, NFQA_VLAN = 19, NFQA_L2HDR = 20, NFQA_PRIORITY = 21, NFQA_CGROUP_CLASSID = 22, __NFQA_MAX = 23, };
enum nfqnl_config_mode { NFQNL_COPY_NONE = 0, NFQNL_COPY_META = 1, NFQNL_COPY_PACKET = 2, };
enum nfqnl_msg_config_cmds { NFQNL_CFG_CMD_NONE = 0, NFQNL_CFG_CMD_BIND = 1, NFQNL_CFG_CMD_UNBIND = 2, NFQNL_CFG_CMD_PF_BIND = 3, NFQNL_CFG_CMD_PF_UNBIND = 4, };
enum nfqnl_msg_types { NFQNL_MSG_PACKET = 0, NFQNL_MSG_VERDICT = 1, NFQNL_MSG_CONFIG = 2, NFQNL_MSG_VERDICT_BATCH = 3, NFQNL_MSG_MAX = 4, };
enum nfqnl_vlan_attr { NFQA_VLAN_UNSPEC = 0, NFQA_VLAN_PROTO = 1, NFQA_VLAN_TCI = 2, __NFQA_VLAN_MAX = 3, };
enum nfs_opnum4 { OP_ACCESS = 3, OP_CLOSE = 4, OP_COMMIT = 5, OP_CREATE = 6, OP_DELEGPURGE = 7, OP_DELEGRETURN = 8, OP_GETATTR = 9, OP_GETFH = 10, OP_LINK = 11, OP_LOCK = 12, OP_LOCKT = 13, OP_LOCKU = 14, OP_LOOKUP = 15, OP_LOOKUPP = 16, OP_NVERIFY = 17, OP_OPEN = 18, OP_OPENATTR = 19, OP_OPEN_CONFIRM = 20, OP_OPEN_DOWNGRADE = 21, OP_PUTFH = 22, OP_PUTPUBFH = 23, OP_PUTROOTFH = 24, OP_READ = 25, OP_READDIR = 26, OP_READLINK = 27, OP_REMOVE = 28, OP_RENAME = 29, OP_RENEW = 30, OP_RESTOREFH = 31, OP_SAVEFH = 32, OP_SECINFO = 33, OP_SETATTR = 34, OP_SETCLIENTID = 35, OP_SETCLIENTID_CONFIRM = 36, OP_VERIFY = 37, OP_WRITE = 38, OP_RELEASE_LOCKOWNER = 39, OP_BACKCHANNEL_CTL = 40, OP_BIND_CONN_TO_SESSION = 41, OP_EXCHANGE_ID = 42, OP_CREATE_SESSION = 43, OP_DESTROY_SESSION = 44, OP_FREE_STATEID = 45, OP_GET_DIR_DELEGATION = 46, OP_GETDEVICEINFO = 47, OP_GETDEVICELIST = 48, OP_LAYOUTCOMMIT = 49, OP_LAYOUTGET = 50, OP_LAYOUTRETURN = 51, OP_SECINFO_NO_NAME = 52, OP_SEQUENCE = 53, OP_SET_SSV = 54, OP_TEST_STATEID = 55, OP_WANT_DELEGATION = 56, OP_DESTROY_CLIENTID = 57, OP_RECLAIM_COMPLETE = 58, OP_ALLOCATE = 59, OP_COPY = 60, OP_COPY_NOTIFY = 61, OP_DEALLOCATE = 62, OP_IO_ADVISE = 63, OP_LAYOUTERROR = 64, OP_LAYOUTSTATS = 65, OP_OFFLOAD_CANCEL = 66, OP_OFFLOAD_STATUS = 67, OP_READ_PLUS = 68, OP_SEEK = 69, OP_WRITE_SAME = 70,
OP_CLONE = 71, OP_GETXATTR = 72, OP_SETXATTR = 73, OP_LISTXATTRS = 74, OP_REMOVEXATTR = 75, OP_ILLEGAL = 10044, };
enum nft_bitwise_attributes { NFTA_BITWISE_UNSPEC = 0, NFTA_BITWISE_SREG = 1, NFTA_BITWISE_DREG = 2, NFTA_BITWISE_LEN = 3, NFTA_BITWISE_MASK = 4, NFTA_BITWISE_XOR = 5, NFTA_BITWISE_OP = 6, NFTA_BITWISE_DATA = 7, __NFTA_BITWISE_MAX = 8, };
enum nft_bitwise_ops { NFT_BITWISE_BOOL = 0, NFT_BITWISE_LSHIFT = 1, NFT_BITWISE_RSHIFT = 2, };
enum nft_byteorder_attributes { NFTA_BYTEORDER_UNSPEC = 0, NFTA_BYTEORDER_SREG = 1, NFTA_BYTEORDER_DREG = 2, NFTA_BYTEORDER_OP = 3, NFTA_BYTEORDER_LEN = 4, NFTA_BYTEORDER_SIZE = 5, __NFTA_BYTEORDER_MAX = 6, };
enum nft_byteorder_ops { NFT_BYTEORDER_NTOH = 0, NFT_BYTEORDER_HTON = 1, };
enum nft_chain_attributes { NFTA_CHAIN_UNSPEC = 0, NFTA_CHAIN_TABLE = 1, NFTA_CHAIN_HANDLE = 2, NFTA_CHAIN_NAME = 3, NFTA_CHAIN_HOOK = 4, NFTA_CHAIN_POLICY = 5, NFTA_CHAIN_USE = 6, NFTA_CHAIN_TYPE = 7, NFTA_CHAIN_COUNTERS = 8, NFTA_CHAIN_PAD = 9, NFTA_CHAIN_FLAGS = 10, NFTA_CHAIN_ID = 11, NFTA_CHAIN_USERDATA = 12, __NFTA_CHAIN_MAX = 13, };
enum nft_chain_flags { NFT_CHAIN_BASE = 1, NFT_CHAIN_HW_OFFLOAD = 2, NFT_CHAIN_BINDING = 4, };
enum nft_chain_types { NFT_CHAIN_T_DEFAULT = 0, NFT_CHAIN_T_ROUTE = 1, NFT_CHAIN_T_NAT = 2, NFT_CHAIN_T_MAX = 3, };
enum nft_cmp_attributes { NFTA_CMP_UNSPEC = 0, NFTA_CMP_SREG = 1, NFTA_CMP_OP = 2, NFTA_CMP_DATA = 3, __NFTA_CMP_MAX = 4, };
enum nft_cmp_ops { NFT_CMP_EQ = 0, NFT_CMP_NEQ = 1, NFT_CMP_LT = 2, NFT_CMP_LTE = 3, NFT_CMP_GT = 4, NFT_CMP_GTE = 5, };
enum nft_counter_attributes { NFTA_COUNTER_UNSPEC = 0, NFTA_COUNTER_BYTES = 1, NFTA_COUNTER_PACKETS = 2, NFTA_COUNTER_PAD = 3, __NFTA_COUNTER_MAX = 4, };
enum nft_data_attributes { NFTA_DATA_UNSPEC = 0, NFTA_DATA_VALUE = 1, NFTA_DATA_VERDICT = 2, __NFTA_DATA_MAX = 3, };
enum nft_data_desc_flags { NFT_DATA_DESC_SETELEM = 1, };
enum nft_data_types { NFT_DATA_VALUE = 0, NFT_DATA_VERDICT = 4294967040, };
enum nft_devices_attributes { NFTA_DEVICE_UNSPEC = 0, NFTA_DEVICE_NAME = 1, __NFTA_DEVICE_MAX = 2, };
enum nft_dynset_attributes { NFTA_DYNSET_UNSPEC = 0, NFTA_DYNSET_SET_NAME = 1, NFTA_DYNSET_SET_ID = 2, NFTA_DYNSET_OP = 3, NFTA_DYNSET_SREG_KEY = 4, NFTA_DYNSET_SREG_DATA = 5, NFTA_DYNSET_TIMEOUT = 6, NFTA_DYNSET_EXPR = 7, NFTA_DYNSET_PAD = 8, NFTA_DYNSET_FLAGS = 9, NFTA_DYNSET_EXPRESSIONS = 10, __NFTA_DYNSET_MAX = 11, };
enum nft_dynset_flags { NFT_DYNSET_F_INV = 1, NFT_DYNSET_F_EXPR = 2, };
enum nft_dynset_ops { NFT_DYNSET_OP_ADD = 0, NFT_DYNSET_OP_UPDATE = 1, NFT_DYNSET_OP_DELETE = 2, };
enum nft_expr_attributes { NFTA_EXPR_UNSPEC = 0, NFTA_EXPR_NAME = 1, NFTA_EXPR_DATA = 2, __NFTA_EXPR_MAX = 3, };
enum nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC = 0, NFTA_EXTHDR_DREG = 1, NFTA_EXTHDR_TYPE = 2, NFTA_EXTHDR_OFFSET = 3, NFTA_EXTHDR_LEN = 4, NFTA_EXTHDR_FLAGS = 5, NFTA_EXTHDR_OP = 6, NFTA_EXTHDR_SREG = 7, __NFTA_EXTHDR_MAX = 8, };
enum nft_exthdr_flags { NFT_EXTHDR_F_PRESENT = 1, };
enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6 = 0, NFT_EXTHDR_OP_TCPOPT = 1, NFT_EXTHDR_OP_IPV4 = 2, NFT_EXTHDR_OP_SCTP = 3, NFT_EXTHDR_OP_DCCP = 4, __NFT_EXTHDR_OP_MAX = 5, };
enum nft_flowtable_attributes { NFTA_FLOWTABLE_UNSPEC = 0, NFTA_FLOWTABLE_TABLE = 1, NFTA_FLOWTABLE_NAME = 2, NFTA_FLOWTABLE_HOOK = 3, NFTA_FLOWTABLE_USE = 4, NFTA_FLOWTABLE_HANDLE = 5, NFTA_FLOWTABLE_PAD = 6, NFTA_FLOWTABLE_FLAGS = 7, __NFTA_FLOWTABLE_MAX = 8, };
enum nft_flowtable_flags { NFT_FLOWTABLE_HW_OFFLOAD = 1, NFT_FLOWTABLE_COUNTER = 2, NFT_FLOWTABLE_MASK = 3, };
enum nft_flowtable_hook_attributes { NFTA_FLOWTABLE_HOOK_UNSPEC = 0, NFTA_FLOWTABLE_HOOK_NUM = 1, NFTA_FLOWTABLE_HOOK_PRIORITY = 2, NFTA_FLOWTABLE_HOOK_DEVS = 3, __NFTA_FLOWTABLE_HOOK_MAX = 4, };
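/*
 * The netlink attribute enums in this dump (NFTA_..., NFULA_..., NFQA_...)
 * follow the usual kernel uapi convention: each attribute list ends with a
 * __FOO_MAX sentinel, and the highest valid attribute is __FOO_MAX - 1
 * (the nlmsgerr_attrs enum further down spells this out explicitly with
 * __NLMSGERR_ATTR_MAX = 7 and NLMSGERR_ATTR_MAX = 6).
 */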
enum nft_gen_attributes { NFTA_GEN_UNSPEC = 0, NFTA_GEN_ID = 1, NFTA_GEN_PROC_PID = 2, NFTA_GEN_PROC_NAME = 3, __NFTA_GEN_MAX = 4, };
enum nft_hook_attributes { NFTA_HOOK_UNSPEC = 0, NFTA_HOOK_HOOKNUM = 1, NFTA_HOOK_PRIORITY = 2, NFTA_HOOK_DEV = 3, NFTA_HOOK_DEVS = 4, __NFTA_HOOK_MAX = 5, };
enum nft_immediate_attributes { NFTA_IMMEDIATE_UNSPEC = 0, NFTA_IMMEDIATE_DREG = 1, NFTA_IMMEDIATE_DATA = 2, __NFTA_IMMEDIATE_MAX = 3, };
enum nft_inner_attributes { NFTA_INNER_UNSPEC = 0, NFTA_INNER_NUM = 1, NFTA_INNER_TYPE = 2, NFTA_INNER_FLAGS = 3, NFTA_INNER_HDRSIZE = 4, NFTA_INNER_EXPR = 5, __NFTA_INNER_MAX = 6, };
enum nft_inner_flags { NFT_INNER_HDRSIZE = 1, NFT_INNER_LL = 2, NFT_INNER_NH = 4, NFT_INNER_TH = 8, };
enum nft_inner_type { NFT_INNER_UNSPEC = 0, NFT_INNER_VXLAN = 1, NFT_INNER_GENEVE = 2, };
enum nft_iter_type { NFT_ITER_UNSPEC = 0, NFT_ITER_READ = 1, NFT_ITER_UPDATE = 2, };
enum nft_last_attributes { NFTA_LAST_UNSPEC = 0, NFTA_LAST_SET = 1, NFTA_LAST_MSECS = 2, NFTA_LAST_PAD = 3, __NFTA_LAST_MAX = 4, };
enum nft_list_attributes { NFTA_LIST_UNSPEC = 0, NFTA_LIST_ELEM = 1, __NFTA_LIST_MAX = 2, };
enum nft_lookup_attributes { NFTA_LOOKUP_UNSPEC = 0, NFTA_LOOKUP_SET = 1, NFTA_LOOKUP_SREG = 2, NFTA_LOOKUP_DREG = 3, NFTA_LOOKUP_SET_ID = 4, NFTA_LOOKUP_FLAGS = 5, __NFTA_LOOKUP_MAX = 6, };
enum nft_lookup_flags { NFT_LOOKUP_F_INV = 1, };
enum nft_meta_attributes { NFTA_META_UNSPEC = 0, NFTA_META_DREG = 1, NFTA_META_KEY = 2, NFTA_META_SREG = 3, __NFTA_META_MAX = 4, };
enum nft_meta_keys { NFT_META_LEN = 0, NFT_META_PROTOCOL = 1, NFT_META_PRIORITY = 2, NFT_META_MARK = 3, NFT_META_IIF = 4, NFT_META_OIF = 5, NFT_META_IIFNAME = 6, NFT_META_OIFNAME = 7, NFT_META_IFTYPE = 8, NFT_META_OIFTYPE = 9, NFT_META_SKUID = 10, NFT_META_SKGID = 11, NFT_META_NFTRACE = 12, NFT_META_RTCLASSID = 13, NFT_META_SECMARK = 14, NFT_META_NFPROTO = 15, NFT_META_L4PROTO = 16, NFT_META_BRI_IIFNAME = 17, NFT_META_BRI_OIFNAME = 18, NFT_META_PKTTYPE = 19, NFT_META_CPU = 20, NFT_META_IIFGROUP = 21, NFT_META_OIFGROUP = 22, NFT_META_CGROUP = 23, NFT_META_PRANDOM = 24, NFT_META_SECPATH = 25, NFT_META_IIFKIND = 26, NFT_META_OIFKIND = 27, NFT_META_BRI_IIFPVID = 28, NFT_META_BRI_IIFVPROTO = 29, NFT_META_TIME_NS = 30, NFT_META_TIME_DAY = 31, NFT_META_TIME_HOUR = 32, NFT_META_SDIF = 33, NFT_META_SDIFNAME = 34, NFT_META_BRI_BROUTE = 35, __NFT_META_IIFTYPE = 36, };
enum nft_object_attributes { NFTA_OBJ_UNSPEC = 0, NFTA_OBJ_TABLE = 1, NFTA_OBJ_NAME = 2, NFTA_OBJ_TYPE = 3, NFTA_OBJ_DATA = 4, NFTA_OBJ_USE = 5, NFTA_OBJ_HANDLE = 6, NFTA_OBJ_PAD = 7, NFTA_OBJ_USERDATA = 8, __NFTA_OBJ_MAX = 9, };
enum nft_objref_attributes { NFTA_OBJREF_UNSPEC = 0, NFTA_OBJREF_IMM_TYPE = 1, NFTA_OBJREF_IMM_NAME = 2, NFTA_OBJREF_SET_SREG = 3, NFTA_OBJREF_SET_NAME = 4, NFTA_OBJREF_SET_ID = 5, __NFTA_OBJREF_MAX = 6, };
enum nft_offload_attributes { NFTA_FLOW_UNSPEC = 0, NFTA_FLOW_TABLE_NAME = 1, __NFTA_FLOW_MAX = 2, };
enum nft_offload_dep_type { NFT_OFFLOAD_DEP_UNSPEC = 0, NFT_OFFLOAD_DEP_NETWORK = 1, NFT_OFFLOAD_DEP_TRANSPORT = 2, };
enum nft_offload_reg_flags { NFT_OFFLOAD_F_NETWORK2HOST = 1, };
enum nft_payload_attributes { NFTA_PAYLOAD_UNSPEC = 0, NFTA_PAYLOAD_DREG = 1, NFTA_PAYLOAD_BASE = 2, NFTA_PAYLOAD_OFFSET = 3, NFTA_PAYLOAD_LEN = 4, NFTA_PAYLOAD_SREG = 5, NFTA_PAYLOAD_CSUM_TYPE = 6, NFTA_PAYLOAD_CSUM_OFFSET = 7, NFTA_PAYLOAD_CSUM_FLAGS = 8, __NFTA_PAYLOAD_MAX = 9, };
enum nft_payload_bases { NFT_PAYLOAD_LL_HEADER = 0,
NFT_PAYLOAD_NETWORK_HEADER = 1, NFT_PAYLOAD_TRANSPORT_HEADER = 2, NFT_PAYLOAD_INNER_HEADER = 3, NFT_PAYLOAD_TUN_HEADER = 4, };
enum nft_payload_csum_flags { NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 1, };
enum nft_payload_csum_types { NFT_PAYLOAD_CSUM_NONE = 0, NFT_PAYLOAD_CSUM_INET = 1, NFT_PAYLOAD_CSUM_SCTP = 2, };
enum nft_range_attributes { NFTA_RANGE_UNSPEC = 0, NFTA_RANGE_SREG = 1, NFTA_RANGE_OP = 2, NFTA_RANGE_FROM_DATA = 3, NFTA_RANGE_TO_DATA = 4, __NFTA_RANGE_MAX = 5, };
enum nft_range_ops { NFT_RANGE_EQ = 0, NFT_RANGE_NEQ = 1, };
enum nft_registers { NFT_REG_VERDICT = 0, NFT_REG_1 = 1, NFT_REG_2 = 2, NFT_REG_3 = 3, NFT_REG_4 = 4, __NFT_REG_MAX = 5, NFT_REG32_00 = 8, NFT_REG32_01 = 9, NFT_REG32_02 = 10, NFT_REG32_03 = 11, NFT_REG32_04 = 12, NFT_REG32_05 = 13, NFT_REG32_06 = 14, NFT_REG32_07 = 15, NFT_REG32_08 = 16, NFT_REG32_09 = 17, NFT_REG32_10 = 18, NFT_REG32_11 = 19, NFT_REG32_12 = 20, NFT_REG32_13 = 21, NFT_REG32_14 = 22, NFT_REG32_15 = 23, };
enum nft_rt_attributes { NFTA_RT_UNSPEC = 0, NFTA_RT_DREG = 1, NFTA_RT_KEY = 2, __NFTA_RT_MAX = 3, };
enum nft_rt_keys { NFT_RT_CLASSID = 0, NFT_RT_NEXTHOP4 = 1, NFT_RT_NEXTHOP6 = 2, NFT_RT_TCPMSS = 3, NFT_RT_XFRM = 4, __NFT_RT_MAX = 5, };
enum nft_rule_attributes { NFTA_RULE_UNSPEC = 0, NFTA_RULE_TABLE = 1, NFTA_RULE_CHAIN = 2, NFTA_RULE_HANDLE = 3, NFTA_RULE_EXPRESSIONS = 4, NFTA_RULE_COMPAT = 5, NFTA_RULE_POSITION = 6, NFTA_RULE_USERDATA = 7, NFTA_RULE_PAD = 8, NFTA_RULE_ID = 9, NFTA_RULE_POSITION_ID = 10, NFTA_RULE_CHAIN_ID = 11, __NFTA_RULE_MAX = 12, };
enum nft_secmark_attributes { NFTA_SECMARK_UNSPEC = 0, NFTA_SECMARK_CTX = 1, __NFTA_SECMARK_MAX = 2, };
enum nft_set_attributes { NFTA_SET_UNSPEC = 0, NFTA_SET_TABLE = 1, NFTA_SET_NAME = 2, NFTA_SET_FLAGS = 3, NFTA_SET_KEY_TYPE = 4, NFTA_SET_KEY_LEN = 5, NFTA_SET_DATA_TYPE = 6, NFTA_SET_DATA_LEN = 7, NFTA_SET_POLICY = 8, NFTA_SET_DESC = 9, NFTA_SET_ID = 10, NFTA_SET_TIMEOUT = 11, NFTA_SET_GC_INTERVAL = 12, NFTA_SET_USERDATA = 13, NFTA_SET_PAD = 14, NFTA_SET_OBJ_TYPE = 15, NFTA_SET_HANDLE = 16, NFTA_SET_EXPR = 17, NFTA_SET_EXPRESSIONS = 18, __NFTA_SET_MAX = 19, };
enum nft_set_class { NFT_SET_CLASS_O_1 = 0, NFT_SET_CLASS_O_LOG_N = 1, NFT_SET_CLASS_O_N = 2, };
enum nft_set_desc_attributes { NFTA_SET_DESC_UNSPEC = 0, NFTA_SET_DESC_SIZE = 1, NFTA_SET_DESC_CONCAT = 2, __NFTA_SET_DESC_MAX = 3, };
enum nft_set_elem_attributes { NFTA_SET_ELEM_UNSPEC = 0, NFTA_SET_ELEM_KEY = 1, NFTA_SET_ELEM_DATA = 2, NFTA_SET_ELEM_FLAGS = 3, NFTA_SET_ELEM_TIMEOUT = 4, NFTA_SET_ELEM_EXPIRATION = 5, NFTA_SET_ELEM_USERDATA = 6, NFTA_SET_ELEM_EXPR = 7, NFTA_SET_ELEM_PAD = 8, NFTA_SET_ELEM_OBJREF = 9, NFTA_SET_ELEM_KEY_END = 10, NFTA_SET_ELEM_EXPRESSIONS = 11, __NFTA_SET_ELEM_MAX = 12, };
enum nft_set_elem_flags { NFT_SET_ELEM_INTERVAL_END = 1, NFT_SET_ELEM_CATCHALL = 2, };
enum nft_set_elem_list_attributes { NFTA_SET_ELEM_LIST_UNSPEC = 0, NFTA_SET_ELEM_LIST_TABLE = 1, NFTA_SET_ELEM_LIST_SET = 2, NFTA_SET_ELEM_LIST_ELEMENTS = 3, NFTA_SET_ELEM_LIST_SET_ID = 4, __NFTA_SET_ELEM_LIST_MAX = 5, };
enum nft_set_extensions { NFT_SET_EXT_KEY = 0, NFT_SET_EXT_KEY_END = 1, NFT_SET_EXT_DATA = 2, NFT_SET_EXT_FLAGS = 3, NFT_SET_EXT_TIMEOUT = 4, NFT_SET_EXT_USERDATA = 5, NFT_SET_EXT_EXPRESSIONS = 6, NFT_SET_EXT_OBJREF = 7, NFT_SET_EXT_NUM = 8, };
enum nft_set_field_attributes { NFTA_SET_FIELD_UNSPEC = 0, NFTA_SET_FIELD_LEN = 1, __NFTA_SET_FIELD_MAX = 2, };
enum nft_set_flags { NFT_SET_ANONYMOUS = 1, NFT_SET_CONSTANT = 2, NFT_SET_INTERVAL = 4, NFT_SET_MAP = 8, NFT_SET_TIMEOUT = 16, NFT_SET_EVAL = 32, NFT_SET_OBJECT = 64, NFT_SET_CONCAT = 128, NFT_SET_EXPR = 256, };
enum nft_set_policies { NFT_SET_POL_PERFORMANCE = 0, NFT_SET_POL_MEMORY = 1, };
enum nft_table_attributes { NFTA_TABLE_UNSPEC = 0, NFTA_TABLE_NAME = 1, NFTA_TABLE_FLAGS = 2, NFTA_TABLE_USE = 3, NFTA_TABLE_HANDLE = 4, NFTA_TABLE_PAD = 5, NFTA_TABLE_USERDATA = 6, NFTA_TABLE_OWNER = 7, __NFTA_TABLE_MAX = 8, };
enum nft_table_flags { NFT_TABLE_F_DORMANT = 1, NFT_TABLE_F_OWNER = 2, NFT_TABLE_F_PERSIST = 4, };
enum nft_trace_attributes { NFTA_TRACE_UNSPEC = 0, NFTA_TRACE_TABLE = 1, NFTA_TRACE_CHAIN = 2, NFTA_TRACE_RULE_HANDLE = 3, NFTA_TRACE_TYPE = 4, NFTA_TRACE_VERDICT = 5, NFTA_TRACE_ID = 6, NFTA_TRACE_LL_HEADER = 7, NFTA_TRACE_NETWORK_HEADER = 8, NFTA_TRACE_TRANSPORT_HEADER = 9, NFTA_TRACE_IIF = 10, NFTA_TRACE_IIFTYPE = 11, NFTA_TRACE_OIF = 12, NFTA_TRACE_OIFTYPE = 13, NFTA_TRACE_MARK = 14, NFTA_TRACE_NFPROTO = 15, NFTA_TRACE_POLICY = 16, NFTA_TRACE_PAD = 17, __NFTA_TRACE_MAX = 18, };
enum nft_trace_types { NFT_TRACETYPE_UNSPEC = 0, NFT_TRACETYPE_POLICY = 1, NFT_TRACETYPE_RETURN = 2, NFT_TRACETYPE_RULE = 3, __NFT_TRACETYPE_MAX = 4, };
enum nft_trans_elem_flags { NFT_TRANS_UPD_TIMEOUT = 1, NFT_TRANS_UPD_EXPIRATION = 2, };
enum nft_trans_phase { NFT_TRANS_PREPARE = 0, NFT_TRANS_PREPARE_ERROR = 1, NFT_TRANS_ABORT = 2, NFT_TRANS_COMMIT = 3, NFT_TRANS_RELEASE = 4, };
enum nft_verdict_attributes { NFTA_VERDICT_UNSPEC = 0, NFTA_VERDICT_CODE = 1, NFTA_VERDICT_CHAIN = 2, NFTA_VERDICT_CHAIN_ID = 3, __NFTA_VERDICT_MAX = 4, };
enum nft_verdicts { NFT_CONTINUE = -1, NFT_BREAK = -2, NFT_JUMP = -3, NFT_GOTO = -4, NFT_RETURN = -5, };
enum nfulnl_attr_config { NFULA_CFG_UNSPEC = 0, NFULA_CFG_CMD = 1, NFULA_CFG_MODE = 2, NFULA_CFG_NLBUFSIZ = 3, NFULA_CFG_TIMEOUT = 4, NFULA_CFG_QTHRESH = 5, NFULA_CFG_FLAGS = 6, __NFULA_CFG_MAX = 7, };
enum nfulnl_attr_type { NFULA_UNSPEC = 0, NFULA_PACKET_HDR = 1, NFULA_MARK = 2, NFULA_TIMESTAMP = 3, NFULA_IFINDEX_INDEV = 4, NFULA_IFINDEX_OUTDEV = 5, NFULA_IFINDEX_PHYSINDEV = 6, NFULA_IFINDEX_PHYSOUTDEV = 7, NFULA_HWADDR = 8, NFULA_PAYLOAD = 9, NFULA_PREFIX = 10, NFULA_UID = 11, NFULA_SEQ = 12, NFULA_SEQ_GLOBAL = 13, NFULA_GID = 14, NFULA_HWTYPE = 15, NFULA_HWHEADER = 16, NFULA_HWLEN = 17, NFULA_CT = 18, NFULA_CT_INFO = 19, NFULA_VLAN = 20, NFULA_L2HDR = 21, __NFULA_MAX = 22, };
enum nfulnl_msg_config_cmds { NFULNL_CFG_CMD_NONE = 0, NFULNL_CFG_CMD_BIND = 1, NFULNL_CFG_CMD_UNBIND = 2, NFULNL_CFG_CMD_PF_BIND = 3, NFULNL_CFG_CMD_PF_UNBIND = 4, };
enum nfulnl_msg_types { NFULNL_MSG_PACKET = 0, NFULNL_MSG_CONFIG = 1, NFULNL_MSG_MAX = 2, };
enum nfulnl_vlan_attr { NFULA_VLAN_UNSPEC = 0, NFULA_VLAN_PROTO = 1, NFULA_VLAN_TCI = 2, __NFULA_VLAN_MAX = 3, };
enum nh_notifier_info_type { NH_NOTIFIER_INFO_TYPE_SINGLE = 0, NH_NOTIFIER_INFO_TYPE_GRP = 1, NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS = 4, };
enum nla_policy_validation { NLA_VALIDATE_NONE = 0, NLA_VALIDATE_RANGE = 1, NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, NLA_VALIDATE_MIN = 3, NLA_VALIDATE_MAX = 4, NLA_VALIDATE_MASK = 5, NLA_VALIDATE_RANGE_PTR = 6, NLA_VALIDATE_FUNCTION = 7, };
enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED = 0, NLMSGERR_ATTR_MSG = 1, NLMSGERR_ATTR_OFFS = 2, NLMSGERR_ATTR_COOKIE = 3, NLMSGERR_ATTR_POLICY = 4, NLMSGERR_ATTR_MISS_TYPE = 5, NLMSGERR_ATTR_MISS_NEST = 6, __NLMSGERR_ATTR_MAX = 7, NLMSGERR_ATTR_MAX = 6, };
enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING = 1, NMI_LATCHED = 2, };
enum node_stat_item { NR_LRU_BASE = 0, NR_INACTIVE_ANON = 0, NR_ACTIVE_ANON = 1, NR_INACTIVE_FILE = 2, NR_ACTIVE_FILE = 3,
NR_UNEVICTABLE = 4, NR_SLAB_RECLAIMABLE_B = 5, NR_SLAB_UNRECLAIMABLE_B = 6, NR_ISOLATED_ANON = 7, NR_ISOLATED_FILE = 8, WORKINGSET_NODES = 9, WORKINGSET_REFAULT_BASE = 10, WORKINGSET_REFAULT_ANON = 10, WORKINGSET_REFAULT_FILE = 11, WORKINGSET_ACTIVATE_BASE = 12, WORKINGSET_ACTIVATE_ANON = 12, WORKINGSET_ACTIVATE_FILE = 13, WORKINGSET_RESTORE_BASE = 14, WORKINGSET_RESTORE_ANON = 14, WORKINGSET_RESTORE_FILE = 15, WORKINGSET_NODERECLAIM = 16, NR_ANON_MAPPED = 17, NR_FILE_MAPPED = 18, NR_FILE_PAGES = 19, NR_FILE_DIRTY = 20, NR_WRITEBACK = 21, NR_WRITEBACK_TEMP = 22, NR_SHMEM = 23, NR_SHMEM_THPS = 24, NR_SHMEM_PMDMAPPED = 25, NR_FILE_THPS = 26, NR_FILE_PMDMAPPED = 27, NR_ANON_THPS = 28, NR_VMSCAN_WRITE = 29, NR_VMSCAN_IMMEDIATE = 30, NR_DIRTIED = 31, NR_WRITTEN = 32, NR_THROTTLED_WRITTEN = 33, NR_KERNEL_MISC_RECLAIMABLE = 34, NR_FOLL_PIN_ACQUIRED = 35, NR_FOLL_PIN_RELEASED = 36, NR_KERNEL_STACK_KB = 37, NR_PAGETABLE = 38, NR_SECONDARY_PAGETABLE = 39, NR_IOMMU_PAGES = 40, NR_SWAPCACHE = 41, PGPROMOTE_SUCCESS = 42, PGPROMOTE_CANDIDATE = 43, PGDEMOTE_KSWAPD = 44, PGDEMOTE_DIRECT = 45, PGDEMOTE_KHUGEPAGED = 46, NR_VM_NODE_STAT_ITEMS = 47, }; enum node_states { N_POSSIBLE = 0, N_ONLINE = 1, N_NORMAL_MEMORY = 2, N_HIGH_MEMORY = 2, N_MEMORY = 3, N_CPU = 4, N_GENERIC_INITIATOR = 5, NR_NODE_STATES = 6, }; enum notify_state { SECCOMP_NOTIFY_INIT = 0, SECCOMP_NOTIFY_SENT = 1, SECCOMP_NOTIFY_REPLIED = 2, }; enum nsim_dev_hwstats_do { NSIM_DEV_HWSTATS_DO_DISABLE = 0, NSIM_DEV_HWSTATS_DO_ENABLE = 1, NSIM_DEV_HWSTATS_DO_FAIL = 2, }; enum nsim_dev_port_type { NSIM_DEV_PORT_TYPE_PF = 0, NSIM_DEV_PORT_TYPE_VF = 1, }; enum nsim_devlink_param_id { NSIM_DEVLINK_PARAM_ID_BASE = 16, NSIM_DEVLINK_PARAM_ID_TEST1 = 17, }; enum nsim_resource_id { NSIM_RESOURCE_NONE = 0, NSIM_RESOURCE_IPV4 = 1, NSIM_RESOURCE_IPV4_FIB = 2, NSIM_RESOURCE_IPV4_FIB_RULES = 3, NSIM_RESOURCE_IPV6 = 4, NSIM_RESOURCE_IPV6_FIB = 5, NSIM_RESOURCE_IPV6_FIB_RULES = 6, NSIM_RESOURCE_NEXTHOPS = 7, }; enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU = 1, NUMA_MEMBUF = 2, NUMA_CPUBUF = 3, }; enum numa_stat_item { NUMA_HIT = 0, NUMA_MISS = 1, NUMA_FOREIGN = 2, NUMA_INTERLEAVE_HIT = 3, NUMA_LOCAL = 4, NUMA_OTHER = 5, NR_VM_NUMA_EVENT_ITEMS = 6, }; enum numa_topology_type { NUMA_DIRECT = 0, NUMA_GLUELESS_MESH = 1, NUMA_BACKPLANE = 2, }; enum numa_type { node_has_spare = 0, node_fully_busy = 1, node_overloaded = 2, }; enum numa_vmaskip_reason { NUMAB_SKIP_UNSUITABLE = 0, NUMAB_SKIP_SHARED_RO = 1, NUMAB_SKIP_INACCESSIBLE = 2, NUMAB_SKIP_SCAN_DELAY = 3, NUMAB_SKIP_PID_INACTIVE = 4, NUMAB_SKIP_IGNORE_PID = 5, NUMAB_SKIP_SEQ_COMPLETED = 6, }; enum nvmem_type { NVMEM_TYPE_UNKNOWN = 0, NVMEM_TYPE_EEPROM = 1, NVMEM_TYPE_OTP = 2, NVMEM_TYPE_BATTERY_BACKED = 3, NVMEM_TYPE_FRAM = 4, }; enum objext_flags { OBJEXTS_ALLOC_FAIL = 4, __NR_OBJEXTS_FLAGS = 8, }; enum offload_act_command { FLOW_ACT_REPLACE = 0, FLOW_ACT_DESTROY = 1, FLOW_ACT_STATS = 2, }; enum oom_constraint { CONSTRAINT_NONE = 0, CONSTRAINT_CPUSET = 1, CONSTRAINT_MEMORY_POLICY = 2, CONSTRAINT_MEMCG = 3, }; enum owner_state { OWNER_NULL = 1, OWNER_WRITER = 2, OWNER_READER = 4, OWNER_NONSPINNABLE = 8, }; enum packet_sock_flags { PACKET_SOCK_ORIGDEV = 0, PACKET_SOCK_AUXDATA = 1, PACKET_SOCK_TX_HAS_OFF = 2, PACKET_SOCK_TP_LOSS = 3, PACKET_SOCK_RUNNING = 4, PACKET_SOCK_PRESSURE = 5, PACKET_SOCK_QDISC_BYPASS = 6, }; enum page_cache_mode { _PAGE_CACHE_MODE_WB = 0, _PAGE_CACHE_MODE_WC = 1, _PAGE_CACHE_MODE_UC_MINUS = 2, _PAGE_CACHE_MODE_UC = 3, _PAGE_CACHE_MODE_WT = 4, _PAGE_CACHE_MODE_WP = 5, _PAGE_CACHE_MODE_NUM = 
8, }; enum page_memcg_data_flags { MEMCG_DATA_OBJEXTS = 1, MEMCG_DATA_KMEM = 2, __NR_MEMCG_DATA_FLAGS = 4, }; enum page_size_enum { __PAGE_SIZE = 4096, }; enum page_walk_action { ACTION_SUBTREE = 0, ACTION_CONTINUE = 1, ACTION_AGAIN = 2, }; enum page_walk_lock { PGWALK_RDLOCK = 0, PGWALK_WRLOCK = 1, PGWALK_WRLOCK_VERIFY = 2, }; enum pageblock_bits { PB_migrate = 0, PB_migrate_end = 2, PB_migrate_skip = 3, NR_PAGEBLOCK_BITS = 4, }; enum pageflags { PG_locked = 0, PG_writeback = 1, PG_referenced = 2, PG_uptodate = 3, PG_dirty = 4, PG_lru = 5, PG_head = 6, PG_waiters = 7, PG_active = 8, PG_workingset = 9, PG_owner_priv_1 = 10, PG_owner_2 = 11, PG_arch_1 = 12, PG_reserved = 13, PG_private = 14, PG_private_2 = 15, PG_reclaim = 16, PG_swapbacked = 17, PG_unevictable = 18, PG_mlocked = 19, PG_hwpoison = 20, PG_arch_2 = 21, __NR_PAGEFLAGS = 22, PG_readahead = 16, PG_swapcache = 10, PG_checked = 10, PG_anon_exclusive = 11, PG_mappedtodisk = 11, PG_fscache = 15, PG_pinned = 10, PG_savepinned = 4, PG_foreign = 10, PG_xen_remapped = 10, PG_isolated = 16, PG_reported = 3, PG_has_hwpoisoned = 8, PG_large_rmappable = 9, PG_partially_mapped = 16, }; enum pagetype { PGTY_buddy = 240, PGTY_offline = 241, PGTY_table = 242, PGTY_guard = 243, PGTY_hugetlb = 244, PGTY_slab = 245, PGTY_zsmalloc = 246, PGTY_unaccepted = 247, PGTY_mapcount_underflow = 255, }; enum partition_cmd { partcmd_enable = 0, partcmd_enablei = 1, partcmd_disable = 2, partcmd_update = 3, partcmd_invalidate = 4, }; enum passtype { PASS_SCAN = 0, PASS_REVOKE = 1, PASS_REPLAY = 2, }; enum pci_bar_type { pci_bar_unknown = 0, pci_bar_io = 1, pci_bar_mem32 = 2, pci_bar_mem64 = 3, }; enum pci_bf_sort_state { pci_bf_sort_default = 0, pci_force_nobf = 1, pci_force_bf = 2, pci_dmi_bf = 3, }; enum pci_board_num_t { pbn_default = 0, pbn_b0_1_115200 = 1, pbn_b0_2_115200 = 2, pbn_b0_4_115200 = 3, pbn_b0_5_115200 = 4, pbn_b0_8_115200 = 5, pbn_b0_1_921600 = 6, pbn_b0_2_921600 = 7, pbn_b0_4_921600 = 8, pbn_b0_2_1130000 = 9, pbn_b0_4_1152000 = 10, pbn_b0_4_1250000 = 11, pbn_b0_2_1843200 = 12, pbn_b0_4_1843200 = 13, pbn_b0_1_15625000 = 14, pbn_b0_bt_1_115200 = 15, pbn_b0_bt_2_115200 = 16, pbn_b0_bt_4_115200 = 17, pbn_b0_bt_8_115200 = 18, pbn_b0_bt_1_460800 = 19, pbn_b0_bt_2_460800 = 20, pbn_b0_bt_4_460800 = 21, pbn_b0_bt_1_921600 = 22, pbn_b0_bt_2_921600 = 23, pbn_b0_bt_4_921600 = 24, pbn_b0_bt_8_921600 = 25, pbn_b1_1_115200 = 26, pbn_b1_2_115200 = 27, pbn_b1_4_115200 = 28, pbn_b1_8_115200 = 29, pbn_b1_16_115200 = 30, pbn_b1_1_921600 = 31, pbn_b1_2_921600 = 32, pbn_b1_4_921600 = 33, pbn_b1_8_921600 = 34, pbn_b1_2_1250000 = 35, pbn_b1_bt_1_115200 = 36, pbn_b1_bt_2_115200 = 37, pbn_b1_bt_4_115200 = 38, pbn_b1_bt_2_921600 = 39, pbn_b1_1_1382400 = 40, pbn_b1_2_1382400 = 41, pbn_b1_4_1382400 = 42, pbn_b1_8_1382400 = 43, pbn_b2_1_115200 = 44, pbn_b2_2_115200 = 45, pbn_b2_4_115200 = 46, pbn_b2_8_115200 = 47, pbn_b2_1_460800 = 48, pbn_b2_4_460800 = 49, pbn_b2_8_460800 = 50, pbn_b2_16_460800 = 51, pbn_b2_1_921600 = 52, pbn_b2_4_921600 = 53, pbn_b2_8_921600 = 54, pbn_b2_8_1152000 = 55, pbn_b2_bt_1_115200 = 56, pbn_b2_bt_2_115200 = 57, pbn_b2_bt_4_115200 = 58, pbn_b2_bt_2_921600 = 59, pbn_b2_bt_4_921600 = 60, pbn_b3_2_115200 = 61, pbn_b3_4_115200 = 62, pbn_b3_8_115200 = 63, pbn_b4_bt_2_921600 = 64, pbn_b4_bt_4_921600 = 65, pbn_b4_bt_8_921600 = 66, pbn_panacom = 67, pbn_panacom2 = 68, pbn_panacom4 = 69, pbn_plx_romulus = 70, pbn_oxsemi = 71, pbn_oxsemi_1_15625000 = 72, pbn_oxsemi_2_15625000 = 73, pbn_oxsemi_4_15625000 = 74, pbn_oxsemi_8_15625000 = 75, pbn_intel_i960 = 76, 
pbn_sgi_ioc3 = 77, pbn_computone_4 = 78, pbn_computone_6 = 79, pbn_computone_8 = 80, pbn_sbsxrsio = 81, pbn_pasemi_1682M = 82, pbn_ni8430_2 = 83, pbn_ni8430_4 = 84, pbn_ni8430_8 = 85, pbn_ni8430_16 = 86, pbn_ADDIDATA_PCIe_1_3906250 = 87, pbn_ADDIDATA_PCIe_2_3906250 = 88, pbn_ADDIDATA_PCIe_4_3906250 = 89, pbn_ADDIDATA_PCIe_8_3906250 = 90, pbn_ce4100_1_115200 = 91, pbn_omegapci = 92, pbn_NETMOS9900_2s_115200 = 93, pbn_brcm_trumanage = 94, pbn_fintek_4 = 95, pbn_fintek_8 = 96, pbn_fintek_12 = 97, pbn_fintek_F81504A = 98, pbn_fintek_F81508A = 99, pbn_fintek_F81512A = 100, pbn_wch382_2 = 101, pbn_wch384_4 = 102, pbn_wch384_8 = 103, pbn_sunix_pci_1s = 104, pbn_sunix_pci_2s = 105, pbn_sunix_pci_4s = 106, pbn_sunix_pci_8s = 107, pbn_sunix_pci_16s = 108, pbn_titan_1_4000000 = 109, pbn_titan_2_4000000 = 110, pbn_titan_4_4000000 = 111, pbn_titan_8_4000000 = 112, pbn_moxa_2 = 113, pbn_moxa_4 = 114, pbn_moxa_8 = 115, }; enum pci_bus_flags { PCI_BUS_FLAGS_NO_MSI = 1, PCI_BUS_FLAGS_NO_MMRBC = 2, PCI_BUS_FLAGS_NO_AERSID = 4, PCI_BUS_FLAGS_NO_EXTCFG = 8, }; enum pci_bus_speed { PCI_SPEED_33MHz = 0, PCI_SPEED_66MHz = 1, PCI_SPEED_66MHz_PCIX = 2, PCI_SPEED_100MHz_PCIX = 3, PCI_SPEED_133MHz_PCIX = 4, PCI_SPEED_66MHz_PCIX_ECC = 5, PCI_SPEED_100MHz_PCIX_ECC = 6, PCI_SPEED_133MHz_PCIX_ECC = 7, PCI_SPEED_66MHz_PCIX_266 = 9, PCI_SPEED_100MHz_PCIX_266 = 10, PCI_SPEED_133MHz_PCIX_266 = 11, AGP_UNKNOWN = 12, AGP_1X = 13, AGP_2X = 14, AGP_4X = 15, AGP_8X = 16, PCI_SPEED_66MHz_PCIX_533 = 17, PCI_SPEED_100MHz_PCIX_533 = 18, PCI_SPEED_133MHz_PCIX_533 = 19, PCIE_SPEED_2_5GT = 20, PCIE_SPEED_5_0GT = 21, PCIE_SPEED_8_0GT = 22, PCIE_SPEED_16_0GT = 23, PCIE_SPEED_32_0GT = 24, PCIE_SPEED_64_0GT = 25, PCI_SPEED_UNKNOWN = 255, }; enum pci_dev_flags { PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, PCI_DEV_FLAGS_NO_D3 = 2, PCI_DEV_FLAGS_ASSIGNED = 4, PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, PCI_DEV_FLAGS_NO_BUS_RESET = 64, PCI_DEV_FLAGS_NO_PM_RESET = 128, PCI_DEV_FLAGS_VPD_REF_F0 = 256, PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, PCI_DEV_FLAGS_NO_FLR_RESET = 1024, PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, PCI_DEV_FLAGS_HAS_MSI_MASKING = 4096, }; enum pci_ers_result { PCI_ERS_RESULT_NONE = 1, PCI_ERS_RESULT_CAN_RECOVER = 2, PCI_ERS_RESULT_NEED_RESET = 3, PCI_ERS_RESULT_DISCONNECT = 4, PCI_ERS_RESULT_RECOVERED = 5, PCI_ERS_RESULT_NO_AER_DRIVER = 6, }; enum pci_fixup_pass { pci_fixup_early = 0, pci_fixup_header = 1, pci_fixup_final = 2, pci_fixup_enable = 3, pci_fixup_resume = 4, pci_fixup_suspend = 5, pci_fixup_resume_early = 6, pci_fixup_suspend_late = 7, }; enum pci_irq_reroute_variant { INTEL_IRQ_REROUTE_VARIANT = 1, MAX_IRQ_REROUTE_VARIANTS = 3, }; enum pci_mmap_api { PCI_MMAP_SYSFS = 0, PCI_MMAP_PROCFS = 1, }; enum pci_mmap_state { pci_mmap_io = 0, pci_mmap_mem = 1, }; enum pci_p2pdma_map_type { PCI_P2PDMA_MAP_UNKNOWN = 0, PCI_P2PDMA_MAP_NOT_SUPPORTED = 1, PCI_P2PDMA_MAP_BUS_ADDR = 2, PCI_P2PDMA_MAP_THRU_HOST_BRIDGE = 3, }; enum pcie_bus_config_types { PCIE_BUS_TUNE_OFF = 0, PCIE_BUS_DEFAULT = 1, PCIE_BUS_SAFE = 2, PCIE_BUS_PERFORMANCE = 3, PCIE_BUS_PEER2PEER = 4, }; enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0, PCIE_LNK_X1 = 1, PCIE_LNK_X2 = 2, PCIE_LNK_X4 = 4, PCIE_LNK_X8 = 8, PCIE_LNK_X12 = 12, PCIE_LNK_X16 = 16, PCIE_LNK_X32 = 32, PCIE_LNK_WIDTH_UNKNOWN = 255, }; enum pcie_reset_state { pcie_deassert_reset = 1, pcie_warm_reset = 2, pcie_hot_reset = 3, }; enum pcim_addr_devres_type { PCIM_ADDR_DEVRES_TYPE_INVALID = 0, PCIM_ADDR_DEVRES_TYPE_REGION = 1, PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING = 2, 
PCIM_ADDR_DEVRES_TYPE_MAPPING = 3, }; enum pcpu_fc { PCPU_FC_AUTO = 0, PCPU_FC_EMBED = 1, PCPU_FC_PAGE = 2, PCPU_FC_NR = 3, }; enum pedit_cmd { TCA_PEDIT_KEY_EX_CMD_SET = 0, TCA_PEDIT_KEY_EX_CMD_ADD = 1, __PEDIT_CMD_MAX = 2, }; enum pedit_header_type { TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, __PEDIT_HDR_TYPE_MAX = 6, }; enum peer_app_attr { DCB_ATTR_CEE_PEER_APP_UNSPEC = 0, DCB_ATTR_CEE_PEER_APP_INFO = 1, DCB_ATTR_CEE_PEER_APP = 2, __DCB_ATTR_CEE_PEER_APP_MAX = 3, }; enum perf_addr_filter_action_t { PERF_ADDR_FILTER_ACTION_STOP = 0, PERF_ADDR_FILTER_ACTION_START = 1, PERF_ADDR_FILTER_ACTION_FILTER = 2, }; enum perf_adl_uncore_imc_freerunning_types { ADL_MMIO_UNCORE_IMC_DATA_TOTAL = 0, ADL_MMIO_UNCORE_IMC_DATA_READ = 1, ADL_MMIO_UNCORE_IMC_DATA_WRITE = 2, ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX = 3, }; enum perf_bpf_event_type { PERF_BPF_EVENT_UNKNOWN = 0, PERF_BPF_EVENT_PROG_LOAD = 1, PERF_BPF_EVENT_PROG_UNLOAD = 2, PERF_BPF_EVENT_MAX = 3, }; enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_USER = 1, PERF_SAMPLE_BRANCH_KERNEL = 2, PERF_SAMPLE_BRANCH_HV = 4, PERF_SAMPLE_BRANCH_ANY = 8, PERF_SAMPLE_BRANCH_ANY_CALL = 16, PERF_SAMPLE_BRANCH_ANY_RETURN = 32, PERF_SAMPLE_BRANCH_IND_CALL = 64, PERF_SAMPLE_BRANCH_ABORT_TX = 128, PERF_SAMPLE_BRANCH_IN_TX = 256, PERF_SAMPLE_BRANCH_NO_TX = 512, PERF_SAMPLE_BRANCH_COND = 1024, PERF_SAMPLE_BRANCH_CALL_STACK = 2048, PERF_SAMPLE_BRANCH_IND_JUMP = 4096, PERF_SAMPLE_BRANCH_CALL = 8192, PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, PERF_SAMPLE_BRANCH_HW_INDEX = 131072, PERF_SAMPLE_BRANCH_PRIV_SAVE = 262144, PERF_SAMPLE_BRANCH_COUNTERS = 524288, PERF_SAMPLE_BRANCH_MAX = 1048576, }; enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_USER_SHIFT = 0, PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, PERF_SAMPLE_BRANCH_HV_SHIFT = 2, PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, PERF_SAMPLE_BRANCH_COND_SHIFT = 10, PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, PERF_SAMPLE_BRANCH_MAX_SHIFT = 20, }; enum perf_callchain_context { PERF_CONTEXT_HV = 18446744073709551584ULL, PERF_CONTEXT_KERNEL = 18446744073709551488ULL, PERF_CONTEXT_USER = 18446744073709551104ULL, PERF_CONTEXT_GUEST = 18446744073709549568ULL, PERF_CONTEXT_GUEST_KERNEL = 18446744073709549440ULL, PERF_CONTEXT_GUEST_USER = 18446744073709549056ULL, PERF_CONTEXT_MAX = 18446744073709547521ULL, }; enum perf_cstate_core_events { PERF_CSTATE_CORE_C1_RES = 0, PERF_CSTATE_CORE_C3_RES = 1, PERF_CSTATE_CORE_C6_RES = 2, PERF_CSTATE_CORE_C7_RES = 3, PERF_CSTATE_CORE_EVENT_MAX = 4, }; enum perf_cstate_module_events { PERF_CSTATE_MODULE_C6_RES = 0, PERF_CSTATE_MODULE_EVENT_MAX = 1, }; enum perf_cstate_pkg_events { PERF_CSTATE_PKG_C2_RES = 0, PERF_CSTATE_PKG_C3_RES = 1, PERF_CSTATE_PKG_C6_RES = 2, PERF_CSTATE_PKG_C7_RES = 3, PERF_CSTATE_PKG_C8_RES = 4, 
PERF_CSTATE_PKG_C9_RES = 5, PERF_CSTATE_PKG_C10_RES = 6, PERF_CSTATE_PKG_EVENT_MAX = 7, }; enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1, }; enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_ENABLED = 1, PERF_FORMAT_TOTAL_TIME_RUNNING = 2, PERF_FORMAT_ID = 4, PERF_FORMAT_GROUP = 8, PERF_FORMAT_LOST = 16, PERF_FORMAT_MAX = 32, }; enum perf_event_sample_format { PERF_SAMPLE_IP = 1, PERF_SAMPLE_TID = 2, PERF_SAMPLE_TIME = 4, PERF_SAMPLE_ADDR = 8, PERF_SAMPLE_READ = 16, PERF_SAMPLE_CALLCHAIN = 32, PERF_SAMPLE_ID = 64, PERF_SAMPLE_CPU = 128, PERF_SAMPLE_PERIOD = 256, PERF_SAMPLE_STREAM_ID = 512, PERF_SAMPLE_RAW = 1024, PERF_SAMPLE_BRANCH_STACK = 2048, PERF_SAMPLE_REGS_USER = 4096, PERF_SAMPLE_STACK_USER = 8192, PERF_SAMPLE_WEIGHT = 16384, PERF_SAMPLE_DATA_SRC = 32768, PERF_SAMPLE_IDENTIFIER = 65536, PERF_SAMPLE_TRANSACTION = 131072, PERF_SAMPLE_REGS_INTR = 262144, PERF_SAMPLE_PHYS_ADDR = 524288, PERF_SAMPLE_AUX = 1048576, PERF_SAMPLE_CGROUP = 2097152, PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, PERF_SAMPLE_WEIGHT_STRUCT = 16777216, PERF_SAMPLE_MAX = 33554432, }; enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, }; enum perf_event_task_context { perf_invalid_context = -1, perf_hw_context = 0, perf_sw_context = 1, perf_nr_task_contexts = 2, }; enum perf_event_type { PERF_RECORD_MMAP = 1, PERF_RECORD_LOST = 2, PERF_RECORD_COMM = 3, PERF_RECORD_EXIT = 4, PERF_RECORD_THROTTLE = 5, PERF_RECORD_UNTHROTTLE = 6, PERF_RECORD_FORK = 7, PERF_RECORD_READ = 8, PERF_RECORD_SAMPLE = 9, PERF_RECORD_MMAP2 = 10, PERF_RECORD_AUX = 11, PERF_RECORD_ITRACE_START = 12, PERF_RECORD_LOST_SAMPLES = 13, PERF_RECORD_SWITCH = 14, PERF_RECORD_SWITCH_CPU_WIDE = 15, PERF_RECORD_NAMESPACES = 16, PERF_RECORD_KSYMBOL = 17, PERF_RECORD_BPF_EVENT = 18, PERF_RECORD_CGROUP = 19, PERF_RECORD_TEXT_POKE = 20, PERF_RECORD_AUX_OUTPUT_HW_ID = 21, PERF_RECORD_MAX = 22, }; enum perf_event_x86_regs { PERF_REG_X86_AX = 0, PERF_REG_X86_BX = 1, PERF_REG_X86_CX = 2, PERF_REG_X86_DX = 3, PERF_REG_X86_SI = 4, PERF_REG_X86_DI = 5, PERF_REG_X86_BP = 6, PERF_REG_X86_SP = 7, PERF_REG_X86_IP = 8, PERF_REG_X86_FLAGS = 9, PERF_REG_X86_CS = 10, PERF_REG_X86_SS = 11, PERF_REG_X86_DS = 12, PERF_REG_X86_ES = 13, PERF_REG_X86_FS = 14, PERF_REG_X86_GS = 15, PERF_REG_X86_R8 = 16, PERF_REG_X86_R9 = 17, PERF_REG_X86_R10 = 18, PERF_REG_X86_R11 = 19, PERF_REG_X86_R12 = 20, PERF_REG_X86_R13 = 21, PERF_REG_X86_R14 = 22, PERF_REG_X86_R15 = 23, PERF_REG_X86_32_MAX = 16, PERF_REG_X86_64_MAX = 24, PERF_REG_X86_XMM0 = 32, PERF_REG_X86_XMM1 = 34, PERF_REG_X86_XMM2 = 36, PERF_REG_X86_XMM3 = 38, PERF_REG_X86_XMM4 = 40, PERF_REG_X86_XMM5 = 42, PERF_REG_X86_XMM6 = 44, PERF_REG_X86_XMM7 = 46, PERF_REG_X86_XMM8 = 48, PERF_REG_X86_XMM9 = 50, PERF_REG_X86_XMM10 = 52, PERF_REG_X86_XMM11 = 54, PERF_REG_X86_XMM12 = 56, PERF_REG_X86_XMM13 = 58, PERF_REG_X86_XMM14 = 60, PERF_REG_X86_XMM15 = 62, PERF_REG_X86_XMM_MAX = 64, }; enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX = 7, }; enum perf_hw_cache_op_id { PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1, PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, PERF_COUNT_HW_CACHE_OP_MAX = 3, }; enum perf_hw_cache_op_result_id { 
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, PERF_COUNT_HW_CACHE_RESULT_MAX = 2, }; enum perf_hw_id { PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX = 10, }; enum perf_msr_id { PERF_MSR_TSC = 0, PERF_MSR_APERF = 1, PERF_MSR_MPERF = 2, PERF_MSR_PPERF = 3, PERF_MSR_SMI = 4, PERF_MSR_PTSC = 5, PERF_MSR_IRPERF = 6, PERF_MSR_THERM = 7, PERF_MSR_EVENT_MAX = 8, }; enum perf_pmu_scope { PERF_PMU_SCOPE_NONE = 0, PERF_PMU_SCOPE_CORE = 1, PERF_PMU_SCOPE_DIE = 2, PERF_PMU_SCOPE_CLUSTER = 3, PERF_PMU_SCOPE_PKG = 4, PERF_PMU_SCOPE_SYS_WIDE = 5, PERF_PMU_MAX_SCOPE = 6, }; enum perf_probe_config { PERF_PROBE_CONFIG_IS_RETPROBE = 1, PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, }; enum perf_rapl_events { PERF_RAPL_PP0 = 0, PERF_RAPL_PKG = 1, PERF_RAPL_RAM = 2, PERF_RAPL_PP1 = 3, PERF_RAPL_PSYS = 4, PERF_RAPL_MAX = 5, NR_RAPL_DOMAINS = 5, }; enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX = 3, }; enum perf_sample_regs_abi { PERF_SAMPLE_REGS_ABI_NONE = 0, PERF_SAMPLE_REGS_ABI_32 = 1, PERF_SAMPLE_REGS_ABI_64 = 2, }; enum perf_snb_uncore_imc_freerunning_types { SNB_PCI_UNCORE_IMC_DATA_READS = 0, SNB_PCI_UNCORE_IMC_DATA_WRITES = 1, SNB_PCI_UNCORE_IMC_GT_REQUESTS = 2, SNB_PCI_UNCORE_IMC_IA_REQUESTS = 3, SNB_PCI_UNCORE_IMC_IO_REQUESTS = 4, SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX = 5, }; enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX = 12, }; enum perf_tgl_uncore_imc_freerunning_types { TGL_MMIO_UNCORE_IMC_DATA_TOTAL = 0, TGL_MMIO_UNCORE_IMC_DATA_READ = 1, TGL_MMIO_UNCORE_IMC_DATA_WRITE = 2, TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX = 3, }; enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX = 6, }; enum perf_uncore_icx_iio_freerunning_type_id { ICX_IIO_MSR_IOCLK = 0, ICX_IIO_MSR_BW_IN = 1, ICX_IIO_FREERUNNING_TYPE_MAX = 2, }; enum perf_uncore_icx_imc_freerunning_type_id { ICX_IMC_DCLK = 0, ICX_IMC_DDR = 1, ICX_IMC_DDRT = 2, ICX_IMC_FREERUNNING_TYPE_MAX = 3, }; enum perf_uncore_iio_freerunning_type_id { SKX_IIO_MSR_IOCLK = 0, SKX_IIO_MSR_BW = 1, SKX_IIO_MSR_UTIL = 2, SKX_IIO_FREERUNNING_TYPE_MAX = 3, }; enum perf_uncore_snr_iio_freerunning_type_id { SNR_IIO_MSR_IOCLK = 0, SNR_IIO_MSR_BW_IN = 1, SNR_IIO_FREERUNNING_TYPE_MAX = 2, }; enum perf_uncore_snr_imc_freerunning_type_id { SNR_IMC_DCLK = 0, SNR_IMC_DDR = 1, SNR_IMC_FREERUNNING_TYPE_MAX = 2, }; enum perf_uncore_spr_iio_freerunning_type_id { SPR_IIO_MSR_IOCLK = 0, SPR_IIO_MSR_BW_IN = 1, SPR_IIO_MSR_BW_OUT = 2, SPR_IIO_FREERUNNING_TYPE_MAX = 3, }; enum perf_uncore_spr_imc_freerunning_type_id { SPR_IMC_DCLK = 0, SPR_IMC_PQ_CYCLES = 1, SPR_IMC_FREERUNNING_TYPE_MAX = 2, }; 
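/*
 * Usage sketch (illustrative; the function name is hypothetical): the
 * perf_type_id and perf_hw_id values above are exactly what userspace
 * passes to perf_event_open(2). Assuming the standard <linux/perf_event.h>
 * UAPI header, a bare-bones CPU-cycle counter for the calling thread could
 * look roughly like this.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static int count_cycles_sketch(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;         /* = 0, enum perf_type_id above */
	attr.config = PERF_COUNT_HW_CPU_CYCLES; /* = 0, enum perf_hw_id above */
	attr.disabled = 1;                      /* start stopped, enable explicitly */
	attr.exclude_kernel = 1;                /* count user-space cycles only */

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return -1;
	}
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured goes here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}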
enum pg_level { PG_LEVEL_NONE = 0, PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3, PG_LEVEL_512G = 4, PG_LEVEL_256T = 5, PG_LEVEL_NUM = 6, }; enum pgdat_flags { PGDAT_DIRTY = 0, PGDAT_WRITEBACK = 1, PGDAT_RECLAIM_LOCKED = 2, }; enum pgt_entry { NORMAL_PMD = 0, HPAGE_PMD = 1, NORMAL_PUD = 2, HPAGE_PUD = 3, }; enum phy_media { PHY_MEDIA_DEFAULT = 0, PHY_MEDIA_SR = 1, PHY_MEDIA_DAC = 2, }; enum phy_mode { PHY_MODE_INVALID = 0, PHY_MODE_USB_HOST = 1, PHY_MODE_USB_HOST_LS = 2, PHY_MODE_USB_HOST_FS = 3, PHY_MODE_USB_HOST_HS = 4, PHY_MODE_USB_HOST_SS = 5, PHY_MODE_USB_DEVICE = 6, PHY_MODE_USB_DEVICE_LS = 7, PHY_MODE_USB_DEVICE_FS = 8, PHY_MODE_USB_DEVICE_HS = 9, PHY_MODE_USB_DEVICE_SS = 10, PHY_MODE_USB_OTG = 11, PHY_MODE_UFS_HS_A = 12, PHY_MODE_UFS_HS_B = 13, PHY_MODE_PCIE = 14, PHY_MODE_ETHERNET = 15, PHY_MODE_MIPI_DPHY = 16, PHY_MODE_SATA = 17, PHY_MODE_LVDS = 18, PHY_MODE_DP = 19, }; enum phy_state { PHY_DOWN = 0, PHY_READY = 1, PHY_HALTED = 2, PHY_ERROR = 3, PHY_UP = 4, PHY_RUNNING = 5, PHY_NOLINK = 6, PHY_CABLETEST = 7, }; enum phy_tunable_id { ETHTOOL_PHY_ID_UNSPEC = 0, ETHTOOL_PHY_DOWNSHIFT = 1, ETHTOOL_PHY_FAST_LINK_DOWN = 2, ETHTOOL_PHY_EDPD = 3, __ETHTOOL_PHY_TUNABLE_COUNT = 4, }; enum phy_upstream { PHY_UPSTREAM_MAC = 0, PHY_UPSTREAM_PHY = 1, }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_TGID = 1, PIDTYPE_PGID = 2, PIDTYPE_SID = 3, PIDTYPE_MAX = 4, }; enum pkcs7_actions { ACT_pkcs7_check_content_type = 0, ACT_pkcs7_extract_cert = 1, ACT_pkcs7_note_OID = 2, ACT_pkcs7_note_certificate_list = 3, ACT_pkcs7_note_content = 4, ACT_pkcs7_note_data = 5, ACT_pkcs7_note_signed_info = 6, ACT_pkcs7_note_signeddata_version = 7, ACT_pkcs7_note_signerinfo_version = 8, ACT_pkcs7_sig_note_authenticated_attr = 9, ACT_pkcs7_sig_note_digest_algo = 10, ACT_pkcs7_sig_note_issuer = 11, ACT_pkcs7_sig_note_pkey_algo = 12, ACT_pkcs7_sig_note_serial = 13, ACT_pkcs7_sig_note_set_of_authattrs = 14, ACT_pkcs7_sig_note_signature = 15, ACT_pkcs7_sig_note_skid = 16, NR__pkcs7_actions = 17, }; enum pkey_id_type { PKEY_ID_PGP = 0, PKEY_ID_X509 = 1, PKEY_ID_PKCS7 = 2, }; enum pkt_hash_types { PKT_HASH_TYPE_NONE = 0, PKT_HASH_TYPE_L2 = 1, PKT_HASH_TYPE_L3 = 2, PKT_HASH_TYPE_L4 = 3, }; enum pm_qos_flags_status { PM_QOS_FLAGS_UNDEFINED = -1, PM_QOS_FLAGS_NONE = 0, PM_QOS_FLAGS_SOME = 1, PM_QOS_FLAGS_ALL = 2, }; enum pm_qos_req_action { PM_QOS_ADD_REQ = 0, PM_QOS_UPDATE_REQ = 1, PM_QOS_REMOVE_REQ = 2, }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2, }; enum pmc_type { KVM_PMC_GP = 0, KVM_PMC_FIXED = 1, }; enum policy_opt { Opt_measure = 0, Opt_dont_measure = 1, Opt_appraise = 2, Opt_dont_appraise = 3, Opt_audit = 4, Opt_hash___2 = 5, Opt_dont_hash = 6, Opt_obj_user = 7, Opt_obj_role = 8, Opt_obj_type = 9, Opt_subj_user = 10, Opt_subj_role = 11, Opt_subj_type = 12, Opt_func = 13, Opt_mask = 14, Opt_fsmagic = 15, Opt_fsname = 16, Opt_fsuuid = 17, Opt_uid_eq = 18, Opt_euid_eq = 19, Opt_gid_eq = 20, Opt_egid_eq = 21, Opt_fowner_eq = 22, Opt_fgroup_eq = 23, Opt_uid_gt = 24, Opt_euid_gt = 25, Opt_gid_gt = 26, Opt_egid_gt = 27, Opt_fowner_gt = 28, Opt_fgroup_gt = 29, Opt_uid_lt = 30, Opt_euid_lt = 31, Opt_gid_lt = 32, Opt_egid_lt = 33, Opt_fowner_lt = 34, Opt_fgroup_lt = 35, Opt_digest_type = 36, Opt_appraise_type = 37, Opt_appraise_flag = 38, Opt_appraise_algos = 39, Opt_permit_directio = 40, Opt_pcr = 41, Opt_template = 42, Opt_keyrings = 43, Opt_label = 44, Opt_err___4 = 45, }; enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY = 2, }; enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB 
= 2, }; enum poll_time_type { PT_TIMEVAL = 0, PT_OLD_TIMEVAL = 1, PT_TIMESPEC = 2, PT_OLD_TIMESPEC = 3, }; enum pool_workqueue_stats { PWQ_STAT_STARTED = 0, PWQ_STAT_COMPLETED = 1, PWQ_STAT_CPU_TIME = 2, PWQ_STAT_CPU_INTENSIVE = 3, PWQ_STAT_CM_WAKEUP = 4, PWQ_STAT_REPATRIATED = 5, PWQ_STAT_MAYDAY = 6, PWQ_STAT_RESCUED = 7, PWQ_NR_STATS = 8, }; enum port_pkey_state { IB_PORT_PKEY_NOT_VALID = 0, IB_PORT_PKEY_VALID = 1, IB_PORT_PKEY_LISTED = 2, }; enum positive_aop_returns { AOP_WRITEPAGE_ACTIVATE = 524288, AOP_TRUNCATED_PAGE = 524289, }; enum power_supply_charge_behaviour { POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0, POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE = 1, POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE = 2, }; enum power_supply_notifier_events { PSY_EVENT_PROP_CHANGED = 0, }; enum power_supply_property { POWER_SUPPLY_PROP_STATUS = 0, POWER_SUPPLY_PROP_CHARGE_TYPE = 1, POWER_SUPPLY_PROP_HEALTH = 2, POWER_SUPPLY_PROP_PRESENT = 3, POWER_SUPPLY_PROP_ONLINE = 4, POWER_SUPPLY_PROP_AUTHENTIC = 5, POWER_SUPPLY_PROP_TECHNOLOGY = 6, POWER_SUPPLY_PROP_CYCLE_COUNT = 7, POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, POWER_SUPPLY_PROP_CURRENT_MAX = 16, POWER_SUPPLY_PROP_CURRENT_NOW = 17, POWER_SUPPLY_PROP_CURRENT_AVG = 18, POWER_SUPPLY_PROP_CURRENT_BOOT = 19, POWER_SUPPLY_PROP_POWER_NOW = 20, POWER_SUPPLY_PROP_POWER_AVG = 21, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, POWER_SUPPLY_PROP_CHARGE_FULL = 24, POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, POWER_SUPPLY_PROP_CHARGE_NOW = 26, POWER_SUPPLY_PROP_CHARGE_AVG = 27, POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR = 37, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 38, POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 39, POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 40, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 41, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 42, POWER_SUPPLY_PROP_ENERGY_FULL = 43, POWER_SUPPLY_PROP_ENERGY_EMPTY = 44, POWER_SUPPLY_PROP_ENERGY_NOW = 45, POWER_SUPPLY_PROP_ENERGY_AVG = 46, POWER_SUPPLY_PROP_CAPACITY = 47, POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 48, POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 49, POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 50, POWER_SUPPLY_PROP_CAPACITY_LEVEL = 51, POWER_SUPPLY_PROP_TEMP = 52, POWER_SUPPLY_PROP_TEMP_MAX = 53, POWER_SUPPLY_PROP_TEMP_MIN = 54, POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 55, POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 56, POWER_SUPPLY_PROP_TEMP_AMBIENT = 57, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 58, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 59, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 60, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 61, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 62, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 63, POWER_SUPPLY_PROP_TYPE = 64, POWER_SUPPLY_PROP_USB_TYPE = 65, POWER_SUPPLY_PROP_SCOPE = 66, POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 67, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 68, POWER_SUPPLY_PROP_CALIBRATE = 69, 
POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 70, POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 71, POWER_SUPPLY_PROP_MANUFACTURE_DAY = 72, POWER_SUPPLY_PROP_MODEL_NAME = 73, POWER_SUPPLY_PROP_MANUFACTURER = 74, POWER_SUPPLY_PROP_SERIAL_NUMBER = 75, }; enum power_supply_type { POWER_SUPPLY_TYPE_UNKNOWN = 0, POWER_SUPPLY_TYPE_BATTERY = 1, POWER_SUPPLY_TYPE_UPS = 2, POWER_SUPPLY_TYPE_MAINS = 3, POWER_SUPPLY_TYPE_USB = 4, POWER_SUPPLY_TYPE_USB_DCP = 5, POWER_SUPPLY_TYPE_USB_CDP = 6, POWER_SUPPLY_TYPE_USB_ACA = 7, POWER_SUPPLY_TYPE_USB_TYPE_C = 8, POWER_SUPPLY_TYPE_USB_PD = 9, POWER_SUPPLY_TYPE_USB_PD_DRP = 10, POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, POWER_SUPPLY_TYPE_WIRELESS = 12, }; enum power_supply_usb_type { POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, POWER_SUPPLY_USB_TYPE_SDP = 1, POWER_SUPPLY_USB_TYPE_DCP = 2, POWER_SUPPLY_USB_TYPE_CDP = 3, POWER_SUPPLY_USB_TYPE_ACA = 4, POWER_SUPPLY_USB_TYPE_C = 5, POWER_SUPPLY_USB_TYPE_PD = 6, POWER_SUPPLY_USB_TYPE_PD_DRP = 7, POWER_SUPPLY_USB_TYPE_PD_PPS = 8, POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, }; enum pr_type { PR_WRITE_EXCLUSIVE = 1, PR_EXCLUSIVE_ACCESS = 2, PR_WRITE_EXCLUSIVE_REG_ONLY = 3, PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, PR_WRITE_EXCLUSIVE_ALL_REGS = 5, PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, }; enum prep_dispatch { PREP_DISPATCH_OK = 0, PREP_DISPATCH_NO_TAG = 1, PREP_DISPATCH_NO_BUDGET = 2, }; enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2, TRACE_TYPE_NO_CONSUME = 3, }; enum printk_info_flags { LOG_NEWLINE = 2, LOG_CONT = 8, }; enum probe_print_type { PROBE_PRINT_NORMAL = 0, PROBE_PRINT_RETURN = 1, PROBE_PRINT_EVENT = 2, }; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2, }; enum proc_cn_event { PROC_EVENT_NONE = 0, PROC_EVENT_FORK = 1, PROC_EVENT_EXEC = 2, PROC_EVENT_UID = 4, PROC_EVENT_GID = 64, PROC_EVENT_SID = 128, PROC_EVENT_PTRACE = 256, PROC_EVENT_COMM = 512, PROC_EVENT_NONZERO_EXIT = 536870912, PROC_EVENT_COREDUMP = 1073741824, PROC_EVENT_EXIT = 2147483648, }; enum proc_hidepid { HIDEPID_OFF = 0, HIDEPID_NO_ACCESS = 1, HIDEPID_INVISIBLE = 2, HIDEPID_NOT_PTRACEABLE = 4, }; enum proc_mem_force { PROC_MEM_FORCE_ALWAYS = 0, PROC_MEM_FORCE_PTRACE = 1, PROC_MEM_FORCE_NEVER = 2, }; enum proc_param { Opt_gid___5 = 0, Opt_hidepid = 1, Opt_subset = 2, }; enum proc_pidonly { PROC_PIDONLY_OFF = 0, PROC_PIDONLY_ON = 1, }; enum procmap_query_flags { PROCMAP_QUERY_VMA_READABLE = 1, PROCMAP_QUERY_VMA_WRITABLE = 2, PROCMAP_QUERY_VMA_EXECUTABLE = 4, PROCMAP_QUERY_VMA_SHARED = 8, PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 16, PROCMAP_QUERY_FILE_BACKED_VMA = 32, }; enum prs_errcode { PERR_NONE = 0, PERR_INVCPUS = 1, PERR_INVPARENT = 2, PERR_NOTPART = 3, PERR_NOTEXCL = 4, PERR_NOCPUS = 5, PERR_HOTPLUG = 6, PERR_CPUSEMPTY = 7, PERR_HKEEPING = 8, PERR_ACCESS = 9, }; enum ps2_disposition { PS2_PROCESS = 0, PS2_IGNORE = 1, PS2_ERROR = 2, }; enum psmouse_scale { PSMOUSE_SCALE11 = 0, PSMOUSE_SCALE21 = 1, }; enum psmouse_state { PSMOUSE_IGNORE = 0, PSMOUSE_INITIALIZING = 1, PSMOUSE_RESYNCING = 2, PSMOUSE_CMD_MODE = 3, PSMOUSE_ACTIVATED = 4, }; enum psmouse_type { PSMOUSE_NONE = 0, PSMOUSE_PS2 = 1, PSMOUSE_PS2PP = 2, PSMOUSE_THINKPS = 3, PSMOUSE_GENPS = 4, PSMOUSE_IMPS = 5, PSMOUSE_IMEX = 6, PSMOUSE_SYNAPTICS = 7, PSMOUSE_ALPS = 8, PSMOUSE_LIFEBOOK = 9, PSMOUSE_TRACKPOINT = 10, PSMOUSE_TOUCHKIT_PS2 = 11, PSMOUSE_CORTRON = 12, PSMOUSE_HGPK = 13, PSMOUSE_ELANTECH = 14, PSMOUSE_FSP = 15, PSMOUSE_SYNAPTICS_RELATIVE = 16, PSMOUSE_CYPRESS = 17, PSMOUSE_FOCALTECH = 18, PSMOUSE_VMMOUSE = 19, 
PSMOUSE_BYD = 20, PSMOUSE_SYNAPTICS_SMBUS = 21, PSMOUSE_ELANTECH_SMBUS = 22, PSMOUSE_AUTO = 23, }; enum pt_capabilities { PT_CAP_max_subleaf = 0, PT_CAP_cr3_filtering = 1, PT_CAP_psb_cyc = 2, PT_CAP_ip_filtering = 3, PT_CAP_mtc = 4, PT_CAP_ptwrite = 5, PT_CAP_power_event_trace = 6, PT_CAP_event_trace = 7, PT_CAP_tnt_disable = 8, PT_CAP_topa_output = 9, PT_CAP_topa_multiple_entries = 10, PT_CAP_single_range_output = 11, PT_CAP_output_subsys = 12, PT_CAP_payloads_lip = 13, PT_CAP_num_address_ranges = 14, PT_CAP_mtc_periods = 15, PT_CAP_cycle_thresholds = 16, PT_CAP_psb_periods = 17, }; enum pti_clone_level { PTI_CLONE_PMD = 0, PTI_CLONE_PTE = 1, }; enum pti_mode { PTI_AUTO = 0, PTI_FORCE_OFF = 1, PTI_FORCE_ON = 2, }; enum ptp_clock_events { PTP_CLOCK_ALARM = 0, PTP_CLOCK_EXTTS = 1, PTP_CLOCK_EXTOFF = 2, PTP_CLOCK_PPS = 3, PTP_CLOCK_PPSUSR = 4, }; enum ptp_pin_function { PTP_PF_NONE = 0, PTP_PF_EXTTS = 1, PTP_PF_PEROUT = 2, PTP_PF_PHYSYNC = 3, }; enum qdisc_class_ops_flags { QDISC_CLASS_OPS_DOIT_UNLOCKED = 1, }; enum qdisc_state2_t { __QDISC_STATE2_RUNNING = 0, }; enum qdisc_state_t { __QDISC_STATE_SCHED = 0, __QDISC_STATE_DEACTIVATED = 1, __QDISC_STATE_MISSED = 2, __QDISC_STATE_DRAINING = 3, }; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2, }; enum ramfs_param { Opt_mode___5 = 0, }; enum rapl_unit_quirk { RAPL_UNIT_QUIRK_NONE = 0, RAPL_UNIT_QUIRK_INTEL_HSW = 1, RAPL_UNIT_QUIRK_INTEL_SPR = 2, }; enum rc_driver_type { RC_DRIVER_SCANCODE = 0, RC_DRIVER_IR_RAW = 1, RC_DRIVER_IR_RAW_TX = 2, }; enum rc_filter_type { RC_FILTER_NORMAL = 0, RC_FILTER_WAKEUP = 1, RC_FILTER_MAX = 2, }; enum rc_proto { RC_PROTO_UNKNOWN = 0, RC_PROTO_OTHER = 1, RC_PROTO_RC5 = 2, RC_PROTO_RC5X_20 = 3, RC_PROTO_RC5_SZ = 4, RC_PROTO_JVC = 5, RC_PROTO_SONY12 = 6, RC_PROTO_SONY15 = 7, RC_PROTO_SONY20 = 8, RC_PROTO_NEC = 9, RC_PROTO_NECX = 10, RC_PROTO_NEC32 = 11, RC_PROTO_SANYO = 12, RC_PROTO_MCIR2_KBD = 13, RC_PROTO_MCIR2_MSE = 14, RC_PROTO_RC6_0 = 15, RC_PROTO_RC6_6A_20 = 16, RC_PROTO_RC6_6A_24 = 17, RC_PROTO_RC6_6A_32 = 18, RC_PROTO_RC6_MCE = 19, RC_PROTO_SHARP = 20, RC_PROTO_XMP = 21, RC_PROTO_CEC = 22, RC_PROTO_IMON = 23, RC_PROTO_RCMM12 = 24, RC_PROTO_RCMM24 = 25, RC_PROTO_RCMM32 = 26, RC_PROTO_XBOX_DVD = 27, RC_PROTO_MAX = 27, }; enum rdma_ah_attr_type { RDMA_AH_ATTR_TYPE_UNDEFINED = 0, RDMA_AH_ATTR_TYPE_IB = 1, RDMA_AH_ATTR_TYPE_ROCE = 2, RDMA_AH_ATTR_TYPE_OPA = 3, }; enum rdma_driver_id { RDMA_DRIVER_UNKNOWN = 0, RDMA_DRIVER_MLX5 = 1, RDMA_DRIVER_MLX4 = 2, RDMA_DRIVER_CXGB3 = 3, RDMA_DRIVER_CXGB4 = 4, RDMA_DRIVER_MTHCA = 5, RDMA_DRIVER_BNXT_RE = 6, RDMA_DRIVER_OCRDMA = 7, RDMA_DRIVER_NES = 8, RDMA_DRIVER_I40IW = 9, RDMA_DRIVER_IRDMA = 9, RDMA_DRIVER_VMW_PVRDMA = 10, RDMA_DRIVER_QEDR = 11, RDMA_DRIVER_HNS = 12, RDMA_DRIVER_USNIC = 13, RDMA_DRIVER_RXE = 14, RDMA_DRIVER_HFI1 = 15, RDMA_DRIVER_QIB = 16, RDMA_DRIVER_EFA = 17, RDMA_DRIVER_SIW = 18, RDMA_DRIVER_ERDMA = 19, RDMA_DRIVER_MANA = 20, }; enum rdma_link_layer { IB_LINK_LAYER_UNSPECIFIED = 0, IB_LINK_LAYER_INFINIBAND = 1, IB_LINK_LAYER_ETHERNET = 2, }; enum rdma_netdev_t { RDMA_NETDEV_OPA_VNIC = 0, RDMA_NETDEV_IPOIB = 1, }; enum rdma_nl_counter_mask { RDMA_COUNTER_MASK_QP_TYPE = 1, RDMA_COUNTER_MASK_PID = 2, }; enum rdma_nl_counter_mode { RDMA_COUNTER_MODE_NONE = 0, RDMA_COUNTER_MODE_AUTO = 1, RDMA_COUNTER_MODE_MANUAL = 2, RDMA_COUNTER_MODE_MAX = 3, }; enum rdma_nl_dev_type { RDMA_DEVICE_TYPE_SMI = 1, }; enum rdma_nl_name_assign_type { RDMA_NAME_ASSIGN_TYPE_UNKNOWN = 0, RDMA_NAME_ASSIGN_TYPE_USER = 1, }; enum rdma_restrack_type { RDMA_RESTRACK_PD = 0, 
RDMA_RESTRACK_CQ = 1, RDMA_RESTRACK_QP = 2, RDMA_RESTRACK_CM_ID = 3, RDMA_RESTRACK_MR = 4, RDMA_RESTRACK_CTX = 5, RDMA_RESTRACK_COUNTER = 6, RDMA_RESTRACK_SRQ = 7, RDMA_RESTRACK_MAX = 8, }; enum reboot_mode { REBOOT_UNDEFINED = -1, REBOOT_COLD = 0, REBOOT_WARM = 1, REBOOT_HARD = 2, REBOOT_SOFT = 3, REBOOT_GPIO = 4, }; enum reboot_type { BOOT_TRIPLE = 116, BOOT_KBD = 107, BOOT_BIOS = 98, BOOT_ACPI = 97, BOOT_EFI = 101, BOOT_CF9_FORCE = 112, BOOT_CF9_SAFE = 113, }; enum refcount_saturation_type { REFCOUNT_ADD_NOT_ZERO_OVF = 0, REFCOUNT_ADD_OVF = 1, REFCOUNT_ADD_UAF = 2, REFCOUNT_SUB_UAF = 3, REFCOUNT_DEC_LEAK = 4, }; enum reg_arg_type { SRC_OP = 0, DST_OP = 1, DST_OP_NO_MARK = 2, }; enum reg_type { REG_TYPE_RM = 0, REG_TYPE_REG = 1, REG_TYPE_INDEX = 2, REG_TYPE_BASE = 3, }; enum regex_type { MATCH_FULL = 0, MATCH_FRONT_ONLY = 1, MATCH_MIDDLE_ONLY = 2, MATCH_END_ONLY = 3, MATCH_GLOB = 4, MATCH_INDEX = 5, }; enum release_type { leaf_only = 0, whole_subtree = 1, }; enum req_flag_bits { __REQ_FAILFAST_DEV = 8, __REQ_FAILFAST_TRANSPORT = 9, __REQ_FAILFAST_DRIVER = 10, __REQ_SYNC = 11, __REQ_META = 12, __REQ_PRIO = 13, __REQ_NOMERGE = 14, __REQ_IDLE = 15, __REQ_INTEGRITY = 16, __REQ_FUA = 17, __REQ_PREFLUSH = 18, __REQ_RAHEAD = 19, __REQ_BACKGROUND = 20, __REQ_NOWAIT = 21, __REQ_POLLED = 22, __REQ_ALLOC_CACHE = 23, __REQ_SWAP = 24, __REQ_DRV = 25, __REQ_FS_PRIVATE = 26, __REQ_ATOMIC = 27, __REQ_NOUNMAP = 28, __REQ_NR_BITS = 29, }; enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_FLUSH = 2, REQ_OP_DISCARD = 3, REQ_OP_SECURE_ERASE = 5, REQ_OP_ZONE_APPEND = 7, REQ_OP_WRITE_ZEROES = 9, REQ_OP_ZONE_OPEN = 10, REQ_OP_ZONE_CLOSE = 11, REQ_OP_ZONE_FINISH = 12, REQ_OP_ZONE_RESET = 13, REQ_OP_ZONE_RESET_ALL = 15, REQ_OP_DRV_IN = 34, REQ_OP_DRV_OUT = 35, REQ_OP_LAST = 36, }; enum resctrl_conf_type { CDP_NONE = 0, CDP_CODE = 1, CDP_DATA = 2, }; enum resolve_mode { RESOLVE_TBD = 0, RESOLVE_PTR = 1, RESOLVE_STRUCT_OR_ARRAY = 2, }; enum retbleed_mitigation { RETBLEED_MITIGATION_NONE = 0, RETBLEED_MITIGATION_UNRET = 1, RETBLEED_MITIGATION_IBPB = 2, RETBLEED_MITIGATION_IBRS = 3, RETBLEED_MITIGATION_EIBRS = 4, RETBLEED_MITIGATION_STUFF = 5, }; enum retbleed_mitigation_cmd { RETBLEED_CMD_OFF = 0, RETBLEED_CMD_AUTO = 1, RETBLEED_CMD_UNRET = 2, RETBLEED_CMD_IBPB = 3, RETBLEED_CMD_STUFF = 4, }; enum rfds_mitigations { RFDS_MITIGATION_OFF = 0, RFDS_MITIGATION_VERW = 1, RFDS_MITIGATION_UCODE_NEEDED = 2, }; enum ring_buffer_flags { RB_FL_OVERWRITE = 1, }; enum ring_buffer_type { RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING = 29, RINGBUF_TYPE_TIME_EXTEND = 30, RINGBUF_TYPE_TIME_STAMP = 31, }; enum rlimit_type { UCOUNT_RLIMIT_NPROC = 0, UCOUNT_RLIMIT_MSGQUEUE = 1, UCOUNT_RLIMIT_SIGPENDING = 2, UCOUNT_RLIMIT_MEMLOCK = 3, UCOUNT_RLIMIT_COUNTS = 4, }; enum rmap_level { RMAP_LEVEL_PTE = 0, RMAP_LEVEL_PMD = 1, }; enum rmp_flags { RMP_LOCKED = 1, RMP_USE_SHARED_ZEROPAGE = 2, }; enum rp_check { RP_CHECK_CALL = 0, RP_CHECK_CHAIN_CALL = 1, RP_CHECK_RET = 2, }; enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT = 1, RPC_DISPLAY_PROTO = 2, RPC_DISPLAY_HEX_ADDR = 3, RPC_DISPLAY_HEX_PORT = 4, RPC_DISPLAY_NETID = 5, RPC_DISPLAY_MAX = 6, }; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4, }; enum rpm_status { RPM_INVALID = -1, RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3, }; enum rq_end_io_ret { RQ_END_IO_NONE = 0, RQ_END_IO_FREE = 1, }; enum rq_qos_id { RQ_QOS_WBT = 0, RQ_QOS_LATENCY = 1, RQ_QOS_COST = 2, 
}; enum rsaprivkey_actions { ACT_rsa_get_d = 0, ACT_rsa_get_dp = 1, ACT_rsa_get_dq = 2, ACT_rsa_get_e = 3, ACT_rsa_get_n = 4, ACT_rsa_get_p = 5, ACT_rsa_get_q = 6, ACT_rsa_get_qinv = 7, NR__rsaprivkey_actions = 8, }; enum rsapubkey_actions { ACT_rsa_get_e___2 = 0, ACT_rsa_get_n___2 = 1, NR__rsapubkey_actions = 2, }; enum rseq_cpu_id_state { RSEQ_CPU_ID_UNINITIALIZED = -1, RSEQ_CPU_ID_REGISTRATION_FAILED = -2, }; enum rseq_cs_flags { RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, }; enum rseq_cs_flags_bit { RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, }; enum rseq_event_mask_bits { RSEQ_EVENT_PREEMPT_BIT = 0, RSEQ_EVENT_SIGNAL_BIT = 1, RSEQ_EVENT_MIGRATE_BIT = 2, }; enum rseq_flags { RSEQ_FLAG_UNREGISTER = 1, }; enum rt6_nud_state { RT6_NUD_FAIL_HARD = -3, RT6_NUD_FAIL_PROBE = -2, RT6_NUD_FAIL_DO_RR = -1, RT6_NUD_SUCCEED = 1, }; enum rt_class_t { RT_TABLE_UNSPEC = 0, RT_TABLE_COMPAT = 252, RT_TABLE_DEFAULT = 253, RT_TABLE_MAIN = 254, RT_TABLE_LOCAL = 255, RT_TABLE_MAX = 4294967295, }; enum rt_scope_t { RT_SCOPE_UNIVERSE = 0, RT_SCOPE_SITE = 200, RT_SCOPE_LINK = 253, RT_SCOPE_HOST = 254, RT_SCOPE_NOWHERE = 255, }; enum rtattr_type_t { RTA_UNSPEC = 0, RTA_DST = 1, RTA_SRC = 2, RTA_IIF = 3, RTA_OIF = 4, RTA_GATEWAY = 5, RTA_PRIORITY = 6, RTA_PREFSRC = 7, RTA_METRICS = 8, RTA_MULTIPATH = 9, RTA_PROTOINFO = 10, RTA_FLOW = 11, RTA_CACHEINFO = 12, RTA_SESSION = 13, RTA_MP_ALGO = 14, RTA_TABLE = 15, RTA_MARK = 16, RTA_MFC_STATS = 17, RTA_VIA = 18, RTA_NEWDST = 19, RTA_PREF = 20, RTA_ENCAP_TYPE = 21, RTA_ENCAP = 22, RTA_EXPIRES = 23, RTA_PAD = 24, RTA_UID = 25, RTA_TTL_PROPAGATE = 26, RTA_IP_PROTO = 27, RTA_SPORT = 28, RTA_DPORT = 29, RTA_NH_ID = 30, __RTA_MAX = 31, }; enum rtmutex_chainwalk { RT_MUTEX_MIN_CHAINWALK = 0, RT_MUTEX_FULL_CHAINWALK = 1, }; enum rtnetlink_groups { RTNLGRP_NONE = 0, RTNLGRP_LINK = 1, RTNLGRP_NOTIFY = 2, RTNLGRP_NEIGH = 3, RTNLGRP_TC = 4, RTNLGRP_IPV4_IFADDR = 5, RTNLGRP_IPV4_MROUTE = 6, RTNLGRP_IPV4_ROUTE = 7, RTNLGRP_IPV4_RULE = 8, RTNLGRP_IPV6_IFADDR = 9, RTNLGRP_IPV6_MROUTE = 10, RTNLGRP_IPV6_ROUTE = 11, RTNLGRP_IPV6_IFINFO = 12, RTNLGRP_DECnet_IFADDR = 13, RTNLGRP_NOP2 = 14, RTNLGRP_DECnet_ROUTE = 15, RTNLGRP_DECnet_RULE = 16, RTNLGRP_NOP4 = 17, RTNLGRP_IPV6_PREFIX = 18, RTNLGRP_IPV6_RULE = 19, RTNLGRP_ND_USEROPT = 20, RTNLGRP_PHONET_IFADDR = 21, RTNLGRP_PHONET_ROUTE = 22, RTNLGRP_DCB = 23, RTNLGRP_IPV4_NETCONF = 24, RTNLGRP_IPV6_NETCONF = 25, RTNLGRP_MDB = 26, RTNLGRP_MPLS_ROUTE = 27, RTNLGRP_NSID = 28, RTNLGRP_MPLS_NETCONF = 29, RTNLGRP_IPV4_MROUTE_R = 30, RTNLGRP_IPV6_MROUTE_R = 31, RTNLGRP_NEXTHOP = 32, RTNLGRP_BRVLAN = 33, RTNLGRP_MCTP_IFADDR = 34, RTNLGRP_TUNNEL = 35, RTNLGRP_STATS = 36, __RTNLGRP_MAX = 37, }; enum rtnl_kinds { RTNL_KIND_NEW = 0, RTNL_KIND_DEL = 1, RTNL_KIND_GET = 2, RTNL_KIND_SET = 3, }; enum rtnl_link_flags { RTNL_FLAG_DOIT_UNLOCKED = 1, RTNL_FLAG_BULK_DEL_SUPPORTED = 2, RTNL_FLAG_DUMP_UNLOCKED = 4, RTNL_FLAG_DUMP_SPLIT_NLM_DONE = 8, }; enum rw_hint { WRITE_LIFE_NOT_SET = 0, WRITE_LIFE_NONE = 1, WRITE_LIFE_SHORT = 2, WRITE_LIFE_MEDIUM = 3, WRITE_LIFE_LONG = 4, WRITE_LIFE_EXTREME = 5, } __attribute__((mode(byte))); enum rwsem_waiter_type { RWSEM_WAITING_FOR_WRITE = 0, RWSEM_WAITING_FOR_READ = 1, }; enum rwsem_wake_type { RWSEM_WAKE_ANY = 0, RWSEM_WAKE_READERS = 1, RWSEM_WAKE_READ_OWNED = 2, }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, 
RX_HANDLER_PASS = 3, }; typedef enum rx_handler_result rx_handler_result_t; enum s2idle_states { S2IDLE_STATE_NONE = 0, S2IDLE_STATE_ENTER = 1, S2IDLE_STATE_WAKE = 2, }; enum s_alloc { sa_rootdomain = 0, sa_sd = 1, sa_sd_storage = 2, sa_none = 3, }; enum scan_balance { SCAN_EQUAL = 0, SCAN_FRACT = 1, SCAN_ANON = 2, SCAN_FILE = 3, }; enum scan_result { SCAN_FAIL = 0, SCAN_SUCCEED = 1, SCAN_PMD_NULL = 2, SCAN_PMD_NONE = 3, SCAN_PMD_MAPPED = 4, SCAN_EXCEED_NONE_PTE = 5, SCAN_EXCEED_SWAP_PTE = 6, SCAN_EXCEED_SHARED_PTE = 7, SCAN_PTE_NON_PRESENT = 8, SCAN_PTE_UFFD_WP = 9, SCAN_PTE_MAPPED_HUGEPAGE = 10, SCAN_PAGE_RO = 11, SCAN_LACK_REFERENCED_PAGE = 12, SCAN_PAGE_NULL = 13, SCAN_SCAN_ABORT = 14, SCAN_PAGE_COUNT = 15, SCAN_PAGE_LRU = 16, SCAN_PAGE_LOCK = 17, SCAN_PAGE_ANON = 18, SCAN_PAGE_COMPOUND = 19, SCAN_ANY_PROCESS = 20, SCAN_VMA_NULL = 21, SCAN_VMA_CHECK = 22, SCAN_ADDRESS_RANGE = 23, SCAN_DEL_PAGE_LRU = 24, SCAN_ALLOC_HUGE_PAGE_FAIL = 25, SCAN_CGROUP_CHARGE_FAIL = 26, SCAN_TRUNCATED = 27, SCAN_PAGE_HAS_PRIVATE = 28, SCAN_STORE_FAILED = 29, SCAN_COPY_MC = 30, SCAN_PAGE_FILLED = 31, }; enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE = 0, SCHED_TUNABLESCALING_LOG = 1, SCHED_TUNABLESCALING_LINEAR = 2, SCHED_TUNABLESCALING_END = 3, }; enum scsi_device_event { SDEV_EVT_MEDIA_CHANGE = 1, SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, SDEV_EVT_LUN_CHANGE_REPORTED = 6, SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, SDEV_EVT_FIRST = 1, SDEV_EVT_LAST = 8, SDEV_EVT_MAXBITS = 9, }; enum sctp_cid { SCTP_CID_DATA = 0, SCTP_CID_INIT = 1, SCTP_CID_INIT_ACK = 2, SCTP_CID_SACK = 3, SCTP_CID_HEARTBEAT = 4, SCTP_CID_HEARTBEAT_ACK = 5, SCTP_CID_ABORT = 6, SCTP_CID_SHUTDOWN = 7, SCTP_CID_SHUTDOWN_ACK = 8, SCTP_CID_ERROR = 9, SCTP_CID_COOKIE_ECHO = 10, SCTP_CID_COOKIE_ACK = 11, SCTP_CID_ECN_ECNE = 12, SCTP_CID_ECN_CWR = 13, SCTP_CID_SHUTDOWN_COMPLETE = 14, SCTP_CID_AUTH = 15, SCTP_CID_I_DATA = 64, SCTP_CID_FWD_TSN = 192, SCTP_CID_ASCONF = 193, SCTP_CID_I_FWD_TSN = 194, SCTP_CID_ASCONF_ACK = 128, SCTP_CID_RECONF = 130, SCTP_CID_PAD = 132, }; enum sctp_conntrack { SCTP_CONNTRACK_NONE = 0, SCTP_CONNTRACK_CLOSED = 1, SCTP_CONNTRACK_COOKIE_WAIT = 2, SCTP_CONNTRACK_COOKIE_ECHOED = 3, SCTP_CONNTRACK_ESTABLISHED = 4, SCTP_CONNTRACK_SHUTDOWN_SENT = 5, SCTP_CONNTRACK_SHUTDOWN_RECD = 6, SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, SCTP_CONNTRACK_HEARTBEAT_SENT = 8, SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, SCTP_CONNTRACK_MAX = 10, }; enum sctp_endpoint_type { SCTP_EP_TYPE_SOCKET = 0, SCTP_EP_TYPE_ASSOCIATION = 1, }; enum sctp_event_timeout { SCTP_EVENT_TIMEOUT_NONE = 0, SCTP_EVENT_TIMEOUT_T1_COOKIE = 1, SCTP_EVENT_TIMEOUT_T1_INIT = 2, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3, SCTP_EVENT_TIMEOUT_T3_RTX = 4, SCTP_EVENT_TIMEOUT_T4_RTO = 5, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6, SCTP_EVENT_TIMEOUT_HEARTBEAT = 7, SCTP_EVENT_TIMEOUT_RECONF = 8, SCTP_EVENT_TIMEOUT_PROBE = 9, SCTP_EVENT_TIMEOUT_SACK = 10, SCTP_EVENT_TIMEOUT_AUTOCLOSE = 11, }; enum sctp_msg_flags { MSG_NOTIFICATION = 32768, }; enum sctp_param { SCTP_PARAM_HEARTBEAT_INFO = 256, SCTP_PARAM_IPV4_ADDRESS = 1280, SCTP_PARAM_IPV6_ADDRESS = 1536, SCTP_PARAM_STATE_COOKIE = 1792, SCTP_PARAM_UNRECOGNIZED_PARAMETERS = 2048, SCTP_PARAM_COOKIE_PRESERVATIVE = 2304, SCTP_PARAM_HOST_NAME_ADDRESS = 2816, SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = 3072, SCTP_PARAM_ECN_CAPABLE = 128, SCTP_PARAM_RANDOM = 640, SCTP_PARAM_CHUNKS = 896, 
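/*
 * Note on the sctp_param values above and below: they look arbitrary in
 * decimal because the kernel defines them as big-endian on-wire constants
 * via cpu_to_be16(), and this dump comes from a little-endian x86 build;
 * e.g. 256 == cpu_to_be16(1) (SCTP_PARAM_HEARTBEAT_INFO) and
 * 1280 == cpu_to_be16(5) (SCTP_PARAM_IPV4_ADDRESS).
 */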
SCTP_PARAM_HMAC_ALGO = 1152, SCTP_PARAM_SUPPORTED_EXT = 2176, SCTP_PARAM_FWD_TSN_SUPPORT = 192, SCTP_PARAM_ADD_IP = 448, SCTP_PARAM_DEL_IP = 704, SCTP_PARAM_ERR_CAUSE = 960, SCTP_PARAM_SET_PRIMARY = 1216, SCTP_PARAM_SUCCESS_REPORT = 1472, SCTP_PARAM_ADAPTATION_LAYER_IND = 1728, SCTP_PARAM_RESET_OUT_REQUEST = 3328, SCTP_PARAM_RESET_IN_REQUEST = 3584, SCTP_PARAM_RESET_TSN_REQUEST = 3840, SCTP_PARAM_RESET_RESPONSE = 4096, SCTP_PARAM_RESET_ADD_OUT_STREAMS = 4352, SCTP_PARAM_RESET_ADD_IN_STREAMS = 4608, }; enum sctp_scope { SCTP_SCOPE_GLOBAL = 0, SCTP_SCOPE_PRIVATE = 1, SCTP_SCOPE_LINK = 2, SCTP_SCOPE_LOOPBACK = 3, SCTP_SCOPE_UNUSABLE = 4, }; enum sctp_socket_type { SCTP_SOCKET_UDP = 0, SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1, SCTP_SOCKET_TCP = 2, }; enum sctp_state { SCTP_STATE_CLOSED = 0, SCTP_STATE_COOKIE_WAIT = 1, SCTP_STATE_COOKIE_ECHOED = 2, SCTP_STATE_ESTABLISHED = 3, SCTP_STATE_SHUTDOWN_PENDING = 4, SCTP_STATE_SHUTDOWN_SENT = 5, SCTP_STATE_SHUTDOWN_RECEIVED = 6, SCTP_STATE_SHUTDOWN_ACK_SENT = 7, }; enum seg6_end_dt_mode { DT_INVALID_MODE = -22, DT_LEGACY_MODE = 0, DT_VRF_MODE = 1, }; enum seg6_local_flv_action { SEG6_LOCAL_FLV_ACT_UNSPEC = 0, SEG6_LOCAL_FLV_ACT_END = 1, SEG6_LOCAL_FLV_ACT_PSP = 2, SEG6_LOCAL_FLV_ACT_USP = 3, SEG6_LOCAL_FLV_ACT_USD = 4, __SEG6_LOCAL_FLV_ACT_MAX = 5, }; enum seg6_local_pktinfo { SEG6_LOCAL_PKTINFO_NOHDR = 0, SEG6_LOCAL_PKTINFO_SL_ZERO = 1, SEG6_LOCAL_PKTINFO_SL_ONE = 2, SEG6_LOCAL_PKTINFO_SL_MORE = 3, __SEG6_LOCAL_PKTINFO_MAX = 4, }; enum sel_inos { SEL_ROOT_INO = 2, SEL_LOAD = 3, SEL_ENFORCE = 4, SEL_CONTEXT = 5, SEL_ACCESS = 6, SEL_CREATE = 7, SEL_RELABEL = 8, SEL_USER = 9, SEL_POLICYVERS = 10, SEL_COMMIT_BOOLS = 11, SEL_MLS = 12, SEL_DISABLE = 13, SEL_MEMBER = 14, SEL_CHECKREQPROT = 15, SEL_COMPAT_NET = 16, SEL_REJECT_UNKNOWN = 17, SEL_DENY_UNKNOWN = 18, SEL_STATUS = 19, SEL_POLICY = 20, SEL_VALIDATE_TRANS = 21, SEL_INO_NEXT = 22, }; enum selinux_nlgroups { SELNLGRP_NONE = 0, SELNLGRP_AVC = 1, __SELNLGRP_MAX = 2, }; enum ser { SER_REQUIRED = 1, NO_SER = 2, }; enum serio_event_type { SERIO_RESCAN_PORT = 0, SERIO_RECONNECT_PORT = 1, SERIO_RECONNECT_SUBTREE = 2, SERIO_REGISTER_PORT = 3, SERIO_ATTACH_DRIVER = 4, }; enum severity_level { MCE_NO_SEVERITY = 0, MCE_DEFERRED_SEVERITY = 1, MCE_UCNA_SEVERITY = 1, MCE_KEEP_SEVERITY = 2, MCE_SOME_SEVERITY = 3, MCE_AO_SEVERITY = 4, MCE_UC_SEVERITY = 5, MCE_AR_SEVERITY = 6, MCE_PANIC_SEVERITY = 7, }; enum sgp_type { SGP_READ = 0, SGP_NOALLOC = 1, SGP_CACHE = 2, SGP_WRITE = 3, SGP_FALLOC = 4, }; enum shmem_param { Opt_gid___6 = 0, Opt_huge = 1, Opt_mode___6 = 2, Opt_mpol = 3, Opt_nr_blocks = 4, Opt_nr_inodes___2 = 5, Opt_size___2 = 6, Opt_uid___5 = 7, Opt_inode32 = 8, Opt_inode64 = 9, Opt_noswap = 10, Opt_quota___2 = 11, Opt_usrquota___2 = 12, Opt_grpquota___2 = 13, Opt_usrquota_block_hardlimit = 14, Opt_usrquota_inode_hardlimit = 15, Opt_grpquota_block_hardlimit = 16, Opt_grpquota_inode_hardlimit = 17, }; enum show_regs_mode { SHOW_REGS_SHORT = 0, SHOW_REGS_USER = 1, SHOW_REGS_ALL = 2, }; enum sig_handler { HANDLER_CURRENT = 0, HANDLER_SIG_DFL = 1, HANDLER_EXIT = 2, }; enum siginfo_layout { SIL_KILL = 0, SIL_TIMER = 1, SIL_POLL = 2, SIL_FAULT = 3, SIL_FAULT_TRAPNO = 4, SIL_FAULT_MCEERR = 5, SIL_FAULT_BNDERR = 6, SIL_FAULT_PKUERR = 7, SIL_FAULT_PERF_EVENT = 8, SIL_CHLD = 9, SIL_RT = 10, SIL_SYS = 11, }; enum sk_action { SK_DROP = 0, SK_PASS = 1, }; enum sk_pacing { SK_PACING_NONE = 0, SK_PACING_NEEDED = 1, SK_PACING_FQ = 2, }; enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED = 0, SK_PSOCK_RX_STRP_ENABLED = 1, }; enum 
sk_rst_reason { SK_RST_REASON_NOT_SPECIFIED = 0, SK_RST_REASON_NO_SOCKET = 1, SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE = 2, SK_RST_REASON_TCP_RFC7323_PAWS = 3, SK_RST_REASON_TCP_TOO_OLD_ACK = 4, SK_RST_REASON_TCP_ACK_UNSENT_DATA = 5, SK_RST_REASON_TCP_FLAGS = 6, SK_RST_REASON_TCP_OLD_ACK = 7, SK_RST_REASON_TCP_ABORT_ON_DATA = 8, SK_RST_REASON_TCP_TIMEWAIT_SOCKET = 9, SK_RST_REASON_INVALID_SYN = 10, SK_RST_REASON_TCP_ABORT_ON_CLOSE = 11, SK_RST_REASON_TCP_ABORT_ON_LINGER = 12, SK_RST_REASON_TCP_ABORT_ON_MEMORY = 13, SK_RST_REASON_TCP_STATE = 14, SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT = 15, SK_RST_REASON_TCP_DISCONNECT_WITH_DATA = 16, SK_RST_REASON_MPTCP_RST_EUNSPEC = 17, SK_RST_REASON_MPTCP_RST_EMPTCP = 18, SK_RST_REASON_MPTCP_RST_ERESOURCE = 19, SK_RST_REASON_MPTCP_RST_EPROHIBIT = 20, SK_RST_REASON_MPTCP_RST_EWQ2BIG = 21, SK_RST_REASON_MPTCP_RST_EBADPERF = 22, SK_RST_REASON_MPTCP_RST_EMIDDLEBOX = 23, SK_RST_REASON_ERROR = 24, SK_RST_REASON_MAX = 25, }; enum skb_drop_reason { SKB_NOT_DROPPED_YET = 0, SKB_CONSUMED = 1, SKB_DROP_REASON_NOT_SPECIFIED = 2, SKB_DROP_REASON_NO_SOCKET = 3, SKB_DROP_REASON_PKT_TOO_SMALL = 4, SKB_DROP_REASON_TCP_CSUM = 5, SKB_DROP_REASON_SOCKET_FILTER = 6, SKB_DROP_REASON_UDP_CSUM = 7, SKB_DROP_REASON_NETFILTER_DROP = 8, SKB_DROP_REASON_OTHERHOST = 9, SKB_DROP_REASON_IP_CSUM = 10, SKB_DROP_REASON_IP_INHDR = 11, SKB_DROP_REASON_IP_RPFILTER = 12, SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST = 13, SKB_DROP_REASON_XFRM_POLICY = 14, SKB_DROP_REASON_IP_NOPROTO = 15, SKB_DROP_REASON_SOCKET_RCVBUFF = 16, SKB_DROP_REASON_PROTO_MEM = 17, SKB_DROP_REASON_TCP_AUTH_HDR = 18, SKB_DROP_REASON_TCP_MD5NOTFOUND = 19, SKB_DROP_REASON_TCP_MD5UNEXPECTED = 20, SKB_DROP_REASON_TCP_MD5FAILURE = 21, SKB_DROP_REASON_TCP_AONOTFOUND = 22, SKB_DROP_REASON_TCP_AOUNEXPECTED = 23, SKB_DROP_REASON_TCP_AOKEYNOTFOUND = 24, SKB_DROP_REASON_TCP_AOFAILURE = 25, SKB_DROP_REASON_SOCKET_BACKLOG = 26, SKB_DROP_REASON_TCP_FLAGS = 27, SKB_DROP_REASON_TCP_ABORT_ON_DATA = 28, SKB_DROP_REASON_TCP_ZEROWINDOW = 29, SKB_DROP_REASON_TCP_OLD_DATA = 30, SKB_DROP_REASON_TCP_OVERWINDOW = 31, SKB_DROP_REASON_TCP_OFOMERGE = 32, SKB_DROP_REASON_TCP_RFC7323_PAWS = 33, SKB_DROP_REASON_TCP_OLD_SEQUENCE = 34, SKB_DROP_REASON_TCP_INVALID_SEQUENCE = 35, SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE = 36, SKB_DROP_REASON_TCP_RESET = 37, SKB_DROP_REASON_TCP_INVALID_SYN = 38, SKB_DROP_REASON_TCP_CLOSE = 39, SKB_DROP_REASON_TCP_FASTOPEN = 40, SKB_DROP_REASON_TCP_OLD_ACK = 41, SKB_DROP_REASON_TCP_TOO_OLD_ACK = 42, SKB_DROP_REASON_TCP_ACK_UNSENT_DATA = 43, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE = 44, SKB_DROP_REASON_TCP_OFO_DROP = 45, SKB_DROP_REASON_IP_OUTNOROUTES = 46, SKB_DROP_REASON_BPF_CGROUP_EGRESS = 47, SKB_DROP_REASON_IPV6DISABLED = 48, SKB_DROP_REASON_NEIGH_CREATEFAIL = 49, SKB_DROP_REASON_NEIGH_FAILED = 50, SKB_DROP_REASON_NEIGH_QUEUEFULL = 51, SKB_DROP_REASON_NEIGH_DEAD = 52, SKB_DROP_REASON_TC_EGRESS = 53, SKB_DROP_REASON_SECURITY_HOOK = 54, SKB_DROP_REASON_QDISC_DROP = 55, SKB_DROP_REASON_CPU_BACKLOG = 56, SKB_DROP_REASON_XDP = 57, SKB_DROP_REASON_TC_INGRESS = 58, SKB_DROP_REASON_UNHANDLED_PROTO = 59, SKB_DROP_REASON_SKB_CSUM = 60, SKB_DROP_REASON_SKB_GSO_SEG = 61, SKB_DROP_REASON_SKB_UCOPY_FAULT = 62, SKB_DROP_REASON_DEV_HDR = 63, SKB_DROP_REASON_DEV_READY = 64, SKB_DROP_REASON_FULL_RING = 65, SKB_DROP_REASON_NOMEM = 66, SKB_DROP_REASON_HDR_TRUNC = 67, SKB_DROP_REASON_TAP_FILTER = 68, SKB_DROP_REASON_TAP_TXFILTER = 69, SKB_DROP_REASON_ICMP_CSUM = 70, SKB_DROP_REASON_INVALID_PROTO = 71, SKB_DROP_REASON_IP_INADDRERRORS = 72, 
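/*
 * Note: SKB_DROP_REASON_XDP (= 57 above) is the reason the kernel records
 * when a packet is dropped by an XDP program running in generic (skb)
 * mode; it can be observed through the reason field of the skb:kfree_skb
 * tracepoint.
 */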
SKB_DROP_REASON_IP_INNOROUTES = 73, SKB_DROP_REASON_PKT_TOO_BIG = 74, SKB_DROP_REASON_DUP_FRAG = 75, SKB_DROP_REASON_FRAG_REASM_TIMEOUT = 76, SKB_DROP_REASON_FRAG_TOO_FAR = 77, SKB_DROP_REASON_TCP_MINTTL = 78, SKB_DROP_REASON_IPV6_BAD_EXTHDR = 79, SKB_DROP_REASON_IPV6_NDISC_FRAG = 80, SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT = 81, SKB_DROP_REASON_IPV6_NDISC_BAD_CODE = 82, SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS = 83, SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST = 84, SKB_DROP_REASON_QUEUE_PURGE = 85, SKB_DROP_REASON_TC_COOKIE_ERROR = 86, SKB_DROP_REASON_PACKET_SOCK_ERROR = 87, SKB_DROP_REASON_TC_CHAIN_NOTFOUND = 88, SKB_DROP_REASON_TC_RECLASSIFY_LOOP = 89, SKB_DROP_REASON_MAX = 90, SKB_DROP_REASON_SUBSYS_MASK = 4294901760, }; enum skb_drop_reason_subsys { SKB_DROP_REASON_SUBSYS_CORE = 0, SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE = 1, SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR = 2, SKB_DROP_REASON_SUBSYS_OPENVSWITCH = 3, SKB_DROP_REASON_SUBSYS_NUM = 4, }; enum skb_ext_id { SKB_EXT_SEC_PATH = 0, TC_SKB_EXT = 1, SKB_EXT_MPTCP = 2, SKB_EXT_NUM = 3, }; enum skb_tstamp_type { SKB_CLOCK_REALTIME = 0, SKB_CLOCK_MONOTONIC = 1, SKB_CLOCK_TAI = 2, __SKB_CLOCK_MAX = 2, }; enum sknetlink_groups { SKNLGRP_NONE = 0, SKNLGRP_INET_TCP_DESTROY = 1, SKNLGRP_INET_UDP_DESTROY = 2, SKNLGRP_INET6_TCP_DESTROY = 3, SKNLGRP_INET6_UDP_DESTROY = 4, __SKNLGRP_MAX = 5, }; enum slab_stat_type { SL_ALL = 0, SL_PARTIAL = 1, SL_CPU = 2, SL_OBJECTS = 3, SL_TOTAL = 4, }; enum slab_state { DOWN = 0, PARTIAL = 1, UP = 2, FULL = 3, }; enum smbios_attr_enum { SMBIOS_ATTR_NONE = 0, SMBIOS_ATTR_LABEL_SHOW = 1, SMBIOS_ATTR_INSTANCE_SHOW = 2, }; enum smca_bank_types { SMCA_LS = 0, SMCA_LS_V2 = 1, SMCA_IF = 2, SMCA_L2_CACHE = 3, SMCA_DE = 4, SMCA_RESERVED = 5, SMCA_EX = 6, SMCA_FP = 7, SMCA_L3_CACHE = 8, SMCA_CS = 9, SMCA_CS_V2 = 10, SMCA_PIE = 11, SMCA_UMC = 12, SMCA_UMC_V2 = 13, SMCA_MA_LLC = 14, SMCA_PB = 15, SMCA_PSP = 16, SMCA_PSP_V2 = 17, SMCA_SMU = 18, SMCA_SMU_V2 = 19, SMCA_MP5 = 20, SMCA_MPDMA = 21, SMCA_NBIO = 22, SMCA_PCIE = 23, SMCA_PCIE_V2 = 24, SMCA_XGMI_PCS = 25, SMCA_NBIF = 26, SMCA_SHUB = 27, SMCA_SATA = 28, SMCA_USB = 29, SMCA_USR_DP = 30, SMCA_USR_CP = 31, SMCA_GMI_PCS = 32, SMCA_XGMI_PHY = 33, SMCA_WAFL_PHY = 34, SMCA_GMI_PHY = 35, N_SMCA_BANK_TYPES = 36, }; enum sock_flags { SOCK_DEAD = 0, SOCK_DONE = 1, SOCK_URGINLINE = 2, SOCK_KEEPOPEN = 3, SOCK_LINGER = 4, SOCK_DESTROY = 5, SOCK_BROADCAST = 6, SOCK_TIMESTAMP = 7, SOCK_ZAPPED = 8, SOCK_USE_WRITE_QUEUE = 9, SOCK_DBG = 10, SOCK_RCVTSTAMP = 11, SOCK_RCVTSTAMPNS = 12, SOCK_LOCALROUTE = 13, SOCK_MEMALLOC = 14, SOCK_TIMESTAMPING_RX_SOFTWARE = 15, SOCK_FASYNC = 16, SOCK_RXQ_OVFL = 17, SOCK_ZEROCOPY = 18, SOCK_WIFI_STATUS = 19, SOCK_NOFCS = 20, SOCK_FILTER_LOCKED = 21, SOCK_SELECT_ERR_QUEUE = 22, SOCK_RCU_FREE = 23, SOCK_TXTIME = 24, SOCK_XDP = 25, SOCK_TSTAMP_NEW = 26, SOCK_RCVMARK = 27, }; enum sock_shutdown_cmd { SHUT_RD = 0, SHUT_WR = 1, SHUT_RDWR = 2, }; enum sock_type { SOCK_STREAM = 1, SOCK_DGRAM = 2, SOCK_RAW = 3, SOCK_RDM = 4, SOCK_SEQPACKET = 5, SOCK_DCCP = 6, SOCK_PACKET = 10, }; enum special_kfunc_type { KF_bpf_obj_new_impl = 0, KF_bpf_obj_drop_impl = 1, KF_bpf_refcount_acquire_impl = 2, KF_bpf_list_push_front_impl = 3, KF_bpf_list_push_back_impl = 4, KF_bpf_list_pop_front = 5, KF_bpf_list_pop_back = 6, KF_bpf_cast_to_kern_ctx = 7, KF_bpf_rdonly_cast = 8, KF_bpf_rcu_read_lock = 9, KF_bpf_rcu_read_unlock = 10, KF_bpf_rbtree_remove = 11, KF_bpf_rbtree_add_impl = 12, KF_bpf_rbtree_first = 13, KF_bpf_dynptr_from_skb = 14, KF_bpf_dynptr_from_xdp = 15, KF_bpf_dynptr_slice = 
16, KF_bpf_dynptr_slice_rdwr = 17, KF_bpf_dynptr_clone = 18, KF_bpf_percpu_obj_new_impl = 19, KF_bpf_percpu_obj_drop_impl = 20, KF_bpf_throw = 21, KF_bpf_wq_set_callback_impl = 22, KF_bpf_preempt_disable = 23, KF_bpf_preempt_enable = 24, KF_bpf_iter_css_task_new = 25, KF_bpf_session_cookie = 26, }; enum spectre_v1_mitigation { SPECTRE_V1_MITIGATION_NONE = 0, SPECTRE_V1_MITIGATION_AUTO = 1, }; enum spectre_v2_mitigation { SPECTRE_V2_NONE = 0, SPECTRE_V2_RETPOLINE = 1, SPECTRE_V2_LFENCE = 2, SPECTRE_V2_EIBRS = 3, SPECTRE_V2_EIBRS_RETPOLINE = 4, SPECTRE_V2_EIBRS_LFENCE = 5, SPECTRE_V2_IBRS = 6, }; enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_NONE = 0, SPECTRE_V2_CMD_AUTO = 1, SPECTRE_V2_CMD_FORCE = 2, SPECTRE_V2_CMD_RETPOLINE = 3, SPECTRE_V2_CMD_RETPOLINE_GENERIC = 4, SPECTRE_V2_CMD_RETPOLINE_LFENCE = 5, SPECTRE_V2_CMD_EIBRS = 6, SPECTRE_V2_CMD_EIBRS_RETPOLINE = 7, SPECTRE_V2_CMD_EIBRS_LFENCE = 8, SPECTRE_V2_CMD_IBRS = 9, }; enum spectre_v2_user_cmd { SPECTRE_V2_USER_CMD_NONE = 0, SPECTRE_V2_USER_CMD_AUTO = 1, SPECTRE_V2_USER_CMD_FORCE = 2, SPECTRE_V2_USER_CMD_PRCTL = 3, SPECTRE_V2_USER_CMD_PRCTL_IBPB = 4, SPECTRE_V2_USER_CMD_SECCOMP = 5, SPECTRE_V2_USER_CMD_SECCOMP_IBPB = 6, }; enum spectre_v2_user_mitigation { SPECTRE_V2_USER_NONE = 0, SPECTRE_V2_USER_STRICT = 1, SPECTRE_V2_USER_STRICT_PREFERRED = 2, SPECTRE_V2_USER_PRCTL = 3, SPECTRE_V2_USER_SECCOMP = 4, }; enum split_lock_detect_state { sld_off = 0, sld_warn = 1, sld_fatal = 2, sld_ratelimit = 3, }; enum srbds_mitigations { SRBDS_MITIGATION_OFF = 0, SRBDS_MITIGATION_UCODE_NEEDED = 1, SRBDS_MITIGATION_FULL = 2, SRBDS_MITIGATION_TSX_OFF = 3, SRBDS_MITIGATION_HYPERVISOR = 4, }; enum srso_mitigation { SRSO_MITIGATION_NONE = 0, SRSO_MITIGATION_UCODE_NEEDED = 1, SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED = 2, SRSO_MITIGATION_MICROCODE = 3, SRSO_MITIGATION_SAFE_RET = 4, SRSO_MITIGATION_IBPB = 5, SRSO_MITIGATION_IBPB_ON_VMEXIT = 6, }; enum srso_mitigation_cmd { SRSO_CMD_OFF = 0, SRSO_CMD_MICROCODE = 1, SRSO_CMD_SAFE_RET = 2, SRSO_CMD_IBPB = 3, SRSO_CMD_IBPB_ON_VMEXIT = 4, }; enum ssb_mitigation { SPEC_STORE_BYPASS_NONE = 0, SPEC_STORE_BYPASS_DISABLE = 1, SPEC_STORE_BYPASS_PRCTL = 2, SPEC_STORE_BYPASS_SECCOMP = 3, }; enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_NONE = 0, SPEC_STORE_BYPASS_CMD_AUTO = 1, SPEC_STORE_BYPASS_CMD_ON = 2, SPEC_STORE_BYPASS_CMD_PRCTL = 3, SPEC_STORE_BYPASS_CMD_SECCOMP = 4, }; enum stack_type { STACK_TYPE_UNKNOWN = 0, STACK_TYPE_TASK = 1, STACK_TYPE_IRQ = 2, STACK_TYPE_SOFTIRQ = 3, STACK_TYPE_ENTRY = 4, STACK_TYPE_EXCEPTION = 5, STACK_TYPE_EXCEPTION_LAST = 10, }; enum stat_group { STAT_READ = 0, STAT_WRITE = 1, STAT_DISCARD = 2, STAT_FLUSH = 3, NR_STAT_GROUPS = 4, }; enum stat_item { ALLOC_FASTPATH = 0, ALLOC_SLOWPATH = 1, FREE_FASTPATH = 2, FREE_SLOWPATH = 3, FREE_FROZEN = 4, FREE_ADD_PARTIAL = 5, FREE_REMOVE_PARTIAL = 6, ALLOC_FROM_PARTIAL = 7, ALLOC_SLAB = 8, ALLOC_REFILL = 9, ALLOC_NODE_MISMATCH = 10, FREE_SLAB = 11, CPUSLAB_FLUSH = 12, DEACTIVATE_FULL = 13, DEACTIVATE_EMPTY = 14, DEACTIVATE_TO_HEAD = 15, DEACTIVATE_TO_TAIL = 16, DEACTIVATE_REMOTE_FREES = 17, DEACTIVATE_BYPASS = 18, ORDER_FALLBACK = 19, CMPXCHG_DOUBLE_CPU_FAIL = 20, CMPXCHG_DOUBLE_FAIL = 21, CPU_PARTIAL_ALLOC = 22, CPU_PARTIAL_FREE = 23, CPU_PARTIAL_NODE = 24, CPU_PARTIAL_DRAIN = 25, NR_SLUB_STAT_ITEMS = 26, }; enum state { Start = 0, Collect = 1, GotHeader = 2, SkipIt = 3, GotName = 4, CopyFile = 5, GotSymlink = 6, Reset = 7, }; enum store_type { wr_invalid = 0, wr_new_root = 1, wr_store_root = 2, wr_exact_fit = 3, wr_spanning_store = 4, 
wr_split_store = 5, wr_rebalance = 6, wr_append = 7, wr_node_store = 8, wr_slot_store = 9, }; enum string_size_units { STRING_UNITS_10 = 0, STRING_UNITS_2 = 1, STRING_UNITS_MASK = 1, STRING_UNITS_NO_SPACE = 1073741824, STRING_UNITS_NO_BYTES = 2147483648, }; enum sum_check_bits { SUM_CHECK_P = 0, SUM_CHECK_Q = 1, }; enum sum_check_flags { SUM_CHECK_P_RESULT = 1, SUM_CHECK_Q_RESULT = 2, }; enum support_mode { ALLOW_LEGACY = 0, DENY_LEGACY = 1, }; enum suspend_stat_step { SUSPEND_WORKING = 0, SUSPEND_FREEZE = 1, SUSPEND_PREPARE = 2, SUSPEND_SUSPEND = 3, SUSPEND_SUSPEND_LATE = 4, SUSPEND_SUSPEND_NOIRQ = 5, SUSPEND_RESUME_NOIRQ = 6, SUSPEND_RESUME_EARLY = 7, SUSPEND_RESUME = 8, }; enum switchdev_notifier_type { SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, SWITCHDEV_FDB_ADD_TO_DEVICE = 3, SWITCHDEV_FDB_DEL_TO_DEVICE = 4, SWITCHDEV_FDB_OFFLOADED = 5, SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, SWITCHDEV_PORT_OBJ_ADD = 7, SWITCHDEV_PORT_OBJ_DEL = 8, SWITCHDEV_PORT_ATTR_SET = 9, SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, SWITCHDEV_BRPORT_OFFLOADED = 15, SWITCHDEV_BRPORT_UNOFFLOADED = 16, SWITCHDEV_BRPORT_REPLAY = 17, }; enum synaptics_pkt_type { SYN_NEWABS = 0, SYN_NEWABS_STRICT = 1, SYN_NEWABS_RELAXED = 2, SYN_OLDABS = 3, }; enum sys_off_mode { SYS_OFF_MODE_POWER_OFF_PREPARE = 0, SYS_OFF_MODE_POWER_OFF = 1, SYS_OFF_MODE_RESTART_PREPARE = 2, SYS_OFF_MODE_RESTART = 3, }; enum syscall_work_bit { SYSCALL_WORK_BIT_SECCOMP = 0, SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT = 1, SYSCALL_WORK_BIT_SYSCALL_TRACE = 2, SYSCALL_WORK_BIT_SYSCALL_EMU = 3, SYSCALL_WORK_BIT_SYSCALL_AUDIT = 4, SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH = 5, SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP = 6, }; enum sysctl_writes_mode { SYSCTL_WRITES_LEGACY = -1, SYSCTL_WRITES_WARN = 0, SYSCTL_WRITES_STRICT = 1, }; enum system_states { SYSTEM_BOOTING = 0, SYSTEM_SCHEDULING = 1, SYSTEM_FREEING_INITMEM = 2, SYSTEM_RUNNING = 3, SYSTEM_HALT = 4, SYSTEM_POWER_OFF = 5, SYSTEM_RESTART = 6, SYSTEM_SUSPEND = 7, }; enum taa_mitigations { TAA_MITIGATION_OFF = 0, TAA_MITIGATION_UCODE_NEEDED = 1, TAA_MITIGATION_VERW = 2, TAA_MITIGATION_TSX_DISABLED = 3, }; enum task_work_notify_mode { TWA_NONE = 0, TWA_RESUME = 1, TWA_SIGNAL = 2, TWA_SIGNAL_NO_IPI = 3, TWA_NMI_CURRENT = 4, }; enum tc_clsbpf_command { TC_CLSBPF_OFFLOAD = 0, TC_CLSBPF_STATS = 1, }; enum tc_fifo_command { TC_FIFO_REPLACE = 0, TC_FIFO_DESTROY = 1, TC_FIFO_STATS = 2, }; enum tc_link_layer { TC_LINKLAYER_UNAWARE = 0, TC_LINKLAYER_ETHERNET = 1, TC_LINKLAYER_ATM = 2, }; enum tc_matchall_command { TC_CLSMATCHALL_REPLACE = 0, TC_CLSMATCHALL_DESTROY = 1, TC_CLSMATCHALL_STATS = 2, }; enum tc_mq_command { TC_MQ_CREATE = 0, TC_MQ_DESTROY = 1, TC_MQ_STATS = 2, TC_MQ_GRAFT = 3, }; enum tc_root_command { TC_ROOT_GRAFT = 0, }; enum tc_setup_type { TC_QUERY_CAPS = 0, TC_SETUP_QDISC_MQPRIO = 1, TC_SETUP_CLSU32 = 2, TC_SETUP_CLSFLOWER = 3, TC_SETUP_CLSMATCHALL = 4, TC_SETUP_CLSBPF = 5, TC_SETUP_BLOCK = 6, TC_SETUP_QDISC_CBS = 7, TC_SETUP_QDISC_RED = 8, TC_SETUP_QDISC_PRIO = 9, TC_SETUP_QDISC_MQ = 10, TC_SETUP_QDISC_ETF = 11, TC_SETUP_ROOT_QDISC = 12, TC_SETUP_QDISC_GRED = 13, TC_SETUP_QDISC_TAPRIO = 14, TC_SETUP_FT = 15, TC_SETUP_QDISC_ETS = 16, TC_SETUP_QDISC_TBF = 17, TC_SETUP_QDISC_FIFO = 18, TC_SETUP_QDISC_HTB = 19, TC_SETUP_ACT = 20, }; enum tc_taprio_qopt_cmd { TAPRIO_CMD_REPLACE = 0, TAPRIO_CMD_DESTROY = 1, TAPRIO_CMD_STATS = 2, TAPRIO_CMD_QUEUE_STATS = 
3, }; enum tca_id { TCA_ID_UNSPEC = 0, TCA_ID_POLICE = 1, TCA_ID_GACT = 5, TCA_ID_IPT = 6, TCA_ID_PEDIT = 7, TCA_ID_MIRRED = 8, TCA_ID_NAT = 9, TCA_ID_XT = 10, TCA_ID_SKBEDIT = 11, TCA_ID_VLAN = 12, TCA_ID_BPF = 13, TCA_ID_CONNMARK = 14, TCA_ID_SKBMOD = 15, TCA_ID_CSUM = 16, TCA_ID_TUNNEL_KEY = 17, TCA_ID_SIMP = 22, TCA_ID_IFE = 25, TCA_ID_SAMPLE = 26, TCA_ID_CTINFO = 27, TCA_ID_MPLS = 28, TCA_ID_CT = 29, TCA_ID_GATE = 30, __TCA_ID_MAX = 255, }; enum tcf_proto_ops_flags { TCF_PROTO_OPS_DOIT_UNLOCKED = 1, }; enum tcp_bit_set { TCP_SYN_SET = 0, TCP_SYNACK_SET = 1, TCP_FIN_SET = 2, TCP_ACK_SET = 3, TCP_RST_SET = 4, TCP_NONE_SET = 5, }; enum tcp_ca_ack_event_flags { CA_ACK_SLOWPATH = 1, CA_ACK_WIN_UPDATE = 2, CA_ACK_ECE = 4, }; enum tcp_ca_event { CA_EVENT_TX_START = 0, CA_EVENT_CWND_RESTART = 1, CA_EVENT_COMPLETE_CWR = 2, CA_EVENT_LOSS = 3, CA_EVENT_ECN_NO_CE = 4, CA_EVENT_ECN_IS_CE = 5, }; enum tcp_ca_state { TCP_CA_Open = 0, TCP_CA_Disorder = 1, TCP_CA_CWR = 2, TCP_CA_Recovery = 3, TCP_CA_Loss = 4, }; enum tcp_chrono { TCP_CHRONO_UNSPEC = 0, TCP_CHRONO_BUSY = 1, TCP_CHRONO_RWND_LIMITED = 2, TCP_CHRONO_SNDBUF_LIMITED = 3, __TCP_CHRONO_MAX = 4, }; enum tcp_conntrack { TCP_CONNTRACK_NONE = 0, TCP_CONNTRACK_SYN_SENT = 1, TCP_CONNTRACK_SYN_RECV = 2, TCP_CONNTRACK_ESTABLISHED = 3, TCP_CONNTRACK_FIN_WAIT = 4, TCP_CONNTRACK_CLOSE_WAIT = 5, TCP_CONNTRACK_LAST_ACK = 6, TCP_CONNTRACK_TIME_WAIT = 7, TCP_CONNTRACK_CLOSE = 8, TCP_CONNTRACK_LISTEN = 9, TCP_CONNTRACK_MAX = 10, TCP_CONNTRACK_IGNORE = 11, TCP_CONNTRACK_RETRANS = 12, TCP_CONNTRACK_UNACK = 13, TCP_CONNTRACK_TIMEOUT_MAX = 14, }; enum tcp_fastopen_client_fail { TFO_STATUS_UNSPEC = 0, TFO_COOKIE_UNAVAILABLE = 1, TFO_DATA_NOT_ACKED = 2, TFO_SYN_RETRANSMITTED = 3, }; enum tcp_metric_index { TCP_METRIC_RTT = 0, TCP_METRIC_RTTVAR = 1, TCP_METRIC_SSTHRESH = 2, TCP_METRIC_CWND = 3, TCP_METRIC_REORDERING = 4, TCP_METRIC_RTT_US = 5, TCP_METRIC_RTTVAR_US = 6, __TCP_METRIC_MAX = 7, }; enum tcp_queue { TCP_FRAG_IN_WRITE_QUEUE = 0, TCP_FRAG_IN_RTX_QUEUE = 1, }; enum tcp_seq_states { TCP_SEQ_STATE_LISTENING = 0, TCP_SEQ_STATE_ESTABLISHED = 1, }; enum tcp_skb_cb_sacked_flags { TCPCB_SACKED_ACKED = 1, TCPCB_SACKED_RETRANS = 2, TCPCB_LOST = 4, TCPCB_TAGBITS = 7, TCPCB_REPAIRED = 16, TCPCB_EVER_RETRANS = 128, TCPCB_RETRANS = 146, }; enum tcp_synack_type { TCP_SYNACK_NORMAL = 0, TCP_SYNACK_FASTOPEN = 1, TCP_SYNACK_COOKIE = 2, }; enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, TCP_TW_ACK = 2, TCP_TW_SYN = 3, }; enum tcpa_event_types { PREBOOT = 0, POST_CODE = 1, UNUSED = 2, NO_ACTION = 3, SEPARATOR = 4, ACTION = 5, EVENT_TAG = 6, SCRTM_CONTENTS = 7, SCRTM_VERSION = 8, CPU_MICROCODE = 9, PLATFORM_CONFIG_FLAGS = 10, TABLE_OF_DEVICES = 11, COMPACT_HASH = 12, IPL = 13, IPL_PARTITION_DATA = 14, NONHOST_CODE = 15, NONHOST_CONFIG = 16, NONHOST_INFO = 17, }; enum tcpa_pc_event_ids { SMBIOS = 1, BIS_CERT = 2, POST_BIOS_ROM = 3, ESCD = 4, CMOS = 5, NVRAM = 6, OPTION_ROM_EXEC = 7, OPTION_ROM_CONFIG = 8, OPTION_ROM_MICROCODE = 10, S_CRTM_VERSION = 11, S_CRTM_CONTENTS = 12, POST_CONTENTS = 13, HOST_TABLE_OF_DEVICES = 14, }; enum tcx_action_base { TCX_NEXT = -1, TCX_PASS = 0, TCX_DROP = 2, TCX_REDIRECT = 7, }; enum tg_state_flags { THROTL_TG_PENDING = 1, THROTL_TG_WAS_EMPTY = 2, THROTL_TG_CANCELING = 4, }; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, THERMAL_DEVICE_ENABLED = 1, }; enum thermal_notify_event { THERMAL_EVENT_UNSPECIFIED = 0, THERMAL_EVENT_TEMP_SAMPLE = 1, THERMAL_TRIP_VIOLATED = 2, THERMAL_TRIP_CHANGED = 3, THERMAL_DEVICE_DOWN = 4, 
THERMAL_DEVICE_UP = 5, THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, THERMAL_TABLE_CHANGED = 7, THERMAL_EVENT_KEEP_ALIVE = 8, THERMAL_TZ_BIND_CDEV = 9, THERMAL_TZ_UNBIND_CDEV = 10, THERMAL_INSTANCE_WEIGHT_CHANGED = 11, THERMAL_TZ_RESUME = 12, }; enum thermal_trend { THERMAL_TREND_STABLE = 0, THERMAL_TREND_RAISING = 1, THERMAL_TREND_DROPPING = 2, }; enum thermal_trip_type { THERMAL_TRIP_ACTIVE = 0, THERMAL_TRIP_PASSIVE = 1, THERMAL_TRIP_HOT = 2, THERMAL_TRIP_CRITICAL = 3, }; enum tick_broadcast_mode { TICK_BROADCAST_OFF = 0, TICK_BROADCAST_ON = 1, TICK_BROADCAST_FORCE = 2, }; enum tick_broadcast_state { TICK_BROADCAST_EXIT = 0, TICK_BROADCAST_ENTER = 1, }; enum tick_dep_bits { TICK_DEP_BIT_POSIX_TIMER = 0, TICK_DEP_BIT_PERF_EVENTS = 1, TICK_DEP_BIT_SCHED = 2, TICK_DEP_BIT_CLOCK_UNSTABLE = 3, TICK_DEP_BIT_RCU = 4, TICK_DEP_BIT_RCU_EXP = 5, }; enum tick_device_mode { TICKDEV_MODE_PERIODIC = 0, TICKDEV_MODE_ONESHOT = 1, }; enum timekeeping_adv_mode { TK_ADV_TICK = 0, TK_ADV_FREQ = 1, }; enum timespec_type { TT_NONE = 0, TT_NATIVE = 1, TT_COMPAT = 2, }; enum tis_access { TPM_ACCESS_VALID = 128, TPM_ACCESS_ACTIVE_LOCALITY = 32, TPM_ACCESS_REQUEST_PENDING = 4, TPM_ACCESS_REQUEST_USE = 2, }; enum tis_defaults { TIS_MEM_LEN = 20480, TIS_SHORT_TIMEOUT = 750, TIS_LONG_TIMEOUT = 2000, TIS_TIMEOUT_MIN_ATML = 14700, TIS_TIMEOUT_MAX_ATML = 15000, }; enum tis_int_flags { TPM_GLOBAL_INT_ENABLE = 2147483648, TPM_INTF_BURST_COUNT_STATIC = 256, TPM_INTF_CMD_READY_INT = 128, TPM_INTF_INT_EDGE_FALLING = 64, TPM_INTF_INT_EDGE_RISING = 32, TPM_INTF_INT_LEVEL_LOW = 16, TPM_INTF_INT_LEVEL_HIGH = 8, TPM_INTF_LOCALITY_CHANGE_INT = 4, TPM_INTF_STS_VALID_INT = 2, TPM_INTF_DATA_AVAIL_INT = 1, }; enum tis_status { TPM_STS_VALID = 128, TPM_STS_COMMAND_READY = 64, TPM_STS_GO = 32, TPM_STS_DATA_AVAIL = 16, TPM_STS_DATA_EXPECT = 8, TPM_STS_RESPONSE_RETRY = 2, TPM_STS_READ_ZERO = 35, }; enum tk_offsets { TK_OFFS_REAL = 0, TK_OFFS_BOOT = 1, TK_OFFS_TAI = 2, TK_OFFS_MAX = 3, }; enum tlb_flush_reason { TLB_FLUSH_ON_TASK_SWITCH = 0, TLB_REMOTE_SHOOTDOWN = 1, TLB_LOCAL_SHOOTDOWN = 2, TLB_LOCAL_MM_SHOOTDOWN = 3, TLB_REMOTE_SEND_IPI = 4, NR_TLB_FLUSH_REASONS = 5, }; enum tlb_infos { ENTRIES = 0, NR_INFO = 1, }; enum topo_types { INVALID_TYPE = 0, SMT_TYPE = 1, CORE_TYPE = 2, MAX_TYPE_0B = 3, MODULE_TYPE = 3, AMD_CCD_TYPE = 3, TILE_TYPE = 4, AMD_SOCKET_TYPE = 4, MAX_TYPE_80000026 = 5, DIE_TYPE = 5, DIEGRP_TYPE = 6, MAX_TYPE_1F = 7, }; enum tp_func_state { TP_FUNC_0 = 0, TP_FUNC_1 = 1, TP_FUNC_2 = 2, TP_FUNC_N = 3, }; enum tp_transition_sync { TP_TRANSITION_SYNC_1_0_1 = 0, TP_TRANSITION_SYNC_N_2_1 = 1, _NR_TP_TRANSITION_SYNC = 2, }; enum tpacket_versions { TPACKET_V1 = 0, TPACKET_V2 = 1, TPACKET_V3 = 2, }; enum tpm2_capabilities { TPM2_CAP_HANDLES = 1, TPM2_CAP_COMMANDS = 2, TPM2_CAP_PCRS = 5, TPM2_CAP_TPM_PROPERTIES = 6, }; enum tpm2_cc_attrs { TPM2_CC_ATTR_CHANDLES = 25, TPM2_CC_ATTR_RHANDLE = 28, TPM2_CC_ATTR_VENDOR = 29, }; enum tpm2_command_codes { TPM2_CC_FIRST = 287, TPM2_CC_HIERARCHY_CONTROL = 289, TPM2_CC_HIERARCHY_CHANGE_AUTH = 297, TPM2_CC_CREATE_PRIMARY = 305, TPM2_CC_SEQUENCE_COMPLETE = 318, TPM2_CC_SELF_TEST = 323, TPM2_CC_STARTUP = 324, TPM2_CC_SHUTDOWN = 325, TPM2_CC_NV_READ = 334, TPM2_CC_CREATE = 339, TPM2_CC_LOAD = 343, TPM2_CC_SEQUENCE_UPDATE = 348, TPM2_CC_UNSEAL = 350, TPM2_CC_CONTEXT_LOAD = 353, TPM2_CC_CONTEXT_SAVE = 354, TPM2_CC_FLUSH_CONTEXT = 357, TPM2_CC_READ_PUBLIC = 371, TPM2_CC_START_AUTH_SESS = 374, TPM2_CC_VERIFY_SIGNATURE = 375, TPM2_CC_GET_CAPABILITY = 378, TPM2_CC_GET_RANDOM = 379, TPM2_CC_PCR_READ = 
382, TPM2_CC_PCR_EXTEND = 386, TPM2_CC_EVENT_SEQUENCE_COMPLETE = 389, TPM2_CC_HASH_SEQUENCE_START = 390, TPM2_CC_CREATE_LOADED = 401, TPM2_CC_LAST = 403, }; enum tpm2_const { TPM2_PLATFORM_PCR = 24, TPM2_PCR_SELECT_MIN = 3, }; enum tpm2_curves { TPM2_ECC_NONE = 0, TPM2_ECC_NIST_P256 = 3, }; enum tpm2_handle_types { TPM2_HT_HMAC_SESSION = 33554432, TPM2_HT_POLICY_SESSION = 50331648, TPM2_HT_TRANSIENT = 2147483648, }; enum tpm2_mso_type { TPM2_MSO_NVRAM = 1, TPM2_MSO_SESSION = 2, TPM2_MSO_POLICY = 3, TPM2_MSO_PERMANENT = 64, TPM2_MSO_VOLATILE = 128, TPM2_MSO_PERSISTENT = 129, }; enum tpm2_object_attributes { TPM2_OA_FIXED_TPM = 2, TPM2_OA_ST_CLEAR = 4, TPM2_OA_FIXED_PARENT = 16, TPM2_OA_SENSITIVE_DATA_ORIGIN = 32, TPM2_OA_USER_WITH_AUTH = 64, TPM2_OA_ADMIN_WITH_POLICY = 128, TPM2_OA_NO_DA = 1024, TPM2_OA_ENCRYPTED_DUPLICATION = 2048, TPM2_OA_RESTRICTED = 65536, TPM2_OA_DECRYPT = 131072, TPM2_OA_SIGN = 262144, }; enum tpm2_permanent_handles { TPM2_RH_NULL = 1073741831, TPM2_RS_PW = 1073741833, }; enum tpm2_properties { TPM_PT_TOTAL_COMMANDS = 297, }; enum tpm2_return_codes { TPM2_RC_SUCCESS = 0, TPM2_RC_HASH = 131, TPM2_RC_HANDLE = 139, TPM2_RC_INTEGRITY = 159, TPM2_RC_INITIALIZE = 256, TPM2_RC_FAILURE = 257, TPM2_RC_DISABLED = 288, TPM2_RC_UPGRADE = 301, TPM2_RC_COMMAND_CODE = 323, TPM2_RC_TESTING = 2314, TPM2_RC_REFERENCE_H0 = 2320, TPM2_RC_RETRY = 2338, }; enum tpm2_session_attributes { TPM2_SA_CONTINUE_SESSION = 1, TPM2_SA_AUDIT_EXCLUSIVE = 2, TPM2_SA_AUDIT_RESET = 8, TPM2_SA_DECRYPT = 32, TPM2_SA_ENCRYPT = 64, TPM2_SA_AUDIT = 128, }; enum tpm2_session_types { TPM2_SE_HMAC = 0, TPM2_SE_POLICY = 1, TPM2_SE_TRIAL = 2, }; enum tpm2_startup_types { TPM2_SU_CLEAR = 0, TPM2_SU_STATE = 1, }; enum tpm2_structures { TPM2_ST_NO_SESSIONS = 32769, TPM2_ST_SESSIONS = 32770, TPM2_ST_CREATION = 32801, }; enum tpm2_timeouts { TPM2_TIMEOUT_A = 750, TPM2_TIMEOUT_B = 2000, TPM2_TIMEOUT_C = 200, TPM2_TIMEOUT_D = 30, TPM2_DURATION_SHORT = 20, TPM2_DURATION_MEDIUM = 750, TPM2_DURATION_LONG = 2000, TPM2_DURATION_LONG_LONG = 300000, TPM2_DURATION_DEFAULT = 120000, }; enum tpm_algorithms { TPM_ALG_ERROR = 0, TPM_ALG_SHA1 = 4, TPM_ALG_AES = 6, TPM_ALG_KEYEDHASH = 8, TPM_ALG_SHA256 = 11, TPM_ALG_SHA384 = 12, TPM_ALG_SHA512 = 13, TPM_ALG_NULL = 16, TPM_ALG_SM3_256 = 18, TPM_ALG_ECC = 35, TPM_ALG_CFB = 67, }; enum tpm_buf_flags { TPM_BUF_OVERFLOW = 1, TPM_BUF_TPM2B = 2, TPM_BUF_BOUNDARY_ERROR = 4, }; enum tpm_capabilities { TPM_CAP_FLAG = 4, TPM_CAP_PROP = 5, TPM_CAP_VERSION_1_1 = 6, TPM_CAP_VERSION_1_2 = 26, }; enum tpm_chip_flags { TPM_CHIP_FLAG_BOOTSTRAPPED = 1, TPM_CHIP_FLAG_TPM2 = 2, TPM_CHIP_FLAG_IRQ = 4, TPM_CHIP_FLAG_VIRTUAL = 8, TPM_CHIP_FLAG_HAVE_TIMEOUTS = 16, TPM_CHIP_FLAG_ALWAYS_POWERED = 32, TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = 64, TPM_CHIP_FLAG_FIRMWARE_UPGRADE = 128, TPM_CHIP_FLAG_SUSPENDED = 256, TPM_CHIP_FLAG_HWRNG_DISABLED = 512, TPM_CHIP_FLAG_DISABLE = 1024, }; enum tpm_duration { TPM_SHORT = 0, TPM_MEDIUM = 1, TPM_LONG = 2, TPM_LONG_LONG = 3, TPM_UNDEFINED = 4, TPM_NUM_DURATIONS = 4, }; enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10, }; enum tpm_sub_capabilities { TPM_CAP_PROP_PCR = 257, TPM_CAP_PROP_MANUFACTURER = 259, TPM_CAP_FLAG_PERM = 264, TPM_CAP_FLAG_VOL = 265, TPM_CAP_PROP_OWNER = 273, TPM_CAP_PROP_TIS_TIMEOUT = 277, TPM_CAP_PROP_TIS_DURATION = 288, }; enum tpm_timeout { TPM_TIMEOUT = 5, TPM_TIMEOUT_RETRY = 100, TPM_TIMEOUT_RANGE_US = 300, TPM_TIMEOUT_POLL = 1, TPM_TIMEOUT_USECS_MIN = 100, TPM_TIMEOUT_USECS_MAX = 500, }; enum tpm_tis_flags { TPM_TIS_ITPM_WORKAROUND = 0, 
TPM_TIS_INVALID_STATUS = 1, TPM_TIS_DEFAULT_CANCELLATION = 2, TPM_TIS_IRQ_TESTED = 3, }; enum tpm_tis_io_mode { TPM_TIS_PHYS_8 = 0, TPM_TIS_PHYS_16 = 1, TPM_TIS_PHYS_32 = 2, }; enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 1, TRACE_FLAG_IRQS_NOSUPPORT = 2, TRACE_FLAG_NEED_RESCHED = 4, TRACE_FLAG_HARDIRQ = 8, TRACE_FLAG_SOFTIRQ = 16, TRACE_FLAG_PREEMPT_RESCHED = 32, TRACE_FLAG_NMI = 64, TRACE_FLAG_BH_OFF = 128, }; enum trace_iter_flags { TRACE_FILE_LAT_FMT = 1, TRACE_FILE_ANNOTATE = 2, TRACE_FILE_TIME_IN_NS = 4, }; enum trace_iterator_bits { TRACE_ITER_PRINT_PARENT_BIT = 0, TRACE_ITER_SYM_OFFSET_BIT = 1, TRACE_ITER_SYM_ADDR_BIT = 2, TRACE_ITER_VERBOSE_BIT = 3, TRACE_ITER_RAW_BIT = 4, TRACE_ITER_HEX_BIT = 5, TRACE_ITER_BIN_BIT = 6, TRACE_ITER_BLOCK_BIT = 7, TRACE_ITER_FIELDS_BIT = 8, TRACE_ITER_PRINTK_BIT = 9, TRACE_ITER_ANNOTATE_BIT = 10, TRACE_ITER_USERSTACKTRACE_BIT = 11, TRACE_ITER_SYM_USEROBJ_BIT = 12, TRACE_ITER_PRINTK_MSGONLY_BIT = 13, TRACE_ITER_CONTEXT_INFO_BIT = 14, TRACE_ITER_LATENCY_FMT_BIT = 15, TRACE_ITER_RECORD_CMD_BIT = 16, TRACE_ITER_RECORD_TGID_BIT = 17, TRACE_ITER_OVERWRITE_BIT = 18, TRACE_ITER_STOP_ON_FREE_BIT = 19, TRACE_ITER_IRQ_INFO_BIT = 20, TRACE_ITER_MARKERS_BIT = 21, TRACE_ITER_EVENT_FORK_BIT = 22, TRACE_ITER_TRACE_PRINTK_BIT = 23, TRACE_ITER_PAUSE_ON_TRACE_BIT = 24, TRACE_ITER_HASH_PTR_BIT = 25, TRACE_ITER_FUNCTION_BIT = 26, TRACE_ITER_FUNC_FORK_BIT = 27, TRACE_ITER_DISPLAY_GRAPH_BIT = 28, TRACE_ITER_STACKTRACE_BIT = 29, TRACE_ITER_LAST_BIT = 30, }; enum trace_iterator_flags { TRACE_ITER_PRINT_PARENT = 1, TRACE_ITER_SYM_OFFSET = 2, TRACE_ITER_SYM_ADDR = 4, TRACE_ITER_VERBOSE = 8, TRACE_ITER_RAW = 16, TRACE_ITER_HEX = 32, TRACE_ITER_BIN = 64, TRACE_ITER_BLOCK = 128, TRACE_ITER_FIELDS = 256, TRACE_ITER_PRINTK = 512, TRACE_ITER_ANNOTATE = 1024, TRACE_ITER_USERSTACKTRACE = 2048, TRACE_ITER_SYM_USEROBJ = 4096, TRACE_ITER_PRINTK_MSGONLY = 8192, TRACE_ITER_CONTEXT_INFO = 16384, TRACE_ITER_LATENCY_FMT = 32768, TRACE_ITER_RECORD_CMD = 65536, TRACE_ITER_RECORD_TGID = 131072, TRACE_ITER_OVERWRITE = 262144, TRACE_ITER_STOP_ON_FREE = 524288, TRACE_ITER_IRQ_INFO = 1048576, TRACE_ITER_MARKERS = 2097152, TRACE_ITER_EVENT_FORK = 4194304, TRACE_ITER_TRACE_PRINTK = 8388608, TRACE_ITER_PAUSE_ON_TRACE = 16777216, TRACE_ITER_HASH_PTR = 33554432, TRACE_ITER_FUNCTION = 67108864, TRACE_ITER_FUNC_FORK = 134217728, TRACE_ITER_DISPLAY_GRAPH = 268435456, TRACE_ITER_STACKTRACE = 536870912, }; enum trace_reg { TRACE_REG_REGISTER = 0, TRACE_REG_UNREGISTER = 1, TRACE_REG_PERF_REGISTER = 2, TRACE_REG_PERF_UNREGISTER = 3, TRACE_REG_PERF_OPEN = 4, TRACE_REG_PERF_CLOSE = 5, TRACE_REG_PERF_ADD = 6, TRACE_REG_PERF_DEL = 7, }; enum trace_type { __TRACE_FIRST_TYPE = 0, TRACE_FN = 1, TRACE_CTX = 2, TRACE_WAKE = 3, TRACE_STACK = 4, TRACE_PRINT = 5, TRACE_BPRINT = 6, TRACE_MMIO_RW = 7, TRACE_MMIO_MAP = 8, TRACE_BRANCH = 9, TRACE_GRAPH_RET = 10, TRACE_GRAPH_ENT = 11, TRACE_USER_STACK = 12, TRACE_BLK = 13, TRACE_BPUTS = 14, TRACE_HWLAT = 15, TRACE_OSNOISE = 16, TRACE_TIMERLAT = 17, TRACE_RAW_DATA = 18, TRACE_FUNC_REPEATS = 19, __TRACE_LAST_TYPE = 20, }; enum track_item { TRACK_ALLOC = 0, TRACK_FREE = 1, }; enum translation_map { LAT1_MAP = 0, GRAF_MAP = 1, IBMPC_MAP = 2, USER_MAP = 3, FIRST_MAP = 0, LAST_MAP = 3, }; enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_UNSUPPORTED = 0, TRANSPARENT_HUGEPAGE_FLAG = 1, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 2, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 3, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 4, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 5, 
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 6, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 7, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 8, }; enum tsq_enum { TSQ_THROTTLED = 0, TSQ_QUEUED = 1, TCP_TSQ_DEFERRED = 2, TCP_WRITE_TIMER_DEFERRED = 3, TCP_DELACK_TIMER_DEFERRED = 4, TCP_MTU_REDUCED_DEFERRED = 5, TCP_ACK_DEFERRED = 6, }; enum tsq_flags { TSQF_THROTTLED = 1, TSQF_QUEUED = 2, TCPF_TSQ_DEFERRED = 4, TCPF_WRITE_TIMER_DEFERRED = 8, TCPF_DELACK_TIMER_DEFERRED = 16, TCPF_MTU_REDUCED_DEFERRED = 32, TCPF_ACK_DEFERRED = 64, }; enum tsx_ctrl_states { TSX_CTRL_ENABLE = 0, TSX_CTRL_DISABLE = 1, TSX_CTRL_RTM_ALWAYS_ABORT = 2, TSX_CTRL_NOT_SUPPORTED = 3, }; enum ttu_flags { TTU_SPLIT_HUGE_PMD = 4, TTU_IGNORE_MLOCK = 8, TTU_SYNC = 16, TTU_HWPOISON = 32, TTU_BATCH_FLUSH = 64, TTU_RMAP_LOCKED = 128, }; enum tty_flow_change { TTY_FLOW_NO_CHANGE = 0, TTY_THROTTLE_SAFE = 1, TTY_UNTHROTTLE_SAFE = 2, }; enum tunable_id { ETHTOOL_ID_UNSPEC = 0, ETHTOOL_RX_COPYBREAK = 1, ETHTOOL_TX_COPYBREAK = 2, ETHTOOL_PFC_PREVENTION_TOUT = 3, ETHTOOL_TX_COPYBREAK_BUF_SIZE = 4, __ETHTOOL_TUNABLE_COUNT = 5, }; enum tunable_type_id { ETHTOOL_TUNABLE_UNSPEC = 0, ETHTOOL_TUNABLE_U8 = 1, ETHTOOL_TUNABLE_U16 = 2, ETHTOOL_TUNABLE_U32 = 3, ETHTOOL_TUNABLE_U64 = 4, ETHTOOL_TUNABLE_STRING = 5, ETHTOOL_TUNABLE_S8 = 6, ETHTOOL_TUNABLE_S16 = 7, ETHTOOL_TUNABLE_S32 = 8, ETHTOOL_TUNABLE_S64 = 9, }; enum tunnel_encap_types { TUNNEL_ENCAP_NONE = 0, TUNNEL_ENCAP_FOU = 1, TUNNEL_ENCAP_GUE = 2, TUNNEL_ENCAP_MPLS = 3, }; enum txtime_flags { SOF_TXTIME_DEADLINE_MODE = 1, SOF_TXTIME_REPORT_ERRORS = 2, SOF_TXTIME_FLAGS_LAST = 2, SOF_TXTIME_FLAGS_MASK = 3, }; enum uart_pm_state { UART_PM_STATE_ON = 0, UART_PM_STATE_OFF = 3, UART_PM_STATE_UNDEFINED = 4, }; enum uclamp_id { UCLAMP_MIN = 0, UCLAMP_MAX = 1, UCLAMP_CNT = 2, }; enum ucode_state { UCODE_OK = 0, UCODE_NEW = 1, UCODE_NEW_SAFE = 2, UCODE_UPDATED = 3, UCODE_NFOUND = 4, UCODE_ERROR = 5, UCODE_TIMEOUT = 6, UCODE_OFFLINE = 7, }; enum ucount_type { UCOUNT_USER_NAMESPACES = 0, UCOUNT_PID_NAMESPACES = 1, UCOUNT_UTS_NAMESPACES = 2, UCOUNT_IPC_NAMESPACES = 3, UCOUNT_NET_NAMESPACES = 4, UCOUNT_MNT_NAMESPACES = 5, UCOUNT_CGROUP_NAMESPACES = 6, UCOUNT_TIME_NAMESPACES = 7, UCOUNT_INOTIFY_INSTANCES = 8, UCOUNT_INOTIFY_WATCHES = 9, UCOUNT_COUNTS = 10, }; enum udp_conntrack { UDP_CT_UNREPLIED = 0, UDP_CT_REPLIED = 1, UDP_CT_MAX = 2, }; enum udp_parsable_tunnel_type { UDP_TUNNEL_TYPE_VXLAN = 1, UDP_TUNNEL_TYPE_GENEVE = 2, UDP_TUNNEL_TYPE_VXLAN_GPE = 4, }; enum udp_tunnel_nic_info_flags { UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, }; enum udp_tunnel_nic_table_entry_flags { UDP_TUNNEL_NIC_ENTRY_ADD = 1, UDP_TUNNEL_NIC_ENTRY_DEL = 2, UDP_TUNNEL_NIC_ENTRY_OP_FAIL = 4, UDP_TUNNEL_NIC_ENTRY_FROZEN = 8, }; enum umh_disable_depth { UMH_ENABLED = 0, UMH_FREEZING = 1, UMH_DISABLED = 2, }; enum umount_tree_flags { UMOUNT_SYNC = 1, UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; enum uncore_access_type { UNCORE_ACCESS_MSR = 0, UNCORE_ACCESS_MMIO = 1, UNCORE_ACCESS_PCI = 2, UNCORE_ACCESS_MAX = 3, }; enum unix_vertex_index { UNIX_VERTEX_INDEX_MARK1 = 0, UNIX_VERTEX_INDEX_MARK2 = 1, UNIX_VERTEX_INDEX_START = 2, }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3, }; enum utf16_endian { UTF16_HOST_ENDIAN = 0, UTF16_LITTLE_ENDIAN = 1, UTF16_BIG_ENDIAN = 2, }; enum utf8_normalization { UTF8_NFDI = 0, UTF8_NFDICF = 1, UTF8_NMAX = 2, }; enum uts_proc { 
UTS_PROC_ARCH = 0, UTS_PROC_OSTYPE = 1, UTS_PROC_OSRELEASE = 2, UTS_PROC_VERSION = 3, UTS_PROC_HOSTNAME = 4, UTS_PROC_DOMAINNAME = 5, }; enum uv_system_type { UV_NONE = 0, UV_LEGACY_APIC = 1, UV_X2APIC = 2, }; enum v4l2_av1_segment_feature { V4L2_AV1_SEG_LVL_ALT_Q = 0, V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1, V4L2_AV1_SEG_LVL_REF_FRAME = 5, V4L2_AV1_SEG_LVL_REF_SKIP = 6, V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7, V4L2_AV1_SEG_LVL_MAX = 8, }; enum v4l2_fwnode_bus_type { V4L2_FWNODE_BUS_TYPE_GUESS = 0, V4L2_FWNODE_BUS_TYPE_CSI2_CPHY = 1, V4L2_FWNODE_BUS_TYPE_CSI1 = 2, V4L2_FWNODE_BUS_TYPE_CCP2 = 3, V4L2_FWNODE_BUS_TYPE_CSI2_DPHY = 4, V4L2_FWNODE_BUS_TYPE_PARALLEL = 5, V4L2_FWNODE_BUS_TYPE_BT656 = 6, V4L2_FWNODE_BUS_TYPE_DPI = 7, NR_OF_V4L2_FWNODE_BUS_TYPE = 8, }; enum v4l2_preemphasis { V4L2_PREEMPHASIS_DISABLED = 0, V4L2_PREEMPHASIS_50_uS = 1, V4L2_PREEMPHASIS_75_uS = 2, }; enum vc_ctl_state { ESnormal = 0, ESesc = 1, ESsquare = 2, ESgetpars = 3, ESfunckey = 4, EShash = 5, ESsetG0 = 6, ESsetG1 = 7, ESpercent = 8, EScsiignore = 9, ESnonstd = 10, ESpalette = 11, ESosc = 12, ESANSI_first = 12, ESapc = 13, ESpm = 14, ESdcs = 15, ESANSI_last = 15, }; enum vc_intensity { VCI_HALF_BRIGHT = 0, VCI_NORMAL = 1, VCI_BOLD = 2, VCI_MASK = 3, }; enum vdso_clock_mode { VDSO_CLOCKMODE_NONE = 0, VDSO_CLOCKMODE_TSC = 1, VDSO_CLOCKMODE_PVCLOCK = 2, VDSO_CLOCKMODE_HVCLOCK = 3, VDSO_CLOCKMODE_MAX = 4, VDSO_CLOCKMODE_TIMENS = 2147483647, }; enum verifier_phase { CHECK_META = 0, CHECK_TYPE = 1, }; enum vesa_blank_mode { VESA_NO_BLANKING = 0, VESA_VSYNC_SUSPEND = 1, VESA_HSYNC_SUSPEND = 2, VESA_POWERDOWN = 3, VESA_BLANK_MAX = 3, }; enum virtio_vsock_op { VIRTIO_VSOCK_OP_INVALID = 0, VIRTIO_VSOCK_OP_REQUEST = 1, VIRTIO_VSOCK_OP_RESPONSE = 2, VIRTIO_VSOCK_OP_RST = 3, VIRTIO_VSOCK_OP_SHUTDOWN = 4, VIRTIO_VSOCK_OP_RW = 5, VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6, VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7, }; enum virtio_vsock_rw { VIRTIO_VSOCK_SEQ_EOM = 1, VIRTIO_VSOCK_SEQ_EOR = 2, }; enum virtio_vsock_shutdown { VIRTIO_VSOCK_SHUTDOWN_RCV = 1, VIRTIO_VSOCK_SHUTDOWN_SEND = 2, }; enum virtio_vsock_type { VIRTIO_VSOCK_TYPE_STREAM = 1, VIRTIO_VSOCK_TYPE_SEQPACKET = 2, }; enum visit_state { NOT_VISITED = 0, VISITED = 1, RESOLVED = 2, }; enum vlan_flags { VLAN_FLAG_REORDER_HDR = 1, VLAN_FLAG_GVRP = 2, VLAN_FLAG_LOOSE_BINDING = 4, VLAN_FLAG_MVRP = 8, VLAN_FLAG_BRIDGE_BINDING = 16, }; enum vlan_ioctl_cmds { ADD_VLAN_CMD = 0, DEL_VLAN_CMD = 1, SET_VLAN_INGRESS_PRIORITY_CMD = 2, SET_VLAN_EGRESS_PRIORITY_CMD = 3, GET_VLAN_INGRESS_PRIORITY_CMD = 4, GET_VLAN_EGRESS_PRIORITY_CMD = 5, SET_VLAN_NAME_TYPE_CMD = 6, SET_VLAN_FLAG_CMD = 7, GET_VLAN_REALDEV_NAME_CMD = 8, GET_VLAN_VID_CMD = 9, }; enum vlan_name_types { VLAN_NAME_TYPE_PLUS_VID = 0, VLAN_NAME_TYPE_RAW_PLUS_VID = 1, VLAN_NAME_TYPE_PLUS_VID_NO_PAD = 2, VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD = 3, VLAN_NAME_TYPE_HIGHEST = 4, }; enum vlan_protos { VLAN_PROTO_8021Q = 0, VLAN_PROTO_8021AD = 1, VLAN_PROTO_NUM = 2, }; enum vm_event_item { PGPGIN = 0, PGPGOUT = 1, PSWPIN = 2, PSWPOUT = 3, PGALLOC_DMA = 4, PGALLOC_DMA32 = 5, PGALLOC_NORMAL = 6, PGALLOC_MOVABLE = 7, ALLOCSTALL_DMA = 8, ALLOCSTALL_DMA32 = 9, ALLOCSTALL_NORMAL = 10, ALLOCSTALL_MOVABLE = 11, PGSCAN_SKIP_DMA = 12, PGSCAN_SKIP_DMA32 = 13, PGSCAN_SKIP_NORMAL = 14, PGSCAN_SKIP_MOVABLE = 15, PGFREE = 16, PGACTIVATE = 17, PGDEACTIVATE = 18, PGLAZYFREE = 19, PGFAULT = 20, PGMAJFAULT = 21, PGLAZYFREED = 22, PGREFILL = 23, PGREUSE = 24, PGSTEAL_KSWAPD = 25, PGSTEAL_DIRECT = 26, PGSTEAL_KHUGEPAGED = 27, PGSCAN_KSWAPD = 28, PGSCAN_DIRECT = 29, PGSCAN_KHUGEPAGED = 30, 
PGSCAN_DIRECT_THROTTLE = 31, PGSCAN_ANON = 32, PGSCAN_FILE = 33, PGSTEAL_ANON = 34, PGSTEAL_FILE = 35, PGSCAN_ZONE_RECLAIM_SUCCESS = 36, PGSCAN_ZONE_RECLAIM_FAILED = 37, PGINODESTEAL = 38, SLABS_SCANNED = 39, KSWAPD_INODESTEAL = 40, KSWAPD_LOW_WMARK_HIT_QUICKLY = 41, KSWAPD_HIGH_WMARK_HIT_QUICKLY = 42, PAGEOUTRUN = 43, PGROTATED = 44, DROP_PAGECACHE = 45, DROP_SLAB = 46, OOM_KILL = 47, NUMA_PTE_UPDATES = 48, NUMA_HUGE_PTE_UPDATES = 49, NUMA_HINT_FAULTS = 50, NUMA_HINT_FAULTS_LOCAL = 51, NUMA_PAGE_MIGRATE = 52, PGMIGRATE_SUCCESS = 53, PGMIGRATE_FAIL = 54, THP_MIGRATION_SUCCESS = 55, THP_MIGRATION_FAIL = 56, THP_MIGRATION_SPLIT = 57, COMPACTMIGRATE_SCANNED = 58, COMPACTFREE_SCANNED = 59, COMPACTISOLATED = 60, COMPACTSTALL = 61, COMPACTFAIL = 62, COMPACTSUCCESS = 63, KCOMPACTD_WAKE = 64, KCOMPACTD_MIGRATE_SCANNED = 65, KCOMPACTD_FREE_SCANNED = 66, HTLB_BUDDY_PGALLOC = 67, HTLB_BUDDY_PGALLOC_FAIL = 68, CMA_ALLOC_SUCCESS = 69, CMA_ALLOC_FAIL = 70, UNEVICTABLE_PGCULLED = 71, UNEVICTABLE_PGSCANNED = 72, UNEVICTABLE_PGRESCUED = 73, UNEVICTABLE_PGMLOCKED = 74, UNEVICTABLE_PGMUNLOCKED = 75, UNEVICTABLE_PGCLEARED = 76, UNEVICTABLE_PGSTRANDED = 77, THP_FAULT_ALLOC = 78, THP_FAULT_FALLBACK = 79, THP_FAULT_FALLBACK_CHARGE = 80, THP_COLLAPSE_ALLOC = 81, THP_COLLAPSE_ALLOC_FAILED = 82, THP_FILE_ALLOC = 83, THP_FILE_FALLBACK = 84, THP_FILE_FALLBACK_CHARGE = 85, THP_FILE_MAPPED = 86, THP_SPLIT_PAGE = 87, THP_SPLIT_PAGE_FAILED = 88, THP_DEFERRED_SPLIT_PAGE = 89, THP_UNDERUSED_SPLIT_PAGE = 90, THP_SPLIT_PMD = 91, THP_SCAN_EXCEED_NONE_PTE = 92, THP_SCAN_EXCEED_SWAP_PTE = 93, THP_SCAN_EXCEED_SHARED_PTE = 94, THP_SPLIT_PUD = 95, THP_ZERO_PAGE_ALLOC = 96, THP_ZERO_PAGE_ALLOC_FAILED = 97, THP_SWPOUT = 98, THP_SWPOUT_FALLBACK = 99, SWAP_RA = 100, SWAP_RA_HIT = 101, KSM_SWPIN_COPY = 102, COW_KSM = 103, DIRECT_MAP_LEVEL2_SPLIT = 104, DIRECT_MAP_LEVEL3_SPLIT = 105, NR_VM_EVENT_ITEMS = 106, }; enum vm_fault_reason { VM_FAULT_OOM = 1, VM_FAULT_SIGBUS = 2, VM_FAULT_MAJOR = 4, VM_FAULT_HWPOISON = 16, VM_FAULT_HWPOISON_LARGE = 32, VM_FAULT_SIGSEGV = 64, VM_FAULT_NOPAGE = 256, VM_FAULT_LOCKED = 512, VM_FAULT_RETRY = 1024, VM_FAULT_FALLBACK = 2048, VM_FAULT_DONE_COW = 4096, VM_FAULT_NEEDDSYNC = 8192, VM_FAULT_COMPLETED = 16384, VM_FAULT_HINDEX_MASK = 983040, }; enum vm_stat_item { NR_DIRTY_THRESHOLD = 0, NR_DIRTY_BG_THRESHOLD = 1, NR_MEMMAP_PAGES = 2, NR_MEMMAP_BOOT_PAGES = 3, NR_VM_STAT_ITEMS = 4, }; enum vma_merge_state { VMA_MERGE_START = 0, VMA_MERGE_ERROR_NOMEM = 1, VMA_MERGE_NOMERGE = 2, VMA_MERGE_SUCCESS = 3, }; enum vma_resv_mode { VMA_NEEDS_RESV = 0, VMA_COMMIT_RESV = 1, VMA_END_RESV = 2, VMA_ADD_RESV = 3, VMA_DEL_RESV = 4, }; enum vmpressure_levels { VMPRESSURE_LOW = 0, VMPRESSURE_MEDIUM = 1, VMPRESSURE_CRITICAL = 2, VMPRESSURE_NUM_LEVELS = 3, }; enum vmpressure_modes { VMPRESSURE_NO_PASSTHROUGH = 0, VMPRESSURE_HIERARCHY = 1, VMPRESSURE_LOCAL = 2, VMPRESSURE_NUM_MODES = 3, }; enum vmscan_throttle_state { VMSCAN_THROTTLE_WRITEBACK = 0, VMSCAN_THROTTLE_ISOLATED = 1, VMSCAN_THROTTLE_NOPROGRESS = 2, VMSCAN_THROTTLE_CONGESTED = 3, NR_VMSCAN_THROTTLE = 4, }; enum vmx_feature_leafs { MISC_FEATURES = 0, PRIMARY_CTLS = 1, SECONDARY_CTLS = 2, TERTIARY_CTLS_LOW = 3, TERTIARY_CTLS_HIGH = 4, NR_VMX_FEATURE_WORDS = 5, }; enum vmx_l1d_flush_state { VMENTER_L1D_FLUSH_AUTO = 0, VMENTER_L1D_FLUSH_NEVER = 1, VMENTER_L1D_FLUSH_COND = 2, VMENTER_L1D_FLUSH_ALWAYS = 3, VMENTER_L1D_FLUSH_EPT_DISABLED = 4, VMENTER_L1D_FLUSH_NOT_REQUIRED = 5, }; enum wb_reason { WB_REASON_BACKGROUND = 0, WB_REASON_VMSCAN = 1, WB_REASON_SYNC = 2, 
WB_REASON_PERIODIC = 3, WB_REASON_LAPTOP_TIMER = 4, WB_REASON_FS_FREE_SPACE = 5, WB_REASON_FORKER_THREAD = 6, WB_REASON_FOREIGN_FLUSH = 7, WB_REASON_MAX = 8, }; enum wb_stat_item { WB_RECLAIMABLE = 0, WB_WRITEBACK = 1, WB_DIRTIED = 2, WB_WRITTEN = 3, NR_WB_STAT_ITEMS = 4, }; enum wb_state { WB_registered = 0, WB_writeback_running = 1, WB_has_dirty_io = 2, WB_start_all = 3, }; enum wd_read_status { WD_READ_SUCCESS = 0, WD_READ_UNSTABLE = 1, WD_READ_SKIP = 2, }; enum which_selector { FS = 0, GS = 1, }; enum work_bits { WORK_STRUCT_PENDING_BIT = 0, WORK_STRUCT_INACTIVE_BIT = 1, WORK_STRUCT_PWQ_BIT = 2, WORK_STRUCT_LINKED_BIT = 3, WORK_STRUCT_FLAG_BITS = 4, WORK_STRUCT_COLOR_SHIFT = 4, WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PWQ_SHIFT = 8, WORK_OFFQ_FLAG_SHIFT = 4, WORK_OFFQ_BH_BIT = 4, WORK_OFFQ_FLAG_END = 5, WORK_OFFQ_FLAG_BITS = 1, WORK_OFFQ_DISABLE_SHIFT = 5, WORK_OFFQ_DISABLE_BITS = 16, WORK_OFFQ_POOL_SHIFT = 21, WORK_OFFQ_LEFT = 43, WORK_OFFQ_POOL_BITS = 31, }; enum work_cancel_flags { WORK_CANCEL_DELAYED = 1, WORK_CANCEL_DISABLE = 2, }; enum work_flags { WORK_STRUCT_PENDING = 1, WORK_STRUCT_INACTIVE = 2, WORK_STRUCT_PWQ = 4, WORK_STRUCT_LINKED = 8, WORK_STRUCT_STATIC = 0, }; enum worker_flags { WORKER_DIE = 2, WORKER_IDLE = 4, WORKER_PREP = 8, WORKER_CPU_INTENSIVE = 64, WORKER_UNBOUND = 128, WORKER_REBOUND = 256, WORKER_NOT_RUNNING = 456, }; enum worker_pool_flags { POOL_BH = 1, POOL_MANAGER_ACTIVE = 2, POOL_DISASSOCIATED = 4, POOL_BH_DRAINING = 8, }; enum wq_affn_scope { WQ_AFFN_DFL = 0, WQ_AFFN_CPU = 1, WQ_AFFN_SMT = 2, WQ_AFFN_CACHE = 3, WQ_AFFN_NUMA = 4, WQ_AFFN_SYSTEM = 5, WQ_AFFN_NR_TYPES = 6, }; enum wq_consts { WQ_MAX_ACTIVE = 512, WQ_UNBOUND_MAX_ACTIVE = 512, WQ_DFL_ACTIVE = 256, WQ_DFL_MIN_ACTIVE = 8, }; enum wq_flags { WQ_BH = 1, WQ_UNBOUND = 2, WQ_FREEZABLE = 4, WQ_MEM_RECLAIM = 8, WQ_HIGHPRI = 16, WQ_CPU_INTENSIVE = 32, WQ_SYSFS = 64, WQ_POWER_EFFICIENT = 128, __WQ_DESTROYING = 32768, __WQ_DRAINING = 65536, __WQ_ORDERED = 131072, __WQ_LEGACY = 262144, __WQ_BH_ALLOWS = 17, }; enum wq_internal_consts { NR_STD_WORKER_POOLS = 2, UNBOUND_POOL_HASH_ORDER = 6, BUSY_WORKER_HASH_ORDER = 6, MAX_IDLE_WORKERS_RATIO = 4, IDLE_WORKER_TIMEOUT = 300000, MAYDAY_INITIAL_TIMEOUT = 10, MAYDAY_INTERVAL = 100, CREATE_COOLDOWN = 1000, RESCUER_NICE_LEVEL = -20, HIGHPRI_NICE_LEVEL = -20, WQ_NAME_LEN = 32, WORKER_ID_LEN = 42, }; enum wq_misc_consts { WORK_NR_COLORS = 16, WORK_CPU_UNBOUND = 128, WORK_BUSY_PENDING = 1, WORK_BUSY_RUNNING = 2, WORKER_DESC_LEN = 32, }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1, }; enum x509_actions { ACT_x509_extract_key_data = 0, ACT_x509_extract_name_segment = 1, ACT_x509_note_OID = 2, ACT_x509_note_issuer = 3, ACT_x509_note_not_after = 4, ACT_x509_note_not_before = 5, ACT_x509_note_params = 6, ACT_x509_note_serial = 7, ACT_x509_note_sig_algo = 8, ACT_x509_note_signature = 9, ACT_x509_note_subject = 10, ACT_x509_note_tbs_certificate = 11, ACT_x509_process_extension = 12, NR__x509_actions = 13, }; enum x509_akid_actions { ACT_x509_akid_note_kid = 0, ACT_x509_akid_note_name = 1, ACT_x509_akid_note_serial = 2, ACT_x509_extract_name_segment___2 = 3, ACT_x509_note_OID___2 = 4, NR__x509_akid_actions = 5, }; enum x86_hardware_subarch { X86_SUBARCH_PC = 0, X86_SUBARCH_LGUEST = 1, X86_SUBARCH_XEN = 2, X86_SUBARCH_INTEL_MID = 3, X86_SUBARCH_CE4100 = 4, X86_NR_SUBARCHS = 5, }; enum x86_hypervisor_type { X86_HYPER_NATIVE = 0, X86_HYPER_VMWARE = 1, X86_HYPER_MS_HYPERV = 2, X86_HYPER_XEN_PV = 3, X86_HYPER_XEN_HVM = 4, X86_HYPER_KVM = 5, X86_HYPER_JAILHOUSE = 6, 
X86_HYPER_ACRN = 7, }; enum x86_intercept_stage; enum x86_legacy_i8042_state { X86_LEGACY_I8042_PLATFORM_ABSENT = 0, X86_LEGACY_I8042_FIRMWARE_ABSENT = 1, X86_LEGACY_I8042_EXPECTED_PRESENT = 2, }; enum x86_pf_error_code { X86_PF_PROT = 1, X86_PF_WRITE = 2, X86_PF_USER = 4, X86_PF_RSVD = 8, X86_PF_INSTR = 16, X86_PF_PK = 32, X86_PF_SHSTK = 64, X86_PF_SGX = 32768, X86_PF_RMP = 2147483648, }; enum x86_regset_32 { REGSET32_GENERAL = 0, REGSET32_FP = 1, REGSET32_XFP = 2, REGSET32_XSTATE = 3, REGSET32_TLS = 4, REGSET32_IOPERM = 5, }; enum x86_regset_64 { REGSET64_GENERAL = 0, REGSET64_FP = 1, REGSET64_IOPERM = 2, REGSET64_XSTATE = 3, REGSET64_SSP = 4, }; enum x86_topology_domains { TOPO_SMT_DOMAIN = 0, TOPO_CORE_DOMAIN = 1, TOPO_MODULE_DOMAIN = 2, TOPO_TILE_DOMAIN = 3, TOPO_DIE_DOMAIN = 4, TOPO_DIEGRP_DOMAIN = 5, TOPO_PKG_DOMAIN = 6, TOPO_MAX_DOMAIN = 7, }; enum xa_lock_type { XA_LOCK_IRQ = 1, XA_LOCK_BH = 2, }; enum xdp_action { XDP_ABORTED = 0, XDP_DROP = 1, XDP_PASS = 2, XDP_TX = 3, XDP_REDIRECT = 4, }; enum xdp_buff_flags { XDP_FLAGS_HAS_FRAGS = 1, XDP_FLAGS_FRAGS_PF_MEMALLOC = 2, }; enum xdp_mem_type { MEM_TYPE_PAGE_SHARED = 0, MEM_TYPE_PAGE_ORDER0 = 1, MEM_TYPE_PAGE_POOL = 2, MEM_TYPE_XSK_BUFF_POOL = 3, MEM_TYPE_MAX = 4, }; enum xdp_rss_hash_type { XDP_RSS_L3_IPV4 = 1, XDP_RSS_L3_IPV6 = 2, XDP_RSS_L3_DYNHDR = 4, XDP_RSS_L4 = 8, XDP_RSS_L4_TCP = 16, XDP_RSS_L4_UDP = 32, XDP_RSS_L4_SCTP = 64, XDP_RSS_L4_IPSEC = 128, XDP_RSS_L4_ICMP = 256, XDP_RSS_TYPE_NONE = 0, XDP_RSS_TYPE_L2 = 0, XDP_RSS_TYPE_L3_IPV4 = 1, XDP_RSS_TYPE_L3_IPV6 = 2, XDP_RSS_TYPE_L3_IPV4_OPT = 5, XDP_RSS_TYPE_L3_IPV6_EX = 6, XDP_RSS_TYPE_L4_ANY = 8, XDP_RSS_TYPE_L4_IPV4_TCP = 25, XDP_RSS_TYPE_L4_IPV4_UDP = 41, XDP_RSS_TYPE_L4_IPV4_SCTP = 73, XDP_RSS_TYPE_L4_IPV4_IPSEC = 137, XDP_RSS_TYPE_L4_IPV4_ICMP = 265, XDP_RSS_TYPE_L4_IPV6_TCP = 26, XDP_RSS_TYPE_L4_IPV6_UDP = 42, XDP_RSS_TYPE_L4_IPV6_SCTP = 74, XDP_RSS_TYPE_L4_IPV6_IPSEC = 138, XDP_RSS_TYPE_L4_IPV6_ICMP = 266, XDP_RSS_TYPE_L4_IPV6_TCP_EX = 30, XDP_RSS_TYPE_L4_IPV6_UDP_EX = 46, XDP_RSS_TYPE_L4_IPV6_SCTP_EX = 78, }; enum xdp_rx_metadata { XDP_METADATA_KFUNC_RX_TIMESTAMP = 0, XDP_METADATA_KFUNC_RX_HASH = 1, XDP_METADATA_KFUNC_RX_VLAN_TAG = 2, MAX_XDP_METADATA_KFUNC = 3, }; enum xen_domain_type { XEN_NATIVE = 0, XEN_PV_DOMAIN = 1, XEN_HVM_DOMAIN = 2, }; enum xfeature { XFEATURE_FP = 0, XFEATURE_SSE = 1, XFEATURE_YMM = 2, XFEATURE_BNDREGS = 3, XFEATURE_BNDCSR = 4, XFEATURE_OPMASK = 5, XFEATURE_ZMM_Hi256 = 6, XFEATURE_Hi16_ZMM = 7, XFEATURE_PT_UNIMPLEMENTED_SO_FAR = 8, XFEATURE_PKRU = 9, XFEATURE_PASID = 10, XFEATURE_CET_USER = 11, XFEATURE_CET_KERNEL_UNUSED = 12, XFEATURE_RSRVD_COMP_13 = 13, XFEATURE_RSRVD_COMP_14 = 14, XFEATURE_LBR = 15, XFEATURE_RSRVD_COMP_16 = 16, XFEATURE_XTILE_CFG = 17, XFEATURE_XTILE_DATA = 18, XFEATURE_MAX = 19, }; enum xfrm_ae_ftype_t { XFRM_AE_UNSPEC = 0, XFRM_AE_RTHR = 1, XFRM_AE_RVAL = 2, XFRM_AE_LVAL = 4, XFRM_AE_ETHR = 8, XFRM_AE_CR = 16, XFRM_AE_CE = 32, XFRM_AE_CU = 64, __XFRM_AE_MAX = 65, }; enum xfrm_attr_type_t { XFRMA_UNSPEC = 0, XFRMA_ALG_AUTH = 1, XFRMA_ALG_CRYPT = 2, XFRMA_ALG_COMP = 3, XFRMA_ENCAP = 4, XFRMA_TMPL = 5, XFRMA_SA = 6, XFRMA_POLICY = 7, XFRMA_SEC_CTX = 8, XFRMA_LTIME_VAL = 9, XFRMA_REPLAY_VAL = 10, XFRMA_REPLAY_THRESH = 11, XFRMA_ETIMER_THRESH = 12, XFRMA_SRCADDR = 13, XFRMA_COADDR = 14, XFRMA_LASTUSED = 15, XFRMA_POLICY_TYPE = 16, XFRMA_MIGRATE = 17, XFRMA_ALG_AEAD = 18, XFRMA_KMADDRESS = 19, XFRMA_ALG_AUTH_TRUNC = 20, XFRMA_MARK = 21, XFRMA_TFCPAD = 22, XFRMA_REPLAY_ESN_VAL = 23, XFRMA_SA_EXTRA_FLAGS = 24, XFRMA_PROTO = 
25, XFRMA_ADDRESS_FILTER = 26, XFRMA_PAD = 27, XFRMA_OFFLOAD_DEV = 28, XFRMA_SET_MARK = 29, XFRMA_SET_MARK_MASK = 30, XFRMA_IF_ID = 31, XFRMA_MTIMER_THRESH = 32, XFRMA_SA_DIR = 33, XFRMA_NAT_KEEPALIVE_INTERVAL = 34, __XFRMA_MAX = 35, }; enum xfrm_nlgroups { XFRMNLGRP_NONE = 0, XFRMNLGRP_ACQUIRE = 1, XFRMNLGRP_EXPIRE = 2, XFRMNLGRP_SA = 3, XFRMNLGRP_POLICY = 4, XFRMNLGRP_AEVENTS = 5, XFRMNLGRP_REPORT = 6, XFRMNLGRP_MIGRATE = 7, XFRMNLGRP_MAPPING = 8, __XFRMNLGRP_MAX = 9, }; enum xfrm_pol_inexact_candidate_type { XFRM_POL_CAND_BOTH = 0, XFRM_POL_CAND_SADDR = 1, XFRM_POL_CAND_DADDR = 2, XFRM_POL_CAND_ANY = 3, XFRM_POL_CAND_MAX = 4, }; enum xfrm_replay_mode { XFRM_REPLAY_MODE_LEGACY = 0, XFRM_REPLAY_MODE_BMP = 1, XFRM_REPLAY_MODE_ESN = 2, }; enum xfrm_sa_dir { XFRM_SA_DIR_IN = 1, XFRM_SA_DIR_OUT = 2, }; enum xfrm_sadattr_type_t { XFRMA_SAD_UNSPEC = 0, XFRMA_SAD_CNT = 1, XFRMA_SAD_HINFO = 2, __XFRMA_SAD_MAX = 3, }; enum xfrm_spdattr_type_t { XFRMA_SPD_UNSPEC = 0, XFRMA_SPD_INFO = 1, XFRMA_SPD_HINFO = 2, XFRMA_SPD_IPV4_HTHRESH = 3, XFRMA_SPD_IPV6_HTHRESH = 4, __XFRMA_SPD_MAX = 5, }; enum xps_map_type { XPS_CPUS = 0, XPS_RXQS = 1, XPS_MAPS_MAX = 2, }; enum xstate_copy_mode { XSTATE_COPY_FP = 0, XSTATE_COPY_FX = 1, XSTATE_COPY_XSAVE = 2, }; enum xt_bpf_modes { XT_BPF_MODE_BYTECODE = 0, XT_BPF_MODE_FD_PINNED = 1, XT_BPF_MODE_FD_ELF = 2, }; enum xt_statistic_flags { XT_STATISTIC_INVERT = 1, }; enum xt_statistic_mode { XT_STATISTIC_MODE_RANDOM = 0, XT_STATISTIC_MODE_NTH = 1, __XT_STATISTIC_MODE_MAX = 2, }; enum xz_check { XZ_CHECK_NONE = 0, XZ_CHECK_CRC32 = 1, XZ_CHECK_CRC64 = 4, XZ_CHECK_SHA256 = 10, }; enum xz_mode { XZ_SINGLE = 0, XZ_PREALLOC = 1, XZ_DYNALLOC = 2, }; enum xz_ret { XZ_OK = 0, XZ_STREAM_END = 1, XZ_UNSUPPORTED_CHECK = 2, XZ_MEM_ERROR = 3, XZ_MEMLIMIT_ERROR = 4, XZ_FORMAT_ERROR = 5, XZ_OPTIONS_ERROR = 6, XZ_DATA_ERROR = 7, XZ_BUF_ERROR = 8, }; enum zone_flags { ZONE_BOOSTED_WATERMARK = 0, ZONE_RECLAIM_ACTIVE = 1, ZONE_BELOW_HIGH = 2, }; enum zone_stat_item { NR_FREE_PAGES = 0, NR_ZONE_LRU_BASE = 1, NR_ZONE_INACTIVE_ANON = 1, NR_ZONE_ACTIVE_ANON = 2, NR_ZONE_INACTIVE_FILE = 3, NR_ZONE_ACTIVE_FILE = 4, NR_ZONE_UNEVICTABLE = 5, NR_ZONE_WRITE_PENDING = 6, NR_MLOCK = 7, NR_BOUNCE = 8, NR_FREE_CMA_PAGES = 9, NR_VM_ZONE_STAT_ITEMS = 10, }; enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4, }; enum zone_watermarks { WMARK_MIN = 0, WMARK_LOW = 1, WMARK_HIGH = 2, WMARK_PROMO = 3, NR_WMARK = 4, }; typedef _Bool bool; typedef __int128 unsigned __u128; typedef __u128 u128; typedef u128 freelist_full_t; typedef char acpi_bus_id[8]; typedef char acpi_device_class[20]; typedef char acpi_device_name[40]; typedef char *acpi_string; typedef const char (* const ethnl_string_array_t)[32]; typedef int __kernel_clockid_t; typedef int __kernel_daddr_t; typedef int __kernel_ipc_pid_t; typedef int __kernel_key_t; typedef int __kernel_mqd_t; typedef int __kernel_pid_t; typedef int __kernel_rwf_t; typedef int __kernel_timer_t; typedef int __s32; typedef int class_get_unused_fd_t; typedef __kernel_clockid_t clockid_t; typedef __s32 s32; typedef s32 codel_tdiff_t; typedef s32 compat_int_t; typedef s32 compat_ssize_t; typedef int cydp_t; typedef s32 dma_cookie_t; typedef int ext4_grpblk_t; typedef int folio_walk_flags_t; typedef int fpb_t; typedef int fpi_t; typedef int initcall_entry_t; typedef int insn_value_t; typedef s32 int32_t; typedef int32_t key_serial_t; typedef __kernel_key_t key_t; typedef int mpi_size_t; typedef __kernel_mqd_t mqd_t; typedef s32 
old_time32_t; typedef int pci_power_t; typedef __kernel_pid_t pid_t; typedef int rmap_t; typedef __kernel_rwf_t rwf_t; typedef __s32 sctp_assoc_t; typedef int suspend_state_t; typedef __kernel_timer_t timer_t; typedef const int tracepoint_ptr_t; typedef long int __kernel_long_t; typedef __kernel_long_t __kernel_clock_t; typedef __kernel_long_t __kernel_off_t; typedef __kernel_long_t __kernel_old_time_t; typedef __kernel_long_t __kernel_ptrdiff_t; typedef __kernel_long_t __kernel_ssize_t; typedef __kernel_long_t __kernel_suseconds_t; typedef __kernel_clock_t clock_t; typedef long int intptr_t; typedef long int mpi_limb_signed_t; typedef __kernel_off_t off_t; typedef __kernel_ptrdiff_t ptrdiff_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_suseconds_t suseconds_t; typedef long long int __s64; typedef __s64 Elf64_Sxword; typedef long long int __kernel_loff_t; typedef long long int __kernel_time64_t; typedef __s64 s64; typedef s64 int64_t; typedef s64 ktime_t; typedef __kernel_loff_t loff_t; typedef long long int qsize_t; typedef __s64 time64_t; typedef long long unsigned int __u64; typedef __u64 Elf64_Addr; typedef __u64 Elf64_Off; typedef __u64 Elf64_Xword; typedef __u64 u64; typedef u64 uint64_t; typedef uint64_t U64; typedef __u64 __addrpair; typedef __u64 __be64; typedef __u64 __le64; typedef u64 acpi_bus_address; typedef u64 acpi_integer; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; typedef u64 acpi_size; typedef u64 async_cookie_t; typedef u64 blkcnt_t; typedef long long unsigned int cycles_t; typedef u64 dma_addr_t; typedef long long unsigned int ext4_fsblk_t; typedef u64 gfn_t; typedef u64 gpa_t; typedef u64 hfn_t; typedef u64 hpa_t; typedef u64 io_req_flags_t; typedef hfn_t kvm_pfn_t; typedef u64 netdev_features_t; typedef u64 pci_bus_addr_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; typedef u64 sci_t; typedef u64 sector_t; typedef __u64 timeu64_t; typedef u64 u_int64_t; typedef u64 upf_t; typedef uint64_t vli_type; typedef long unsigned int mpi_limb_t; typedef mpi_limb_t UWtype; typedef long unsigned int __kernel_old_dev_t; typedef long unsigned int __kernel_ulong_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_ulong_t aio_context_t; typedef long unsigned int dax_entry_t; typedef long unsigned int efi_status_t; typedef long unsigned int elf_greg_t; typedef elf_greg_t elf_gregset_t[27]; typedef long unsigned int gva_t; typedef __kernel_ulong_t ino_t; typedef long unsigned int irq_hw_number_t; typedef long unsigned int kernel_ulong_t; typedef long unsigned int kimage_entry_t; typedef long unsigned int mce_banks_t[1]; typedef mpi_limb_t *mpi_ptr_t; typedef long unsigned int netmem_ref; typedef long unsigned int old_sigset_t; typedef long unsigned int p4dval_t; typedef long unsigned int perf_trace_t[1024]; typedef long unsigned int pgdval_t; typedef long unsigned int pgprotval_t; typedef long unsigned int pmdval_t; typedef long unsigned int pte_marker; typedef long unsigned int pteval_t; typedef long unsigned int pudval_t; typedef __kernel_size_t size_t; typedef long unsigned int uLong; typedef long unsigned int uintptr_t; typedef long unsigned int ulong; typedef uintptr_t uptrval; typedef long unsigned int vm_flags_t; typedef short int __s16; typedef __s16 s16; typedef s16 int16_t; typedef int16_t S16; typedef short unsigned int __u16; typedef __u16 Elf32_Half; typedef __u16 Elf64_Half; typedef __u16 u16; typedef u16 uint16_t; typedef uint16_t U16; typedef __u16 __be16; typedef short unsigned int __kernel_gid16_t; typedef 
short unsigned int __kernel_sa_family_t; typedef short unsigned int __kernel_uid16_t; typedef __u16 __le16; typedef __u16 __sum16; typedef __u16 __virtio16; typedef u16 acpi_owner_id; typedef u16 acpi_rs_length; typedef __u16 comp_t; typedef u16 efi_char16_t; typedef __kernel_gid16_t gid16_t; typedef short unsigned int pci_bus_flags_t; typedef short unsigned int pci_dev_flags_t; typedef __kernel_sa_family_t sa_family_t; typedef u16 u_int16_t; typedef short unsigned int u_short; typedef u16 ucs2_char_t; typedef __kernel_uid16_t uid16_t; typedef short unsigned int umode_t; typedef short unsigned int ushort; typedef short unsigned int vifi_t; typedef u16 wchar_t; typedef signed char __s8; typedef __s8 s8; typedef unsigned char __u8; typedef __u8 u8; typedef u8 uint8_t; typedef uint8_t BYTE; typedef unsigned char Byte; typedef uint8_t U8; typedef u8 acpi_adr_space_type; typedef u8 blk_status_t; typedef unsigned char cc_t; typedef u8 dscp_t; typedef u8 efi_bool_t; typedef unsigned char insn_byte_t; typedef u8 kprobe_opcode_t; typedef __u8 mtrr_type; typedef u8 retpoline_thunk_t[32]; typedef u8 rmap_age_t; typedef unsigned char u8___2; typedef unsigned char u_char; typedef u8 u_int8_t; typedef u8 uprobe_opcode_t; typedef unsigned int __u32; typedef __u32 Elf32_Addr; typedef __u32 Elf32_Off; typedef __u32 Elf32_Word; typedef __u32 Elf64_Word; typedef unsigned int FSE_DTable; typedef __u32 u32; typedef u32 uint32_t; typedef uint32_t U32; typedef U32 HUF_DTable; typedef unsigned int UHWtype; typedef __u32 __be32; typedef u32 __kernel_dev_t; typedef unsigned int __kernel_gid32_t; typedef unsigned int __kernel_gid_t; typedef unsigned int __kernel_mode_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_uid_t; typedef __u32 __le32; typedef unsigned int __poll_t; typedef __u32 __portpair; typedef __u32 __wsum; typedef u32 acpi_event_status; typedef u32 acpi_mutex_handle; typedef u32 acpi_name; typedef u32 acpi_object_type; typedef u32 acpi_rsdesc_size; typedef u32 acpi_status; typedef unsigned int blk_features_t; typedef unsigned int blk_flags_t; typedef unsigned int blk_insert_t; typedef unsigned int blk_mode_t; typedef __u32 blk_mq_req_flags_t; typedef __u32 blk_opf_t; typedef unsigned int blk_qc_t; typedef u32 codel_time_t; typedef __u32 comp2_t; typedef u32 compat_caddr_t; typedef u32 compat_size_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; typedef u32 compat_uptr_t; typedef u32 depot_flags_t; typedef u32 depot_stack_handle_t; typedef __kernel_dev_t dev_t; typedef uint32_t drbg_flag_t; typedef u32 errseq_t; typedef unsigned int ext4_group_t; typedef __u32 ext4_lblk_t; typedef unsigned int fgf_t; typedef unsigned int fmode_t; typedef unsigned int fop_flags_t; typedef unsigned int gfp_t; typedef __kernel_gid32_t gid_t; typedef unsigned int insn_attr_t; typedef unsigned int ioasid_t; typedef unsigned int iov_iter_extraction_t; typedef unsigned int isolate_mode_t; typedef unsigned int kasan_vmalloc_flags_t; typedef uint32_t key_perm_t; typedef __kernel_mode_t mode_t; typedef u32 nlink_t; typedef u32 note_buf_t[92]; typedef unsigned int pci_channel_state_t; typedef unsigned int pci_ers_result_t; typedef unsigned int pcie_reset_state_t; typedef unsigned int pgtbl_mod_mask; typedef u32 phandle; typedef u32 phys_cpuid_t; typedef __kernel_uid32_t projid_t; typedef U32 rankValCol_t[13]; typedef __u32 req_flags_t; typedef unsigned int sk_buff_data_t; typedef unsigned int slab_flags_t; typedef unsigned int speed_t; typedef unsigned int t_key; typedef unsigned int tcflag_t; 
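/*
 * Illustrative sketch (not part of the generated dump, and guarded out of
 * compilation): the enum xdp_action values above (XDP_ABORTED = 0 through
 * XDP_REDIRECT = 4) are the per-packet verdicts an XDP program returns.
 * A minimal pass-all program, assuming the usual <linux/bpf.h> UAPI header
 * and libbpf's bpf_helpers.h; the section and function names are made up
 * for the example.
 */
#if 0 /* example only -- not compiled as part of this header */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* Drop anything too short to carry an Ethernet header (14 bytes). */
        if (data + 14 > data_end)
                return XDP_DROP;

        return XDP_PASS; /* enum xdp_action verdict: continue up the stack */
}

char _license[] SEC("license") = "GPL";
#endif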
typedef unsigned int tid_t; typedef unsigned int uInt; typedef unsigned int u_int; typedef u32 u_int32_t; typedef unsigned int uffd_flags_t; typedef __kernel_uid32_t uid_t; typedef unsigned int uint; typedef u32 unicode_t; typedef unsigned int upstat_t; typedef unsigned int vm_fault_t; typedef unsigned int xa_mark_t; typedef u32 xdp_features_t; typedef unsigned int zap_flags_t; typedef struct { size_t bitContainer; unsigned int bitsConsumed; const char *ptr; const char *start; const char *limitPtr; } BIT_DStream_t; typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; typedef struct { size_t state; const void *table; } FSE_DState_t; typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; typedef struct { short int ncount[256]; FSE_DTable dtable[0]; } FSE_DecompressWksp; typedef struct { short unsigned int newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; typedef struct { U32 rankVal[13]; U32 rankStart[13]; U32 statsWksp[218]; BYTE symbols[256]; BYTE huffWeight[256]; } HUF_ReadDTableX1_Workspace; typedef struct { BYTE symbol; } sortedSymbol_t; typedef struct { U32 rankVal[156]; U32 rankStats[13]; U32 rankStart0[15]; sortedSymbol_t sortedSymbol[256]; BYTE weightList[256]; U32 calleeWksp[218]; } HUF_ReadDTableX2_Workspace; struct buffer_head; typedef struct { __le32 *p; __le32 key; struct buffer_head *bh; } Indirect; typedef struct { const uint8_t *externalDict; size_t extDictSize; const uint8_t *prefixEnd; size_t prefixSize; } LZ4_streamDecode_t_internal; typedef union { long long unsigned int table[4]; LZ4_streamDecode_t_internal internal_donotuse; } LZ4_streamDecode_t; struct list_head { struct list_head *next; struct list_head *prev; }; typedef struct { int counter; } atomic_t; struct refcount_struct { atomic_t refs; }; typedef struct refcount_struct refcount_t; struct dentry; struct file; typedef struct { struct list_head list; long unsigned int flags; int offset; int size; char *magic; char *mask; const char *interpreter; char *name; struct dentry *dentry; struct file *interp_file; refcount_t users; } Node; struct folio; typedef struct { struct folio *v; } Sector; struct ZSTD_DDict_s; typedef struct ZSTD_DDict_s ZSTD_DDict; typedef struct { const ZSTD_DDict **ddictPtrTable; size_t ddictPtrTableSize; size_t ddictPtrCount; } ZSTD_DDictHashSet; typedef struct { size_t error; int lowerBound; int upperBound; } ZSTD_bounds; typedef struct { U32 f1c; U32 f1d; U32 f7b; U32 f7c; } ZSTD_cpuid_t; typedef void * (*ZSTD_allocFunction)(void *, size_t); typedef void (*ZSTD_freeFunction)(void *, void *); typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void *opaque; } ZSTD_customMem; typedef struct { U16 nextState; BYTE nbAdditionalBits; BYTE nbBits; U32 baseValue; } ZSTD_seqSymbol; typedef struct { ZSTD_seqSymbol LLTable[513]; ZSTD_seqSymbol OFTable[257]; ZSTD_seqSymbol MLTable[513]; HUF_DTable hufTable[4097]; U32 rep[3]; U32 workspace[157]; } ZSTD_entropyDTables_t; typedef struct { long long unsigned int frameContentSize; long long unsigned int windowSize; unsigned int blockSizeMax; ZSTD_frameType_e frameType; unsigned int headerSize; unsigned int dictID; unsigned int checksumFlag; } ZSTD_frameHeader; typedef struct { size_t compressedSize; long long unsigned int decompressedBound; } ZSTD_frameSizeInfo; typedef struct { size_t state; const ZSTD_seqSymbol *table; } ZSTD_fseState; 
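/*
 * Illustrative sketch (example only, guarded out of compilation):
 * SKB_DROP_REASON_SUBSYS_MASK above equals 4294901760 = 0xffff0000, i.e.
 * the owning subsystem from enum skb_drop_reason_subsys is encoded in the
 * top 16 bits of a drop reason, with the reason code proper in the low 16.
 * decode_drop_reason() below is a hypothetical helper showing the split.
 */
#if 0 /* example only -- not compiled as part of this header */
#include <stdio.h>

static void decode_drop_reason(unsigned int reason)
{
        /* Upper 16 bits select the subsystem... */
        unsigned int subsys = (reason & SKB_DROP_REASON_SUBSYS_MASK) >> 16;
        /* ...and the remainder is the subsystem-local reason code. */
        unsigned int code   = reason & ~SKB_DROP_REASON_SUBSYS_MASK;

        /* subsys 0 is SKB_DROP_REASON_SUBSYS_CORE, where code indexes the
         * SKB_DROP_REASON_* values listed earlier in this dump. */
        printf("subsys=%u code=%u\n", subsys, code);
}
#endif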
typedef struct { U32 fastMode; U32 tableLog; } ZSTD_seqSymbol_header; typedef struct { long unsigned int fds_bits[16]; } __kernel_fd_set; typedef struct { int val[2]; } __kernel_fsid_t; typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; typedef struct { s64 counter; } atomic64_t; typedef atomic64_t atomic_long_t; typedef struct { __be64 a; __be64 b; } be128; typedef struct { blockType_e blockType; U32 lastBlock; U32 origSize; } blockProperties_t; typedef struct { union { void *kernel; void *user; }; bool is_kernel: 1; } sockptr_t; typedef sockptr_t bpfptr_t; struct permanent_flags_t { __be16 tag; u8 disable; u8 ownership; u8 deactivated; u8 readPubek; u8 disableOwnerClear; u8 allowMaintenance; u8 physicalPresenceLifetimeLock; u8 physicalPresenceHWEnable; u8 physicalPresenceCMDEnable; u8 CEKPUsed; u8 TPMpost; u8 TPMpostLock; u8 FIPS; u8 operator; u8 enableRevokeEK; u8 nvLocked; u8 readSRKPub; u8 tpmEstablished; u8 maintenanceDone; u8 disableFullDALogicInfo; }; struct stclear_flags_t { __be16 tag; u8 deactivated; u8 disableForceClear; u8 physicalPresence; u8 physicalPresenceLock; u8 bGlobalLock; } __attribute__((packed)); struct tpm1_version { u8 major; u8 minor; u8 rev_major; u8 rev_minor; }; struct tpm1_version2 { __be16 tag; struct tpm1_version version; }; struct timeout_t { __be32 a; __be32 b; __be32 c; __be32 d; }; struct duration_t { __be32 tpm_short; __be32 tpm_medium; __be32 tpm_long; }; typedef union { struct permanent_flags_t perm_flags; struct stclear_flags_t stclear_flags; __u8 owned; __be32 num_pcrs; struct tpm1_version version1; struct tpm1_version2 version2; __be32 manufacturer_id; struct timeout_t timeout; struct duration_t duration; } cap_t; typedef struct { unsigned int interval; unsigned int timeout; } cisco_proto; typedef struct { void *lock; } class_cpus_read_lock_t; struct raw_spinlock; typedef struct raw_spinlock raw_spinlock_t; typedef struct { raw_spinlock_t *lock; raw_spinlock_t *lock2; } class_double_raw_spinlock_t; struct rq; typedef struct { struct rq *lock; struct rq *lock2; } class_double_rq_lock_t; typedef struct { void *lock; long unsigned int flags; } class_irqsave_t; typedef struct { void *lock; } class_preempt_t; typedef struct { raw_spinlock_t *lock; } class_raw_spinlock_irq_t; typedef struct { raw_spinlock_t *lock; long unsigned int flags; } class_raw_spinlock_irqsave_t; typedef struct { raw_spinlock_t *lock; } class_raw_spinlock_t; typedef struct { void *lock; } class_rcu_t; struct qspinlock { union { atomic_t val; struct { u8 locked; u8 pending; }; struct { u16 locked_pending; u16 tail; }; }; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { union { atomic_t cnts; struct { u8 wlocked; u8 __lstate[3]; }; }; arch_spinlock_t wait_lock; }; typedef struct qrwlock arch_rwlock_t; struct lock_class_key; struct lock_class; struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2]; const char *name; u8 wait_type_outer; u8 wait_type_inner; u8 lock_type; }; typedef struct { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } rwlock_t; typedef struct { rwlock_t *lock; } class_read_lock_t; struct pin_cookie { unsigned int val; }; struct rq_flags { long unsigned int flags; struct pin_cookie cookie; unsigned int clock_update_flags; }; typedef struct { struct rq *lock; struct rq_flags rf; } class_rq_lock_irq_t; typedef struct { struct rq *lock; struct rq_flags rf; } class_rq_lock_irqsave_t; typedef struct { struct rq *lock; struct rq_flags rf; } 
class_rq_lock_t; struct spinlock; typedef struct spinlock spinlock_t; typedef struct { spinlock_t *lock; } class_spinlock_irq_t; typedef struct { spinlock_t *lock; long unsigned int flags; } class_spinlock_irqsave_t; typedef struct { spinlock_t *lock; } class_spinlock_t; struct task_struct; typedef struct { struct task_struct *lock; struct rq *rq; struct rq_flags rf; } class_task_rq_lock_t; typedef struct { rwlock_t *lock; } class_write_lock_irq_t; typedef struct { rwlock_t *lock; } class_write_lock_t; typedef struct { unsigned char op; unsigned char bits; short unsigned int val; } code; typedef struct { long unsigned int bits[1]; } dma_cap_mask_t; typedef struct { __u8 b[16]; } guid_t; typedef guid_t efi_guid_t; typedef struct { efi_guid_t guid; u32 headersize; u32 flags; u32 imagesize; } efi_capsule_header_t; typedef struct { efi_guid_t guid; u32 table; } efi_config_table_32_t; typedef struct { efi_guid_t guid; u64 table; } efi_config_table_64_t; typedef union { struct { efi_guid_t guid; void *table; }; efi_config_table_32_t mixed_mode; } efi_config_table_t; typedef struct { efi_guid_t guid; long unsigned int *ptr; const char name[16]; } efi_config_table_type_t; typedef struct { u32 type; u32 pad; u64 phys_addr; u64 virt_addr; u64 num_pages; u64 attribute; } efi_memory_desc_t; typedef struct { u32 version; u32 num_entries; u32 desc_size; u32 flags; efi_memory_desc_t entry[0]; } efi_memory_attributes_table_t; typedef struct { u32 version; u32 length; u64 memory_protection_attribute; } efi_properties_table_t; typedef struct { u16 version; u16 length; u32 runtime_services_supported; } efi_rt_properties_table_t; typedef struct { u64 signature; u32 revision; u32 headersize; u32 crc32; u32 reserved; } efi_table_hdr_t; typedef struct { efi_table_hdr_t hdr; u32 get_time; u32 set_time; u32 get_wakeup_time; u32 set_wakeup_time; u32 set_virtual_address_map; u32 convert_pointer; u32 get_variable; u32 get_next_variable; u32 set_variable; u32 get_next_high_mono_count; u32 reset_system; u32 update_capsule; u32 query_capsule_caps; u32 query_variable_info; } efi_runtime_services_32_t; typedef struct { u16 year; u8 month; u8 day; u8 hour; u8 minute; u8 second; u8 pad1; u32 nanosecond; s16 timezone; u8 daylight; u8 pad2; } efi_time_t; typedef struct { u32 resolution; u32 accuracy; u8 sets_to_zero; } efi_time_cap_t; typedef union { struct { efi_table_hdr_t hdr; efi_status_t (*get_time)(efi_time_t *, efi_time_cap_t *); efi_status_t (*set_time)(efi_time_t *); efi_status_t (*get_wakeup_time)(efi_bool_t *, efi_bool_t *, efi_time_t *); efi_status_t (*set_wakeup_time)(efi_bool_t, efi_time_t *); efi_status_t (*set_virtual_address_map)(long unsigned int, long unsigned int, u32, efi_memory_desc_t *); void *convert_pointer; efi_status_t (*get_variable)(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); efi_status_t (*get_next_variable)(long unsigned int *, efi_char16_t *, efi_guid_t *); efi_status_t (*set_variable)(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); efi_status_t (*get_next_high_mono_count)(u32 *); void (*reset_system)(int, efi_status_t, long unsigned int, efi_char16_t *); efi_status_t (*update_capsule)(efi_capsule_header_t **, long unsigned int, long unsigned int); efi_status_t (*query_capsule_caps)(efi_capsule_header_t **, long unsigned int, u64 *, int *); efi_status_t (*query_variable_info)(u32, u64 *, u64 *, u64 *); }; efi_runtime_services_32_t mixed_mode; } efi_runtime_services_t; typedef struct { efi_table_hdr_t hdr; u32 fw_vendor; u32 fw_revision; u32 con_in_handle; 
u32 con_in; u32 con_out_handle; u32 con_out; u32 stderr_handle; u32 stderr; u32 runtime; u32 boottime; u32 nr_tables; u32 tables; } efi_system_table_32_t; typedef struct { efi_table_hdr_t hdr; u64 fw_vendor; u32 fw_revision; u32 __pad1; u64 con_in_handle; u64 con_in; u64 con_out_handle; u64 con_out; u64 stderr_handle; u64 stderr; u64 runtime; u64 boottime; u32 nr_tables; u32 __pad2; u64 tables; } efi_system_table_64_t; union efi_simple_text_input_protocol; typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; union efi_simple_text_output_protocol; typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; union efi_boot_services; typedef union efi_boot_services efi_boot_services_t; typedef union { struct { efi_table_hdr_t hdr; long unsigned int fw_vendor; u32 fw_revision; long unsigned int con_in_handle; efi_simple_text_input_protocol_t *con_in; long unsigned int con_out_handle; efi_simple_text_output_protocol_t *con_out; long unsigned int stderr_handle; long unsigned int stderr; efi_runtime_services_t *runtime; efi_boot_services_t *boottime; long unsigned int nr_tables; long unsigned int tables; }; efi_system_table_32_t mixed_mode; } efi_system_table_t; typedef struct { __le16 e_tag; __le16 e_perm; __le32 e_id; } ext4_acl_entry; typedef struct { __le32 a_version; } ext4_acl_header; typedef __kernel_fd_set fd_set; typedef struct { long unsigned int *in; long unsigned int *out; long unsigned int *ex; long unsigned int *res_in; long unsigned int *res_out; long unsigned int *res_ex; } fd_set_bits; typedef struct { unsigned int t391; unsigned int t392; unsigned int n391; unsigned int n392; unsigned int n393; short unsigned int lmi; short unsigned int dce; } fr_proto; typedef struct { unsigned int dlci; } fr_proto_pvc; typedef struct { unsigned int dlci; char master[16]; } fr_proto_pvc_info; typedef union { struct { void *freelist; long unsigned int counter; }; freelist_full_t full; } freelist_aba_t; typedef struct { long unsigned int v; } freeptr_t; typedef struct { long unsigned int key[2]; } hsiphash_key_t; typedef struct { unsigned int __nmi_count; unsigned int apic_timer_irqs; unsigned int irq_spurious_count; unsigned int icr_read_retry_count; unsigned int x86_platform_ipis; unsigned int apic_perf_irqs; unsigned int apic_irq_work_irqs; unsigned int irq_resched_count; unsigned int irq_call_count; unsigned int irq_tlb_count; unsigned int irq_thermal_count; unsigned int irq_threshold_count; unsigned int irq_deferred_error_count; long: 64; } irq_cpustat_t; typedef struct { u64 val; } kernel_cap_t; typedef struct { gid_t val; } kgid_t; typedef struct { projid_t val; } kprojid_t; typedef struct { uid_t val; } kuid_t; typedef struct { __le64 b; __le64 a; } le128; typedef struct { atomic_long_t a; } local_t; typedef struct { local_t a; } local64_t; typedef struct { struct lockdep_map dep_map; struct task_struct *owner; } local_lock_t; struct optimistic_spin_queue { atomic_t tail; }; struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; }; struct rw_semaphore { atomic_long_t count; atomic_long_t owner; struct optimistic_spin_queue osq; raw_spinlock_t wait_lock; struct list_head wait_list; void *magic; struct lockdep_map dep_map; }; struct mutex { atomic_long_t owner; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; }; struct ldt_struct; struct vdso_image; typedef struct { u64 ctx_id; atomic64_t 
tlb_gen; struct rw_semaphore ldt_usr_sem; struct ldt_struct *ldt; long unsigned int flags; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; } mm_context_t; typedef struct {} netdevice_tracker; typedef struct {} netns_tracker; typedef struct { long unsigned int bits[1]; } nodemask_t; typedef struct { p4dval_t p4d; } p4d_t; typedef struct { u64 pme; } pagemap_entry_t; typedef struct { u64 val; } pfn_t; typedef struct { pgdval_t pgd; } pgd_t; typedef struct { pmdval_t pmd; } pmd_t; typedef struct { long unsigned int bits[4]; } pnp_irq_mask_t; struct net; typedef struct { struct net *net; } possible_net_t; typedef struct { pteval_t pte; } pte_t; typedef struct { pudval_t pud; } pud_t; typedef struct { short unsigned int encoding; short unsigned int parity; } raw_hdlc_proto; typedef struct { atomic_t refcnt; } rcuref_t; typedef struct { size_t written; size_t count; union { char *buf; void *data; } arg; int error; } read_descriptor_t; typedef union { } release_pages_arg; typedef struct { BIT_DStream_t DStream; ZSTD_fseState stateLL; ZSTD_fseState stateOffb; ZSTD_fseState stateML; size_t prevOffset[3]; } seqState_t; typedef struct { size_t litLength; size_t matchLength; size_t offset; } seq_t; struct seqcount { unsigned int sequence; struct lockdep_map dep_map; }; typedef struct seqcount seqcount_t; typedef struct { seqcount_t seqcount; } seqcount_latch_t; struct seqcount_spinlock { seqcount_t seqcount; spinlock_t *lock; }; typedef struct seqcount_spinlock seqcount_spinlock_t; struct spinlock { union { struct raw_spinlock rlock; struct { u8 __padding[24]; struct lockdep_map dep_map; }; }; }; typedef struct { seqcount_spinlock_t seqcount; spinlock_t lock; } seqlock_t; typedef struct { long unsigned int sig[1]; } sigset_t; typedef struct { u64 key[2]; } siphash_key_t; struct wait_queue_head { spinlock_t lock; struct list_head head; }; typedef struct wait_queue_head wait_queue_head_t; typedef struct { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } socket_lock_t; typedef struct { char *from; char *to; } substring_t; typedef struct { long unsigned int val; } swp_entry_t; typedef struct { unsigned int clock_rate; unsigned int clock_type; short unsigned int loopback; } sync_serial_settings; typedef struct { unsigned int clock_rate; unsigned int clock_type; short unsigned int loopback; unsigned int slot_map; } te1_settings; struct mm_struct; typedef struct { struct mm_struct *mm; } temp_mm_state_t; typedef struct { local64_t v; } u64_stats_t; typedef struct { u64 m_low; u64 m_high; } uint128_t; typedef struct { __u8 b[16]; } uuid_le; typedef struct { __u8 b[16]; } uuid_t; typedef struct { gid_t val; } vfsgid_t; typedef struct { uid_t val; } vfsuid_t; typedef struct { short unsigned int dce; unsigned int modulo; unsigned int window; unsigned int t1; unsigned int t2; unsigned int n2; } x25_hdlc_proto; struct in6_addr { union { __u8 u6_addr8[16]; __be16 u6_addr16[8]; __be32 u6_addr32[4]; } in6_u; }; typedef union { __be32 a4; __be32 a6[4]; struct in6_addr in6; } xfrm_address_t; typedef ZSTD_customMem zstd_custom_mem; typedef ZSTD_frameHeader zstd_frame_header; union IO_APIC_reg_00 { u32 raw; struct { u32 __reserved_2: 14; u32 LTS: 1; u32 delivery_type: 1; u32 __reserved_1: 8; u32 ID: 8; } bits; }; union IO_APIC_reg_01 { u32 raw; struct { u32 version: 8; u32 __reserved_2: 7; u32 PRQ: 1; u32 entries: 8; u32 __reserved_1: 8; } bits; }; union IO_APIC_reg_02 { u32 raw; struct { u32 
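/*
 * A minimal usage sketch for the seqlock_t defined above, assuming the
 * kernel's standard read_seqbegin()/read_seqretry() helpers; the lock and
 * counter names here are hypothetical. Readers loop instead of blocking, so
 * a writer (write_seqlock()/write_sequnlock()) is never starved.
 *
 *   static seqlock_t stats_lock;        // hypothetical lock guarding counters
 *   static u64 pkts, bytes;
 *
 *   unsigned int seq;
 *   u64 p, b;
 *   do {
 *           seq = read_seqbegin(&stats_lock);
 *           p = pkts;                   // snapshot both fields consistently
 *           b = bytes;
 *   } while (read_seqretry(&stats_lock, seq));
 */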
__reserved_2: 24; u32 arbitration: 4; u32 __reserved_1: 4; } bits; }; union IO_APIC_reg_03 { u32 raw; struct { u32 boot_DT: 1; u32 __reserved_1: 31; } bits; }; struct IO_APIC_route_entry { union { struct { u64 vector: 8; u64 delivery_mode: 3; u64 dest_mode_logical: 1; u64 delivery_status: 1; u64 active_low: 1; u64 irr: 1; u64 is_level: 1; u64 masked: 1; u64 reserved_0: 15; u64 reserved_1: 17; u64 virt_destid_8_14: 7; u64 destid_0_7: 8; }; struct { u64 ir_shared_0: 8; u64 ir_zero: 3; u64 ir_index_15: 1; u64 ir_shared_1: 5; u64 ir_reserved_0: 31; u64 ir_format: 1; u64 ir_index_0_14: 15; }; struct { u64 w1: 32; u64 w2: 32; }; }; }; struct PartitionBlock { __be32 pb_ID; __be32 pb_SummedLongs; __be32 pb_ChkSum; __be32 pb_HostID; __be32 pb_Next; __be32 pb_Flags; __be32 pb_Reserved1[2]; __be32 pb_DevFlags; __u8 pb_DriveName[32]; __be32 pb_Reserved2[15]; __be32 pb_Environment[17]; __be32 pb_EReserved[15]; }; struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; }; struct sk_buff; struct sk_buff_list { struct sk_buff *next; struct sk_buff *prev; }; struct sk_buff_head { union { struct { struct sk_buff *next; struct sk_buff *prev; }; struct sk_buff_list list; }; __u32 qlen; spinlock_t lock; }; struct qdisc_skb_head { struct sk_buff *head; struct sk_buff *tail; __u32 qlen; spinlock_t lock; }; struct u64_stats_sync {}; struct gnet_stats_basic_sync { u64_stats_t bytes; u64_stats_t packets; struct u64_stats_sync syncp; }; struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; }; struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); }; struct lockdep_subclass_key { char __one_byte; }; struct lock_class_key { union { struct hlist_node hash_entry; struct lockdep_subclass_key subkeys[8]; }; }; struct Qdisc_ops; struct qdisc_size_table; struct netdev_queue; struct net_rate_estimator; struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct hlist_node hash; u32 handle; u32 parent; struct netdev_queue *dev_queue; struct net_rate_estimator *rate_est; struct gnet_stats_basic_sync *cpu_bstats; struct gnet_stats_queue *cpu_qstats; int pad; refcount_t refcnt; long: 64; long: 64; long: 64; struct sk_buff_head gso_skb; struct qdisc_skb_head q; struct gnet_stats_basic_sync bstats; struct gnet_stats_queue qstats; int owner; long unsigned int state; long unsigned int state2; struct Qdisc *next_sched; struct sk_buff_head skb_bad_txq; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t busylock; spinlock_t seqlock; struct callback_head rcu; netdevice_tracker dev_tracker; struct lock_class_key root_lock_key; long: 64; long: 64; long: 64; long: 64; long int privdata[0]; }; struct Qdisc_class_common { u32 classid; unsigned int filter_cnt; struct hlist_node hnode; }; struct hlist_head; struct Qdisc_class_hash { struct hlist_head *hash; unsigned int hashsize; unsigned int hashmask; unsigned int hashelems; }; struct tcmsg; struct netlink_ext_ack; struct nlattr; struct qdisc_walker; struct tcf_block; struct gnet_dump; struct Qdisc_class_ops { unsigned int flags; struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); void (*qlen_notify)(struct Qdisc *, long 
unsigned int); long unsigned int (*find)(struct Qdisc *, u32); int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); void (*unbind_tcf)(struct Qdisc *, long unsigned int); int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); }; struct module; struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16]; int priv_size; unsigned int static_flags; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); void (*attach)(struct Qdisc *); int (*change_tx_queue_len)(struct Qdisc *, unsigned int); void (*change_real_num_tx)(struct Qdisc *, unsigned int); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); void (*ingress_block_set)(struct Qdisc *, u32); void (*egress_block_set)(struct Qdisc *, u32); u32 (*ingress_block_get)(struct Qdisc *); u32 (*egress_block_get)(struct Qdisc *); struct module *owner; }; struct RigidDiskBlock { __be32 rdb_ID; __be32 rdb_SummedLongs; __be32 rdb_ChkSum; __be32 rdb_HostID; __be32 rdb_BlockBytes; __be32 rdb_Flags; __be32 rdb_BadBlockList; __be32 rdb_PartitionList; __be32 rdb_FileSysHeaderList; __be32 rdb_DriveInit; __be32 rdb_Reserved1[6]; __be32 rdb_Cylinders; __be32 rdb_Sectors; __be32 rdb_Heads; __be32 rdb_Interleave; __be32 rdb_Park; __be32 rdb_Reserved2[3]; __be32 rdb_WritePreComp; __be32 rdb_ReducedWrite; __be32 rdb_StepRate; __be32 rdb_Reserved3[5]; __be32 rdb_RDBBlocksLo; __be32 rdb_RDBBlocksHi; __be32 rdb_LoCylinder; __be32 rdb_HiCylinder; __be32 rdb_CylBlocks; __be32 rdb_AutoParkSeconds; __be32 rdb_HighRDSKBlock; __be32 rdb_Reserved4; char rdb_DiskVendor[8]; char rdb_DiskProduct[16]; char rdb_DiskRevision[4]; char rdb_ControllerVendor[8]; char rdb_ControllerProduct[16]; char rdb_ControllerRevision[4]; __be32 rdb_Reserved5[10]; }; struct xxh64_state { uint64_t total_len; uint64_t v1; uint64_t v2; uint64_t v3; uint64_t v4; uint64_t mem64[4]; uint32_t memsize; }; struct ZSTD_outBuffer_s { void *dst; size_t size; size_t pos; }; typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; struct ZSTD_DCtx_s { const ZSTD_seqSymbol *LLTptr; const ZSTD_seqSymbol *MLTptr; const ZSTD_seqSymbol *OFTptr; const HUF_DTable *HUFptr; ZSTD_entropyDTables_t entropy; U32 workspace[640]; const void *previousDstEnd; const void *prefixStart; const void *virtualStart; const void *dictEnd; size_t expected; ZSTD_frameHeader fParams; U64 processedCSize; U64 decodedSize; blockType_e bType; ZSTD_dStage stage; U32 litEntropy; U32 fseEntropy; struct xxh64_state xxhState; size_t headerSize; ZSTD_format_e format; ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; U32 validateChecksum; const BYTE *litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; int bmi2; ZSTD_DDict *ddictLocal; const ZSTD_DDict *ddict; U32 dictID; int ddictIsCold; ZSTD_dictUses_e 
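/*
 * A sketch of how the enqueue/dequeue callbacks in struct Qdisc_ops above are
 * typically wired up, loosely modelled on the kernel's trivial FIFO qdisc and
 * assuming the qdisc_enqueue_tail()/qdisc_dequeue_head()/qdisc_drop() helpers
 * from <net/sch_generic.h>; the "sketch" names are hypothetical.
 *
 *   static int sketch_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *                             struct sk_buff **to_free)
 *   {
 *           if (likely(sch->q.qlen < sch->limit))
 *                   return qdisc_enqueue_tail(skb, sch); // NET_XMIT_SUCCESS
 *           return qdisc_drop(skb, sch, to_free);        // count and free
 *   }
 *
 *   static struct Qdisc_ops sketch_qdisc_ops = {
 *           .id      = "sketch",
 *           .enqueue = sketch_enqueue,
 *           .dequeue = qdisc_dequeue_head,
 *           .peek    = qdisc_peek_head,
 *           .owner   = THIS_MODULE,
 *   };
 */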
dictUses; ZSTD_DDictHashSet *ddictSet; ZSTD_refMultipleDDicts_e refMultipleDDicts; ZSTD_dStreamStage streamStage; char *inBuff; size_t inBuffSize; size_t inPos; size_t maxWindowSize; char *outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; size_t lhSize; U32 hostageByte; int noForwardProgress; ZSTD_bufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; BYTE *litBuffer; const BYTE *litBufferEnd; ZSTD_litLocation_e litBufferLocation; BYTE litExtraBuffer[65568]; BYTE headerBuffer[18]; size_t oversizedDuration; }; typedef struct ZSTD_DCtx_s ZSTD_DCtx; typedef ZSTD_DCtx ZSTD_DStream; typedef ZSTD_DCtx zstd_dctx; typedef ZSTD_DStream zstd_dstream; struct ZSTD_DDict_s { void *dictBuffer; const void *dictContent; size_t dictSize; ZSTD_entropyDTables_t entropy; U32 dictID; U32 entropyPresent; ZSTD_customMem cMem; }; typedef ZSTD_DDict zstd_ddict; struct ZSTD_inBuffer_s { const void *src; size_t size; size_t pos; }; typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; typedef ZSTD_inBuffer zstd_in_buffer; typedef ZSTD_outBuffer zstd_out_buffer; struct __aio_sigset { const sigset_t *sigmask; size_t sigsetsize; }; struct __arch_relative_insn { u8 op; s32 raddr; } __attribute__((packed)); struct llist_node { struct llist_node *next; }; struct __call_single_node { struct llist_node llist; union { unsigned int u_flags; atomic_t a_flags; }; u16 src; u16 dst; }; typedef void (*smp_call_func_t)(void *); struct __call_single_data { struct __call_single_node node; smp_call_func_t func; void *info; }; typedef struct __call_single_data call_single_data_t; struct cpumask; struct __cmp_key { const struct cpumask *cpus; struct cpumask ***masks; int node; int cpu; int w; }; struct __fb_timings { u32 dclk; u32 hfreq; u32 vfreq; u32 hactive; u32 vactive; u32 hblank; u32 vblank; u32 htotal; u32 vtotal; }; struct tracepoint; struct __find_tracepoint_cb_data { const char *tp_name; struct tracepoint *tpoint; struct module *mod; }; struct genradix_root; struct __genradix { struct genradix_root *root; }; struct pmu; struct cgroup; struct __group_key { int cpu; struct pmu *pmu; struct cgroup *cgroup; }; struct __ip6_tnl_parm { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; bool collect_md; __be32 flowinfo; __u32 flags; struct in6_addr laddr; struct in6_addr raddr; long unsigned int i_flags[1]; long unsigned int o_flags[1]; __be32 i_key; __be32 o_key; __u32 fwmark; __u32 index; __u8 erspan_ver; __u8 dir; __u16 hwid; }; struct __kernel_timespec { __kernel_time64_t tv_sec; long long int tv_nsec; }; struct __kernel_itimerspec { struct __kernel_timespec it_interval; struct __kernel_timespec it_value; }; struct __kernel_old_timeval { __kernel_long_t tv_sec; __kernel_long_t tv_usec; }; struct __kernel_old_itimerval { struct __kernel_old_timeval it_interval; struct __kernel_old_timeval it_value; }; struct __kernel_old_timespec { __kernel_old_time_t tv_sec; long int tv_nsec; }; struct __kernel_sock_timeval { __s64 tv_sec; __s64 tv_usec; }; struct __kernel_sockaddr_storage { union { struct { __kernel_sa_family_t ss_family; char __data[126]; }; void *__align; }; }; struct __kernel_timex_timeval { __kernel_time64_t tv_sec; long long int tv_usec; }; struct __kernel_timex { unsigned int modes; long long int offset; long long int freq; long long int maxerror; long long int esterror; int status; long long int constant; long long int precision; long long int tolerance; struct __kernel_timex_timeval time; long long int tick; long long int ppsfreq; long long int jitter; int shift; long long int stabil; long 
long int jitcnt; long long int calcnt; long long int errcnt; long long int stbcnt; int tai; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct __kfifo { unsigned int in; unsigned int out; unsigned int mask; unsigned int esize; void *data; }; struct __large_struct { long unsigned int buf[100]; }; struct nft_payload { enum nft_payload_bases base: 8; u8 offset; u8 len; u8 dreg; }; struct nft_meta { enum nft_meta_keys key: 8; u8 len; union { u8 dreg; u8 sreg; }; }; struct nft_expr_ops; struct __nft_expr { const struct nft_expr_ops *ops; union { struct nft_payload payload; struct nft_meta meta; }; }; struct __old_kernel_stat { short unsigned int st_dev; short unsigned int st_ino; short unsigned int st_mode; short unsigned int st_nlink; short unsigned int st_uid; short unsigned int st_gid; short unsigned int st_rdev; unsigned int st_size; unsigned int st_atime; unsigned int st_mtime; unsigned int st_ctime; }; struct work_struct; typedef void (*work_func_t)(struct work_struct *); struct work_struct { atomic_long_t data; struct list_head entry; work_func_t func; struct lockdep_map lockdep_map; }; struct net_device; struct __rt6_probe_work { struct work_struct work; struct in6_addr target; struct net_device *dev; netdevice_tracker dev_tracker; }; union sigval { int sival_int; void *sival_ptr; }; typedef union sigval sigval_t; union __sifields { struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; } _kill; struct { __kernel_timer_t _tid; int _overrun; sigval_t _sigval; int _sys_private; } _timer; struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } _rt; struct { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } _sigchld; struct { void *_addr; union { int _trapno; short int _addr_lsb; struct { char _dummy_bnd[8]; void *_lower; void *_upper; } _addr_bnd; struct { char _dummy_pkey[8]; __u32 _pkey; } _addr_pkey; struct { long unsigned int _data; __u32 _type; __u32 _flags; } _perf; }; } _sigfault; struct { long int _band; int _fd; } _sigpoll; struct { void *_call_addr; int _syscall; unsigned int _arch; } _sigsys; }; struct bpf_flow_keys; struct bpf_sock; struct __sk_buff { __u32 len; __u32 pkt_type; __u32 mark; __u32 queue_mapping; __u32 protocol; __u32 vlan_present; __u32 vlan_tci; __u32 vlan_proto; __u32 priority; __u32 ingress_ifindex; __u32 ifindex; __u32 tc_index; __u32 cb[5]; __u32 hash; __u32 tc_classid; __u32 data; __u32 data_end; __u32 napi_id; __u32 family; __u32 remote_ip4; __u32 local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 data_meta; union { struct bpf_flow_keys *flow_keys; }; __u64 tstamp; __u32 wire_len; __u32 gso_segs; union { struct bpf_sock *sk; }; __u32 gso_size; __u8 tstamp_type; __u64 hwtstamp; }; struct __track_dentry_update_args { struct dentry *dentry; int op; }; struct __track_range_args { ext4_lblk_t start; ext4_lblk_t end; }; union __u128_halves { u128 full; struct { u64 low; u64 high; }; }; struct __una_u32 { u32 x; }; struct inode; struct __uprobe_key { struct inode *inode; loff_t offset; }; struct __user_cap_data_struct { __u32 effective; __u32 permitted; __u32 inheritable; }; typedef struct __user_cap_data_struct *cap_user_data_t; struct __user_cap_header_struct { __u32 version; int pid; }; typedef struct __user_cap_header_struct *cap_user_header_t; struct __va_list_tag { unsigned int gp_offset; unsigned int fp_offset; void *overflow_arg_area; void *reg_save_area; }; typedef __builtin_va_list va_list; struct _bpf_dtab_netdev { struct 
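/*
 * A minimal sketch of how a TC BPF program consumes the struct __sk_buff
 * context defined above: data/data_end are u32 "pointers" that the verifier
 * rewrites, hence the double cast and the explicit bounds check. Assumes
 * libbpf's SEC() macro, bpf_htons() from bpf_endian.h, and the TC_ACT_*
 * verdicts from <linux/pkt_cls.h>; the program name is hypothetical.
 *
 *   SEC("tc")
 *   int drop_non_ip(struct __sk_buff *skb)
 *   {
 *           void *data     = (void *)(long)skb->data;
 *           void *data_end = (void *)(long)skb->data_end;
 *           struct ethhdr *eth = data;
 *
 *           if ((void *)(eth + 1) > data_end)    // bounds check for verifier
 *                   return TC_ACT_OK;
 *           if (eth->h_proto != bpf_htons(ETH_P_IP))
 *                   return TC_ACT_SHOT;          // drop everything but IPv4
 *           return TC_ACT_OK;
 *   }
 */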
net_device *dev; }; struct _cache_table { unsigned char descriptor; char cache_type; short int size; }; union _cpuid4_leaf_eax { struct { enum _cache_type type: 5; unsigned int level: 3; unsigned int is_self_initializing: 1; unsigned int is_fully_associative: 1; unsigned int reserved: 4; unsigned int num_threads_sharing: 12; unsigned int num_cores_on_die: 6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { unsigned int coherency_line_size: 12; unsigned int physical_line_partition: 10; unsigned int ways_of_associativity: 10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { unsigned int number_of_sets: 32; } split; u32 full; }; struct amd_northbridge; struct _cpuid4_info_regs { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned int id; long unsigned int size; struct amd_northbridge *nb; }; struct _flow_keys_digest_data { __be16 n_proto; u8 ip_proto; u8 padding; __be32 ports; __be32 src; __be32 dst; }; struct _fpreg { __u16 significand[4]; __u16 exponent; }; struct _fpxreg { __u16 significand[4]; __u16 exponent; __u16 padding[3]; }; struct _xmmreg { __u32 element[4]; }; struct _fpx_sw_bytes { __u32 magic1; __u32 extended_size; __u64 xfeatures; __u32 xstate_size; __u32 padding[7]; }; struct _fpstate_32 { __u32 cw; __u32 sw; __u32 tag; __u32 ipoff; __u32 cssel; __u32 dataoff; __u32 datasel; struct _fpreg _st[8]; __u16 status; __u16 magic; __u32 _fxsr_env[6]; __u32 mxcsr; __u32 reserved; struct _fpxreg _fxsr_st[8]; struct _xmmreg _xmm[8]; union { __u32 padding1[44]; __u32 padding[44]; }; union { __u32 padding2[12]; struct _fpx_sw_bytes sw_reserved; }; }; struct _gpt_entry_attributes { u64 required_to_function: 1; u64 reserved: 47; u64 type_guid_specific: 16; }; typedef struct _gpt_entry_attributes gpt_entry_attributes; struct _gpt_entry { efi_guid_t partition_type_guid; efi_guid_t unique_partition_guid; __le64 starting_lba; __le64 ending_lba; gpt_entry_attributes attributes; __le16 partition_name[36]; }; typedef struct _gpt_entry gpt_entry; struct _gpt_header { __le64 signature; __le32 revision; __le32 header_size; __le32 header_crc32; __le32 reserved1; __le64 my_lba; __le64 alternate_lba; __le64 first_usable_lba; __le64 last_usable_lba; efi_guid_t disk_guid; __le64 partition_entry_lba; __le32 num_partition_entries; __le32 sizeof_partition_entry; __le32 partition_entry_array_crc32; } __attribute__((packed)); typedef struct _gpt_header gpt_header; struct _gpt_mbr_record { u8 boot_indicator; u8 start_head; u8 start_sector; u8 start_track; u8 os_type; u8 end_head; u8 end_sector; u8 end_track; __le32 starting_lba; __le32 size_in_lba; }; typedef struct _gpt_mbr_record gpt_mbr_record; struct resource { resource_size_t start; resource_size_t end; const char *name; long unsigned int flags; long unsigned int desc; struct resource *parent; struct resource *sibling; struct resource *child; }; struct intel_gtt_driver; struct pci_dev; struct page; struct _intel_private { const struct intel_gtt_driver *driver; struct pci_dev *pcidev; struct pci_dev *bridge_dev; u8 *registers; phys_addr_t gtt_phys_addr; u32 PGETBL_save; u32 *gtt; bool clear_fake_agp; int num_dcache_entries; void *i9xx_flush_page; char *i81x_gtt_table; struct resource ifp_resource; int resource_valid; struct page *scratch_page; phys_addr_t scratch_page_dma; int refcount; unsigned int needs_dmar: 1; phys_addr_t gma_bus_addr; resource_size_t stolen_size; unsigned int gtt_total_entries; unsigned int gtt_mappable_entries; }; struct kvm_stats_desc { __u32 flags; __s16 exponent; __u16 size; 
__u32 offset; __u32 bucket_size; char name[0]; }; struct _kvm_stats_desc { struct kvm_stats_desc desc; char name[48]; }; struct _legacy_mbr { u8 boot_code[440]; __le32 unique_mbr_signature; __le16 unknown; gpt_mbr_record partition_record[4]; __le16 signature; } __attribute__((packed)); typedef struct _legacy_mbr legacy_mbr; struct strp_msg { int full_len; int offset; }; struct _strp_msg { struct strp_msg strp; int accum_len; }; struct timer_list { struct hlist_node entry; long unsigned int expires; void (*function)(struct timer_list *); u32 flags; struct lockdep_map lockdep_map; }; struct workqueue_struct; struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; }; struct _thermal_state { u64 next_check; u64 last_interrupt_time; struct delayed_work therm_work; long unsigned int count; long unsigned int last_count; long unsigned int max_time_ms; long unsigned int total_time_ms; bool rate_control_active; bool new_event; u8 level; u8 sample_index; u8 sample_count; u8 average; u8 baseline_temp; u8 temp_samples[3]; }; struct _tlb_table { unsigned char descriptor; char tlb_type; unsigned int entries; char info[128]; }; struct a4tech_sc { long unsigned int quirks; unsigned int hw_wheel; __s32 delayed_value; }; struct seq_net_private { struct net *net; netns_tracker ns_tracker; }; struct ac6_iter_state { struct seq_net_private p; struct net_device *dev; }; struct access_coordinate { unsigned int read_bandwidth; unsigned int write_bandwidth; unsigned int read_latency; unsigned int write_latency; }; struct acct { char ac_flag; char ac_version; __u16 ac_uid16; __u16 ac_gid16; __u16 ac_tty; __u32 ac_btime; comp_t ac_utime; comp_t ac_stime; comp_t ac_etime; comp_t ac_mem; comp_t ac_io; comp_t ac_rw; comp_t ac_minflt; comp_t ac_majflt; comp_t ac_swaps; __u16 ac_ahz; __u32 ac_exitcode; char ac_comm[17]; __u8 ac_etime_hi; __u16 ac_etime_lo; __u32 ac_uid; __u32 ac_gid; }; typedef struct acct acct_t; struct ack_sample { u32 pkts_acked; s32 rtt_us; u32 in_flight; }; struct crypto_tfm; struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); }; struct compress_alg { int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); }; struct crypto_type; struct crypto_alg { struct list_head cra_list; struct list_head cra_users; u32 cra_flags; unsigned int cra_blocksize; unsigned int cra_ctxsize; unsigned int cra_alignmask; int cra_priority; refcount_t cra_refcnt; char cra_name[128]; char cra_driver_name[128]; const struct crypto_type *cra_type; union { struct cipher_alg cipher; struct compress_alg compress; } cra_u; int (*cra_init)(struct crypto_tfm *); void (*cra_exit)(struct crypto_tfm *); void (*cra_destroy)(struct crypto_alg *); struct module *cra_module; }; struct comp_alg_common { struct crypto_alg base; }; struct acomp_req; struct scatterlist; struct crypto_acomp; struct acomp_alg { int (*compress)(struct acomp_req *); int (*decompress)(struct acomp_req *); void (*dst_free)(struct scatterlist *); int (*init)(struct crypto_acomp *); void (*exit)(struct crypto_acomp *); unsigned int reqsize; union { struct { struct crypto_alg base; }; struct comp_alg_common calg; }; }; typedef void (*crypto_completion_t)(void *, int); struct 
crypto_async_request { struct list_head list; crypto_completion_t complete; void *data; struct crypto_tfm *tfm; u32 flags; }; struct acomp_req { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int slen; unsigned int dlen; u32 flags; void *__ctx[0]; }; struct power_supply; union power_supply_propval; struct power_supply_desc { const char *name; enum power_supply_type type; u8 charge_behaviours; u32 usb_types; const enum power_supply_property *properties; size_t num_properties; int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); int (*property_is_writeable)(struct power_supply *, enum power_supply_property); void (*external_power_changed)(struct power_supply *); void (*set_charged)(struct power_supply *); bool no_thermal; int use_for_apm; }; struct notifier_block; typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); struct notifier_block { notifier_fn_t notifier_call; struct notifier_block *next; int priority; }; struct acpi_device; struct acpi_ac { struct power_supply *charger; struct power_supply_desc charger_desc; struct acpi_device *device; long long unsigned int state; struct notifier_block battery_nb; }; struct acpi_address16_attribute { u16 granularity; u16 minimum; u16 maximum; u16 translation_offset; u16 address_length; }; struct acpi_address32_attribute { u32 granularity; u32 minimum; u32 maximum; u32 translation_offset; u32 address_length; }; struct acpi_address64_attribute { u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; }; struct acpi_namespace_node; struct acpi_address_range { struct acpi_address_range *next; struct acpi_namespace_node *region_node; acpi_physical_address start_address; acpi_physical_address end_address; }; struct acpi_battery { struct mutex lock; struct mutex sysfs_lock; struct power_supply *bat; struct power_supply_desc bat_desc; struct acpi_device *device; struct notifier_block pm_nb; struct list_head list; long unsigned int update_time; int revision; int rate_now; int capacity_now; int voltage_now; int design_capacity; int full_charge_capacity; int technology; int design_voltage; int design_capacity_warning; int design_capacity_low; int cycle_count; int measurement_accuracy; int max_sampling_time; int min_sampling_time; int max_averaging_interval; int min_averaging_interval; int capacity_granularity_1; int capacity_granularity_2; int alarm; char model_number[64]; char serial_number[64]; char type[64]; char oem_info[64]; int state; int power_unit; long unsigned int flags; }; struct acpi_battery_hook { const char *name; int (*add_battery)(struct power_supply *, struct acpi_battery_hook *); int (*remove_battery)(struct power_supply *, struct acpi_battery_hook *); struct list_head list; }; struct acpi_bit_register_info { u8 parent_register; u8 bit_position; u16 access_bit_mask; }; struct acpi_buffer { acpi_size length; void *pointer; }; struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; struct device; struct acpi_bus_type { struct list_head list; const char *name; bool (*match)(struct device *); struct acpi_device * (*find_companion)(struct device *); void (*setup)(struct device *); }; struct input_dev; struct acpi_button { unsigned int type; struct input_dev *input; char phys[32]; long unsigned int pushed; int last_state; 
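/*
 * A sketch of filling in the struct notifier_block declared above: only
 * .notifier_call normally needs to be set, while .next/.priority are managed
 * by the chain. The registration helper depends on the chain, e.g.
 * register_netdevice_notifier() for netdev events; names prefixed "sketch_"
 * are hypothetical.
 *
 *   static int sketch_netdev_event(struct notifier_block *nb,
 *                                  unsigned long event, void *ptr)
 *   {
 *           struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *           if (event == NETDEV_UP)
 *                   pr_info("%s is up\n", dev->name);
 *           return NOTIFY_DONE;          // nothing to veto
 *   }
 *
 *   static struct notifier_block sketch_nb = {
 *           .notifier_call = sketch_netdev_event,
 *   };
 */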
ktime_t last_time; bool suspended; bool lid_state_initialized; }; struct acpi_cdat_header { u8 type; u8 reserved; u16 length; }; struct acpi_cedt_header { u8 type; u8 reserved; u16 length; }; struct acpi_cedt_cfmws { struct acpi_cedt_header header; u32 reserved1; u64 base_hpa; u64 window_size; u8 interleave_ways; u8 interleave_arithmetic; u16 reserved2; u32 granularity; u16 restrictions; u16 qtg_id; u32 interleave_targets[0]; } __attribute__((packed)); struct acpi_comment_node { char *comment; struct acpi_comment_node *next; }; struct acpi_common_descriptor { void *common_pointer; u8 descriptor_type; }; struct acpi_common_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; }; struct acpi_connection_info { u8 *connection; u16 length; u8 access_length; }; union acpi_parse_object; struct acpi_control_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; u16 opcode; union acpi_parse_object *predicate_op; u8 *aml_predicate_start; u8 *package_end; u64 loop_timeout; }; struct cpumask { long unsigned int bits[2]; }; typedef struct cpumask cpumask_var_t[1]; struct acpi_pct_register; struct acpi_cpufreq_data { unsigned int resume; unsigned int cpu_feature; unsigned int acpi_perf_cpu; cpumask_var_t freqdomain_cpus; void (*cpu_freq_write)(struct acpi_pct_register *, u32); u32 (*cpu_freq_read)(struct acpi_pct_register *); }; struct acpi_create_field_info { struct acpi_namespace_node *region_node; struct acpi_namespace_node *field_node; struct acpi_namespace_node *register_node; struct acpi_namespace_node *data_register_node; struct acpi_namespace_node *connection_node; u8 *resource_buffer; u32 bank_value; u32 field_bit_position; u32 field_bit_length; u16 resource_length; u16 pin_number_index; u8 field_flags; u8 attribute; u8 field_type; u8 access_length; }; struct attribute { const char *name; umode_t mode; bool ignore_lockdep: 1; struct lock_class_key *key; struct lock_class_key skey; }; struct address_space; struct kobject; struct vm_area_struct; struct bin_attribute { struct attribute attr; size_t size; void *private; struct address_space * (*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, loff_t, int); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); }; struct acpi_data_attr { struct bin_attribute attr; u64 addr; }; typedef void *acpi_handle; struct fwnode_operations; struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; struct device *dev; struct list_head suppliers; struct list_head consumers; u8 flags; }; union acpi_object; struct acpi_device_data { const union acpi_object *pointer; struct list_head properties; const union acpi_object *of_compatible; struct list_head subnodes; }; struct kref { refcount_t refcount; }; struct kset; struct kobj_type; struct kernfs_node; struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; const struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; unsigned int state_initialized: 1; unsigned int state_in_sysfs: 1; unsigned int state_add_uevent_sent: 1; unsigned int state_remove_uevent_sent: 1; unsigned int uevent_suppress: 1; }; struct swait_queue_head { raw_spinlock_t lock; struct list_head task_list; }; struct completion { unsigned int done; 
struct swait_queue_head wait; }; struct acpi_data_node { struct list_head sibling; const char *name; acpi_handle handle; struct fwnode_handle fwnode; struct fwnode_handle *parent; struct acpi_device_data data; struct kobject kobj; struct completion kobj_done; }; struct acpi_data_node_attr { struct attribute attr; ssize_t (*show)(struct acpi_data_node *, char *); ssize_t (*store)(struct acpi_data_node *, const char *, size_t); }; struct acpi_data_obj { char *name; int (*fn)(void *, struct acpi_data_attr *); }; struct acpi_data_table_mapping { void *pointer; }; struct acpi_dep_data { struct list_head node; acpi_handle supplier; acpi_handle consumer; bool honor_dep; bool met; bool free_when_met; }; union acpi_operand_object; struct acpi_object_common { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; }; struct acpi_object_integer { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 fill[3]; u64 value; }; struct acpi_object_string { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; char *pointer; u32 length; }; struct acpi_object_buffer { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 *pointer; u32 length; u32 aml_length; u8 *aml_start; struct acpi_namespace_node *node; }; struct acpi_object_package { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; struct acpi_namespace_node *node; union acpi_operand_object **elements; u8 *aml_start; u32 aml_length; u32 count; }; struct acpi_object_event { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; void *os_semaphore; }; struct acpi_walk_state; typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *); struct acpi_object_method { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 info_flags; u8 param_count; u8 sync_level; union acpi_operand_object *mutex; union acpi_operand_object *node; u8 *aml_start; union { acpi_internal_method implementation; union acpi_operand_object *handler; } dispatch; u32 aml_length; acpi_owner_id owner_id; u8 thread_count; }; struct acpi_thread_state; struct acpi_object_mutex { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 sync_level; u16 acquisition_depth; void *os_mutex; u64 thread_id; struct acpi_thread_state *owner_thread; union acpi_operand_object *prev; union acpi_operand_object *next; struct acpi_namespace_node *node; u8 original_sync_level; }; struct acpi_object_region { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 space_id; struct acpi_namespace_node *node; union acpi_operand_object *handler; union acpi_operand_object *next; acpi_physical_address address; u32 length; void *pointer; }; struct acpi_object_notify_common { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; union acpi_operand_object *notify_list[2]; union acpi_operand_object *handler; }; struct acpi_gpe_block_info; struct acpi_object_device { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; union acpi_operand_object *notify_list[2]; union acpi_operand_object *handler; struct acpi_gpe_block_info *gpe_block; }; struct acpi_object_power_resource { union acpi_operand_object 
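/*
 * A minimal sketch for the struct completion defined just above, the usual
 * one-shot wait primitive, assuming the standard init_completion()/
 * complete()/wait_for_completion() helpers; the variable name is
 * hypothetical.
 *
 *   static struct completion probe_done;
 *
 *   init_completion(&probe_done);
 *   // ... kick off async work that finishes with complete(&probe_done) ...
 *   wait_for_completion(&probe_done);    // sleeps until 'done' is raised
 */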
*next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; union acpi_operand_object *notify_list[2]; union acpi_operand_object *handler; u32 system_level; u32 resource_order; }; struct acpi_object_processor { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 proc_id; u8 length; union acpi_operand_object *notify_list[2]; union acpi_operand_object *handler; acpi_io_address address; }; struct acpi_object_thermal_zone { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; union acpi_operand_object *notify_list[2]; union acpi_operand_object *handler; }; struct acpi_object_field_common { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 field_flags; u8 attribute; u8 access_byte_width; struct acpi_namespace_node *node; u32 bit_length; u32 base_byte_offset; u32 value; u8 start_field_bit_offset; u8 access_length; union acpi_operand_object *region_obj; }; struct acpi_object_region_field { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 field_flags; u8 attribute; u8 access_byte_width; struct acpi_namespace_node *node; u32 bit_length; u32 base_byte_offset; u32 value; u8 start_field_bit_offset; u8 access_length; u16 resource_length; union acpi_operand_object *region_obj; u8 *resource_buffer; u16 pin_number_index; u8 *internal_pcc_buffer; }; struct acpi_object_buffer_field { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 field_flags; u8 attribute; u8 access_byte_width; struct acpi_namespace_node *node; u32 bit_length; u32 base_byte_offset; u32 value; u8 start_field_bit_offset; u8 access_length; u8 is_create_field; union acpi_operand_object *buffer_obj; }; struct acpi_object_bank_field { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 field_flags; u8 attribute; u8 access_byte_width; struct acpi_namespace_node *node; u32 bit_length; u32 base_byte_offset; u32 value; u8 start_field_bit_offset; u8 access_length; union acpi_operand_object *region_obj; union acpi_operand_object *bank_obj; }; struct acpi_object_index_field { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 field_flags; u8 attribute; u8 access_byte_width; struct acpi_namespace_node *node; u32 bit_length; u32 base_byte_offset; u32 value; u8 start_field_bit_offset; u8 access_length; union acpi_operand_object *index_obj; union acpi_operand_object *data_obj; }; typedef void (*acpi_notify_handler)(acpi_handle, u32, void *); struct acpi_object_notify_handler { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; struct acpi_namespace_node *node; u32 handler_type; acpi_notify_handler handler; void *context; union acpi_operand_object *next[2]; }; typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *); typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **); struct acpi_object_addr_handler { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 space_id; u8 handler_flags; acpi_adr_space_handler handler; struct acpi_namespace_node *node; void *context; void *context_mutex; acpi_adr_space_setup setup; union acpi_operand_object *region_list; union acpi_operand_object *next; }; struct acpi_object_reference { union 
acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; u8 class; u8 target_type; u8 resolved; void *object; struct acpi_namespace_node *node; union acpi_operand_object **where; u8 *index_pointer; u8 *aml; u32 value; }; struct acpi_object_extra { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; struct acpi_namespace_node *method_REG; struct acpi_namespace_node *scope_node; void *region_context; u8 *aml_start; u32 aml_length; }; typedef void (*acpi_object_handler)(acpi_handle, void *); struct acpi_object_data { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; acpi_object_handler handler; void *pointer; }; struct acpi_object_cache_list { union acpi_operand_object *next_object; u8 descriptor_type; u8 type; u16 reference_count; u8 flags; union acpi_operand_object *next; }; union acpi_name_union { u32 integer; char ascii[4]; }; struct acpi_namespace_node { union acpi_operand_object *object; u8 descriptor_type; u8 type; u16 flags; union acpi_name_union name; struct acpi_namespace_node *parent; struct acpi_namespace_node *child; struct acpi_namespace_node *peer; acpi_owner_id owner_id; }; union acpi_operand_object { struct acpi_object_common common; struct acpi_object_integer integer; struct acpi_object_string string; struct acpi_object_buffer buffer; struct acpi_object_package package; struct acpi_object_event event; struct acpi_object_method method; struct acpi_object_mutex mutex; struct acpi_object_region region; struct acpi_object_notify_common common_notify; struct acpi_object_device device; struct acpi_object_power_resource power_resource; struct acpi_object_processor processor; struct acpi_object_thermal_zone thermal_zone; struct acpi_object_field_common common_field; struct acpi_object_region_field field; struct acpi_object_buffer_field buffer_field; struct acpi_object_bank_field bank_field; struct acpi_object_index_field index_field; struct acpi_object_notify_handler notify; struct acpi_object_addr_handler address_space; struct acpi_object_reference reference; struct acpi_object_extra extra; struct acpi_object_data data; struct acpi_object_cache_list cache; struct acpi_namespace_node node; }; union acpi_parse_value { u64 integer; u32 size; char *string; u8 *buffer; char *name; union acpi_parse_object *arg; }; struct acpi_parse_obj_common { union acpi_parse_object *parent; u8 descriptor_type; u8 flags; u16 aml_opcode; u8 *aml; union acpi_parse_object *next; struct acpi_namespace_node *node; union acpi_parse_value value; u8 arg_list_length; }; struct acpi_parse_obj_named { union acpi_parse_object *parent; u8 descriptor_type; u8 flags; u16 aml_opcode; u8 *aml; union acpi_parse_object *next; struct acpi_namespace_node *node; union acpi_parse_value value; u8 arg_list_length; char *path; u8 *data; u32 length; u32 name; }; struct acpi_parse_obj_asl { union acpi_parse_object *parent; u8 descriptor_type; u8 flags; u16 aml_opcode; u8 *aml; union acpi_parse_object *next; struct acpi_namespace_node *node; union acpi_parse_value value; u8 arg_list_length; union acpi_parse_object *child; union acpi_parse_object *parent_method; char *filename; u8 file_changed; char *parent_filename; char *external_name; char *namepath; char name_seg[4]; u32 extra_value; u32 column; u32 line_number; u32 logical_line_number; u32 logical_byte_offset; u32 end_line; u32 end_logical_line; u32 acpi_btype; u32 aml_length; u32 aml_subtree_length; u32 final_aml_length; u32 final_aml_offset; 
u32 compile_flags; u16 parse_opcode; u8 aml_opcode_length; u8 aml_pkg_len_bytes; u8 extra; char parse_op_name[20]; }; union acpi_parse_object { struct acpi_parse_obj_common common; struct acpi_parse_obj_named named; struct acpi_parse_obj_asl asl; }; union acpi_descriptor { struct acpi_common_descriptor common; union acpi_operand_object object; struct acpi_namespace_node node; union acpi_parse_object op; }; struct acpi_device_id { __u8 id[16]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; }; struct acpi_dev_match_info { struct acpi_device_id hid[2]; const char *uid; s64 hrv; }; struct acpi_dev_walk_context { int (*fn)(struct acpi_device *, void *); void *data; }; struct acpi_device_status { u32 present: 1; u32 enabled: 1; u32 show_in_ui: 1; u32 functional: 1; u32 battery_present: 1; u32 reserved: 27; }; struct acpi_device_flags { u32 dynamic_status: 1; u32 removable: 1; u32 ejectable: 1; u32 power_manageable: 1; u32 match_driver: 1; u32 initialized: 1; u32 visited: 1; u32 hotplug_notify: 1; u32 is_dock_station: 1; u32 of_compatible_ok: 1; u32 coherent_dma: 1; u32 cca_seen: 1; u32 enumeration_by_parent: 1; u32 honor_deps: 1; u32 reserved: 18; }; struct acpi_pnp_type { u32 hardware_id: 1; u32 bus_address: 1; u32 platform_id: 1; u32 backlight: 1; u32 reserved: 28; }; struct acpi_device_pnp { acpi_bus_id bus_id; int instance_no; struct acpi_pnp_type type; acpi_bus_address bus_address; char *unique_id; struct list_head ids; acpi_device_name device_name; acpi_device_class device_class; }; struct acpi_device_power_flags { u32 explicit_get: 1; u32 power_resources: 1; u32 inrush_current: 1; u32 power_removed: 1; u32 ignore_parent: 1; u32 dsw_present: 1; u32 reserved: 26; }; struct acpi_device_power_state { struct list_head resources; struct { u8 valid: 1; u8 explicit_set: 1; u8 reserved: 6; } flags; int power; int latency; }; struct acpi_device_power { int state; struct acpi_device_power_flags flags; struct acpi_device_power_state states[5]; u8 state_for_enumeration; }; struct acpi_device_wakeup_flags { u8 valid: 1; u8 notifier_present: 1; }; struct acpi_device_wakeup_context { void (*func)(struct acpi_device_wakeup_context *); struct device *dev; }; struct wakeup_source; struct acpi_device_wakeup { acpi_handle gpe_device; u64 gpe_number; u64 sleep_state; struct list_head resources; struct acpi_device_wakeup_flags flags; struct acpi_device_wakeup_context context; struct wakeup_source *ws; int prepare_count; int enable_count; }; struct acpi_device_perf_flags { u8 reserved: 8; }; struct acpi_device_perf_state; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; struct proc_dir_entry; struct acpi_device_dir { struct proc_dir_entry *entry; }; struct dev_links_info { struct list_head suppliers; struct list_head consumers; struct list_head defer_sync; enum dl_dev_state status; }; struct pm_message { int event; }; typedef struct pm_message pm_message_t; struct rb_node { long unsigned int __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; }; struct timerqueue_node { struct rb_node node; ktime_t expires; }; struct hrtimer_clock_base; struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; u8 is_soft; u8 is_hard; }; struct wake_irq; struct pm_subsys_data; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state; bool can_wakeup: 1; bool async_suspend: 1; bool in_dpm_list: 1; bool 
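/*
 * A sketch of arming the struct hrtimer defined above with the classic
 * hrtimer_init()/hrtimer_start() API; the callback runs in (soft)irq context
 * and its return value decides whether the timer restarts. Names prefixed
 * "sketch_" are hypothetical.
 *
 *   static struct hrtimer sketch_timer;
 *
 *   static enum hrtimer_restart sketch_fire(struct hrtimer *t)
 *   {
 *           return HRTIMER_NORESTART;    // one-shot timer
 *   }
 *
 *   hrtimer_init(&sketch_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *   sketch_timer.function = sketch_fire;
 *   hrtimer_start(&sketch_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */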
is_prepared: 1; bool is_suspended: 1; bool is_noirq_suspended: 1; bool is_late_suspended: 1; bool no_pm: 1; bool early_init: 1; bool direct_complete: 1; u32 driver_flags; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path: 1; bool syscore: 1; bool no_pm_callbacks: 1; bool async_in_progress: 1; bool must_resume: 1; bool may_skip_resume: 1; struct hrtimer suspend_timer; u64 timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned int disable_depth: 3; bool idle_notification: 1; bool request_pending: 1; bool deferred_resume: 1; bool needs_force_resume: 1; bool runtime_auto: 1; bool ignore_children: 1; bool no_callbacks: 1; bool irq_safe: 1; bool use_autosuspend: 1; bool timer_autosuspends: 1; bool memalloc_noio: 1; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; enum rpm_status last_status; int runtime_error; int autosuspend_delay; u64 last_busy; u64 active_time; u64 suspended_time; u64 accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; }; struct irq_domain; struct msi_device_data; struct dev_msi_info { struct irq_domain *domain; struct msi_device_data *data; }; struct dev_archdata {}; struct device_private; struct device_type; struct bus_type; struct device_driver; struct dev_pm_domain; struct dma_map_ops; struct bus_dma_region; struct device_dma_parameters; struct cma; struct io_tlb_mem; struct device_node; struct class; struct attribute_group; struct iommu_group; struct dev_iommu; struct device_physical_location; struct device { struct kobject kobj; struct device *parent; struct device_private *p; const char *init_name; const struct device_type *type; const struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct mutex mutex; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_msi_info msi; const struct dma_map_ops *dma_ops; u64 *dma_mask; u64 coherent_dma_mask; u64 bus_dma_limit; const struct bus_dma_region *dma_range_map; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct cma *cma_area; struct io_tlb_mem *dma_io_tlb_mem; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; int numa_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; const struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct dev_iommu *iommu; struct device_physical_location *physical_location; enum device_removable removable; bool offline_disabled: 1; bool offline: 1; bool of_node_reused: 1; bool state_synced: 1; bool can_match: 1; bool dma_skip_sync: 1; bool dma_iommu: 1; }; struct acpi_scan_handler; struct acpi_hotplug_context; struct acpi_device_software_nodes; struct acpi_gpio_mapping; struct acpi_device { u32 pld_crc; int device_type; acpi_handle handle; struct fwnode_handle fwnode; struct list_head wakeup_list; struct list_head del_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; struct acpi_device_software_nodes 
*swnodes; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; unsigned int physical_node_count; unsigned int dep_unmet; struct list_head physical_node_list; struct mutex physical_node_lock; void (*remove)(struct acpi_device *); }; struct xarray { spinlock_t xa_lock; gfp_t xa_flags; void *xa_head; }; struct ida { struct xarray xa; }; struct acpi_device_bus_id { const char *bus_id; struct ida instance_ida; struct list_head node; }; struct acpi_pnp_device_id { u32 length; char *string; }; struct acpi_pnp_device_id_list { u32 count; u32 list_size; struct acpi_pnp_device_id ids[0]; }; struct acpi_device_info { u32 info_size; u32 name; acpi_object_type type; u8 param_count; u16 valid; u8 flags; u8 highest_dstates[4]; u8 lowest_dstates[5]; u64 address; struct acpi_pnp_device_id hardware_id; struct acpi_pnp_device_id unique_id; struct acpi_pnp_device_id class_code; struct acpi_pnp_device_id_list compatible_id_list; }; typedef int (*acpi_op_add)(struct acpi_device *); typedef void (*acpi_op_remove)(struct acpi_device *); typedef void (*acpi_op_notify)(struct acpi_device *, u32); struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_notify notify; }; struct acpi_device_perf_state { struct { u8 valid: 1; u8 reserved: 7; } flags; u8 power; u8 performance; int latency; }; struct acpi_device_physical_node { struct list_head node; struct device *dev; unsigned int node_id; bool put_online: 1; }; struct acpi_device_properties { struct list_head list; const guid_t *guid; union acpi_object *properties; void **bufs; }; struct property_entry { const char *name; size_t length; bool is_inline; enum dev_prop_type type; union { const void *pointer; union { u8 u8_data[8]; u16 u16_data[4]; u32 u32_data[2]; u64 u64_data[1]; const char *str[1]; } value; }; }; struct software_node; struct software_node_ref_args { const struct software_node *node; unsigned int nargs; u64 args[8]; }; struct acpi_device_software_node_port { char port_name[9]; u32 data_lanes[8]; u32 lane_polarities[9]; u64 link_frequencies[8]; unsigned int port_nr; bool crs_csi2_local; struct property_entry port_props[2]; struct property_entry ep_props[8]; struct software_node_ref_args remote_ep[1]; }; struct acpi_device_software_nodes { struct property_entry dev_props[6]; struct software_node *nodes; const struct software_node **nodeptrs; struct acpi_device_software_node_port *ports; unsigned int num_ports; }; struct acpi_table_desc; struct acpi_evaluate_info; struct acpi_device_walk_info { struct acpi_table_desc *table_desc; struct acpi_evaluate_info *evaluate_info; u32 device_count; u32 num_STA; u32 num_INI; }; struct of_device_id; struct dev_pm_ops; struct driver_private; struct device_driver { const char *name; const struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); void (*sync_state)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); const struct attribute_group **groups; const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump)(struct device *); struct driver_private *p; }; struct acpi_driver { char name[80]; char class[80]; const struct acpi_device_id *ids; unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; }; struct transaction; struct acpi_ec { 
acpi_handle handle; int gpe; int irq; long unsigned int command_addr; long unsigned int data_addr; bool global_lock; long unsigned int flags; long unsigned int reference_count; struct mutex mutex; wait_queue_head_t wait; struct list_head list; struct transaction *curr; spinlock_t lock; struct work_struct work; long unsigned int timestamp; enum acpi_ec_event_state event_state; unsigned int events_to_process; unsigned int events_in_progress; unsigned int queries_in_progress; bool busy_polling; unsigned int polling_guard; }; struct transaction { const u8 *wdata; u8 *rdata; short unsigned int irq_count; u8 command; u8 wi; u8 ri; u8 wlen; u8 rlen; u8 flags; }; struct acpi_ec_query_handler; struct acpi_ec_query { struct transaction transaction; struct work_struct work; struct acpi_ec_query_handler *handler; struct acpi_ec *ec; }; typedef int (*acpi_ec_query_func)(void *); struct acpi_ec_query_handler { struct list_head node; acpi_ec_query_func func; acpi_handle handle; void *data; u8 query_bit; struct kref kref; }; union acpi_predefined_info; struct acpi_evaluate_info { struct acpi_namespace_node *prefix_node; const char *relative_pathname; union acpi_operand_object **parameters; struct acpi_namespace_node *node; union acpi_operand_object *obj_desc; char *full_pathname; const union acpi_predefined_info *predefined; union acpi_operand_object *return_object; union acpi_operand_object *parent_package; u32 return_flags; u32 return_btype; u16 param_count; u16 node_flags; u8 pass_number; u8 return_object_type; u8 flags; }; struct acpi_exception_info { char *name; }; struct acpi_fadt_info { const char *name; u16 address64; u16 address32; u16 length; u8 default_length; u8 flags; }; struct acpi_generic_address; struct acpi_fadt_pm_info { struct acpi_generic_address *target; u16 source; u8 register_num; }; struct acpi_fan_fif { u8 revision; u8 fine_grain_ctrl; u8 step_size; u8 low_speed_notification; }; struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); }; struct acpi_fan_fps; struct thermal_cooling_device; struct acpi_fan { bool acpi4; struct acpi_fan_fif fif; struct acpi_fan_fps *fps; int fps_count; struct thermal_cooling_device *cdev; struct device_attribute fst_speed; struct device_attribute fine_grain_control; }; struct acpi_fan_fps { u64 control; u64 trip_point; u64 speed; u64 noise_level; u64 power; char name[20]; struct device_attribute dev_attr; }; struct acpi_fan_fst { u64 revision; u64 control; u64 speed; }; struct acpi_ffh_info { u64 offset; u64 length; }; typedef u32 (*acpi_event_handler)(void *); struct acpi_fixed_event_handler { acpi_event_handler handler; void *context; }; struct acpi_fixed_event_info { u8 status_register_id; u8 enable_register_id; u16 status_bit_mask; u16 enable_bit_mask; }; struct acpi_ged_device { struct device *dev; struct list_head event_list; }; struct acpi_ged_event { struct list_head node; struct device *dev; unsigned int gsi; unsigned int irq; acpi_handle handle; }; struct acpi_ged_handler_info { struct acpi_ged_handler_info *next; u32 int_id; struct acpi_namespace_node *evt_method; }; struct acpi_generic_address { u8 space_id; u8 bit_width; u8 bit_offset; u8 access_width; u64 address; } __attribute__((packed)); struct acpi_update_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; union acpi_operand_object *object; }; struct acpi_scope_state { void *next; u8 descriptor_type; u8 flags; u16 value; 
u16 state; struct acpi_namespace_node *node; }; struct acpi_pscope_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; u32 arg_count; union acpi_parse_object *op; u8 *arg_end; u8 *pkg_end; u32 arg_list; }; struct acpi_pkg_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; u32 index; union acpi_operand_object *source_object; union acpi_operand_object *dest_object; struct acpi_walk_state *walk_state; void *this_target_obj; u32 num_packages; }; struct acpi_thread_state { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; u8 current_sync_level; struct acpi_walk_state *walk_state_list; union acpi_operand_object *acquired_mutex_list; u64 thread_id; }; struct acpi_result_values { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; union acpi_operand_object *obj_desc[8]; }; struct acpi_global_notify_handler; struct acpi_notify_info { void *next; u8 descriptor_type; u8 flags; u16 value; u16 state; u8 handler_list_id; struct acpi_namespace_node *node; union acpi_operand_object *handler_list_head; struct acpi_global_notify_handler *global; }; union acpi_generic_state { struct acpi_common_state common; struct acpi_control_state control; struct acpi_update_state update; struct acpi_scope_state scope; struct acpi_pscope_state parse_scope; struct acpi_pkg_state pkg; struct acpi_thread_state thread; struct acpi_result_values results; struct acpi_notify_info notify; }; struct acpi_genl_event { acpi_device_class device_class; char bus_id[15]; u32 type; u32 data; }; typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **); struct acpi_get_devices_info { acpi_walk_callback user_function; void *context; const char *hid; }; struct acpi_global_notify_handler { acpi_notify_handler handler; void *context; }; struct acpi_gpe_address { u8 space_id; u64 address; }; struct acpi_gpe_xrupt_info; struct acpi_gpe_register_info; struct acpi_gpe_event_info; struct acpi_gpe_block_info { struct acpi_namespace_node *node; struct acpi_gpe_block_info *previous; struct acpi_gpe_block_info *next; struct acpi_gpe_xrupt_info *xrupt_block; struct acpi_gpe_register_info *register_info; struct acpi_gpe_event_info *event_info; u64 address; u32 register_count; u16 gpe_count; u16 block_base_number; u8 space_id; u8 initialized; }; struct acpi_gpe_block_status_context { struct acpi_gpe_register_info *gpe_skip_register_info; u8 gpe_skip_mask; u8 retval; }; struct acpi_gpe_device_info { u32 index; u32 next_block_base_index; acpi_status status; struct acpi_namespace_node *gpe_device; }; struct acpi_gpe_handler_info; struct acpi_gpe_notify_info; union acpi_gpe_dispatch_info { struct acpi_namespace_node *method_node; struct acpi_gpe_handler_info *handler; struct acpi_gpe_notify_info *notify_list; }; struct acpi_gpe_event_info { union acpi_gpe_dispatch_info dispatch; struct acpi_gpe_register_info *register_info; u8 flags; u8 gpe_number; u8 runtime_count; u8 disable_for_dispatch; }; typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *); struct acpi_gpe_handler_info { acpi_gpe_handler address; void *context; struct acpi_namespace_node *method_node; u8 original_flags; u8 originally_enabled; }; struct acpi_gpe_notify_info { struct acpi_namespace_node *device_node; struct acpi_gpe_notify_info *next; }; struct acpi_gpe_register_info { struct acpi_gpe_address status_address; struct acpi_gpe_address enable_address; u16 base_gpe_number; u8 enable_for_wake; u8 enable_for_run; u8 mask_for_run; u8 enable_mask; }; struct acpi_gpe_walk_info { struct acpi_namespace_node 
*gpe_device; struct acpi_gpe_block_info *gpe_block; u16 count; acpi_owner_id owner_id; u8 execute_by_owner_id; }; struct acpi_gpe_xrupt_info { struct acpi_gpe_xrupt_info *previous; struct acpi_gpe_xrupt_info *next; struct acpi_gpe_block_info *gpe_block_list_head; u32 interrupt_number; }; struct acpi_gpio_params; struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; unsigned int quirks; }; struct acpi_gpio_params { unsigned int crs_entry_index; unsigned int line_index; bool active_low; }; struct acpi_handle_list { u32 count; acpi_handle *handles; }; struct acpi_hardware_id { struct list_head list; const char *id; }; struct acpi_hmat_structure { u16 type; u16 reserved; u32 length; }; typedef int (*acpi_hp_notify)(struct acpi_device *, u32); typedef void (*acpi_hp_uevent)(struct acpi_device *, u32); typedef void (*acpi_hp_fixup)(struct acpi_device *); struct acpi_hotplug_context { struct acpi_device *self; acpi_hp_notify notify; acpi_hp_uevent uevent; acpi_hp_fixup fixup; }; struct acpi_hotplug_profile { struct kobject kobj; int (*scan_dependent)(struct acpi_device *); void (*notify_online)(struct acpi_device *); bool enabled: 1; bool demand_offline: 1; }; struct acpi_hp_work { struct work_struct work; struct acpi_device *adev; u32 src; }; struct acpi_init_walk_info { u32 table_index; u32 object_count; u32 method_count; u32 serial_method_count; u32 non_serial_method_count; u32 serialized_method_count; u32 device_count; u32 op_region_count; u32 field_count; u32 buffer_count; u32 package_count; u32 op_region_init; u32 field_init; u32 buffer_init; u32 package_init; acpi_owner_id owner_id; }; struct acpi_interface_info { char *name; struct acpi_interface_info *next; u8 flags; u8 value; }; struct acpi_io_attribute { u8 range_type; u8 translation; u8 translation_type; u8 reserved1; }; struct rcu_work { struct work_struct work; struct callback_head rcu; struct workqueue_struct *wq; }; struct acpi_ioremap { struct list_head list; void *virt; acpi_physical_address phys; acpi_size size; union { long unsigned int refcount; struct rcu_work rwork; } track; }; struct acpi_lpat { int temp; int raw; }; struct acpi_lpat_conversion_table { struct acpi_lpat *lpat; int lpat_count; }; struct acpi_lpi_state { u32 min_residency; u32 wake_latency; u32 flags; u32 arch_flags; u32 res_cnt_freq; u32 enable_parent_state; u64 address; u8 index; u8 entry_method; char desc[32]; }; struct acpi_lpi_states_array { unsigned int size; unsigned int composite_states_size; struct acpi_lpi_state *entries; struct acpi_lpi_state *composite_states[8]; }; struct acpi_lpit_header { u32 type; u32 length; u16 unique_id; u16 reserved; u32 flags; }; struct acpi_lpit_native { struct acpi_lpit_header header; struct acpi_generic_address entry_trigger; u32 residency; u32 latency; struct acpi_generic_address residency_counter; u64 counter_frequency; }; struct acpi_subtable_header { u8 type; u8 length; }; struct acpi_madt_core_pic { struct acpi_subtable_header header; u8 version; u32 processor_id; u32 core_id; u32 flags; } __attribute__((packed)); struct acpi_madt_generic_distributor { struct acpi_subtable_header header; u16 reserved; u32 gic_id; u64 base_address; u32 global_irq_base; u8 version; u8 reserved2[3]; }; struct acpi_madt_generic_interrupt { struct acpi_subtable_header header; u16 reserved; u32 cpu_interface_number; u32 uid; u32 flags; u32 parking_version; u32 performance_interrupt; u64 parked_address; u64 base_address; u64 gicv_base_address; u64 gich_base_address; u32 vgic_interrupt; u64 
gicr_base_address; u64 arm_mpidr; u8 efficiency_class; u8 reserved2[1]; u16 spe_interrupt; u16 trbe_interrupt; } __attribute__((packed)); struct acpi_madt_interrupt_override { struct acpi_subtable_header header; u8 bus; u8 source_irq; u32 global_irq; u16 inti_flags; } __attribute__((packed)); struct acpi_madt_interrupt_source { struct acpi_subtable_header header; u16 inti_flags; u8 type; u8 id; u8 eid; u8 io_sapic_vector; u32 global_irq; u32 flags; }; struct acpi_madt_io_apic { struct acpi_subtable_header header; u8 id; u8 reserved; u32 address; u32 global_irq_base; }; struct acpi_madt_io_sapic { struct acpi_subtable_header header; u8 id; u8 reserved; u32 global_irq_base; u64 address; }; struct acpi_madt_local_apic { struct acpi_subtable_header header; u8 processor_id; u8 id; u32 lapic_flags; }; struct acpi_madt_local_apic_nmi { struct acpi_subtable_header header; u8 processor_id; u16 inti_flags; u8 lint; } __attribute__((packed)); struct acpi_madt_local_apic_override { struct acpi_subtable_header header; u16 reserved; u64 address; } __attribute__((packed)); struct acpi_madt_local_sapic { struct acpi_subtable_header header; u8 processor_id; u8 id; u8 eid; u8 reserved[3]; u32 lapic_flags; u32 uid; char uid_string[0]; }; struct acpi_madt_local_x2apic { struct acpi_subtable_header header; u16 reserved; u32 local_apic_id; u32 lapic_flags; u32 uid; }; struct acpi_madt_local_x2apic_nmi { struct acpi_subtable_header header; u16 inti_flags; u32 uid; u8 lint; u8 reserved[3]; }; struct acpi_madt_multiproc_wakeup { struct acpi_subtable_header header; u16 version; u32 reserved; u64 mailbox_address; u64 reset_vector; }; struct acpi_madt_multiproc_wakeup_mailbox { u16 command; u16 reserved; u32 apic_id; u64 wakeup_vector; u8 reserved_os[2032]; u8 reserved_firmware[2048]; }; struct acpi_madt_nmi_source { struct acpi_subtable_header header; u16 inti_flags; u32 global_irq; }; struct acpi_madt_rintc { struct acpi_subtable_header header; u8 version; u8 reserved; u32 flags; u64 hart_id; u32 uid; u32 ext_intc_id; u64 imsic_addr; u32 imsic_size; } __attribute__((packed)); struct acpi_mcfg_allocation { u64 address; u16 pci_segment; u8 start_bus_number; u8 end_bus_number; u32 reserved; }; struct acpi_mem_mapping { acpi_physical_address physical_address; u8 *logical_address; acpi_size length; struct acpi_mem_mapping *next_mm; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; struct acpi_mem_mapping *cur_mm; struct acpi_mem_mapping *first_mm; }; struct acpi_memory_attribute { u8 write_protect; u8 caching; u8 range_type; u8 translation; }; struct acpi_mutex_info { void *mutex; u32 use_count; u64 thread_id; }; struct acpi_name_info { char name[4]; u16 argument_list; u8 expected_btypes; } __attribute__((packed)); struct acpi_namestring_info { const char *external_name; const char *next_external_char; char *internal_name; u32 length; u32 num_segments; u32 num_carats; u8 fully_qualified; }; union acpi_object { acpi_object_type type; struct { acpi_object_type type; u64 value; } integer; struct { acpi_object_type type; u32 length; char *pointer; } string; struct { acpi_object_type type; u32 length; u8 *pointer; } buffer; struct { acpi_object_type type; u32 count; union acpi_object *elements; } package; struct { acpi_object_type type; acpi_object_type actual_type; acpi_handle handle; } reference; struct { acpi_object_type type; u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; u32 system_level; u32 resource_order; } power_resource; }; struct 
acpi_object_list { u32 count; union acpi_object *pointer; }; struct acpi_offsets { size_t offset; u8 mode; }; struct acpi_opcode_info { u32 parse_args; u32 runtime_args; u16 flags; u8 object_type; u8 class; u8 type; }; typedef void (*acpi_osd_exec_callback)(void *); struct acpi_os_dpc { acpi_osd_exec_callback function; void *context; struct work_struct work; }; struct acpi_osc_context { char *uuid_str; int rev; struct acpi_buffer cap; struct acpi_buffer ret; }; struct acpi_osi_config { u8 default_disabling; unsigned int linux_enable: 1; unsigned int linux_dmi: 1; unsigned int linux_cmdline: 1; unsigned int darwin_enable: 1; unsigned int darwin_dmi: 1; unsigned int darwin_cmdline: 1; }; struct acpi_osi_entry { char string[64]; bool enable; }; struct acpi_package_info { u8 type; u8 object_type1; u8 count1; u8 object_type2; u8 count2; u16 reserved; } __attribute__((packed)); struct acpi_package_info2 { u8 type; u8 count; u8 object_type[4]; u8 reserved; }; struct acpi_package_info3 { u8 type; u8 count; u8 object_type[2]; u8 tail_object_type; u16 reserved; } __attribute__((packed)); struct acpi_package_info4 { u8 type; u8 object_type1; u8 count1; u8 sub_object_types; u8 pkg_count; u16 reserved; } __attribute__((packed)); struct acpi_parse_state { u8 *aml_start; u8 *aml; u8 *aml_end; u8 *pkg_start; u8 *pkg_end; union acpi_parse_object *start_op; struct acpi_namespace_node *start_node; union acpi_generic_state *scope; union acpi_parse_object *start_scope; u32 aml_size; }; struct acpi_pcc_info { u8 subspace_id; u16 length; u8 *internal_buffer; }; struct acpi_pcct_ext_pcc_master { struct acpi_subtable_header header; u32 platform_interrupt; u8 flags; u8 reserved1; u64 base_address; u32 length; struct acpi_generic_address doorbell_register; u64 preserve_mask; u64 write_mask; u32 latency; u32 max_access_rate; u32 min_turnaround_time; struct acpi_generic_address platform_ack_register; u64 ack_preserve_mask; u64 ack_set_mask; u64 reserved2; struct acpi_generic_address cmd_complete_register; u64 cmd_complete_mask; struct acpi_generic_address cmd_update_register; u64 cmd_update_preserve_mask; u64 cmd_update_set_mask; struct acpi_generic_address error_status_register; u64 error_status_mask; } __attribute__((packed)); struct acpi_pcct_hw_reduced { struct acpi_subtable_header header; u32 platform_interrupt; u8 flags; u8 reserved; u64 base_address; u64 length; struct acpi_generic_address doorbell_register; u64 preserve_mask; u64 write_mask; u32 latency; u32 max_access_rate; u16 min_turnaround_time; } __attribute__((packed)); struct acpi_pcct_hw_reduced_type2 { struct acpi_subtable_header header; u32 platform_interrupt; u8 flags; u8 reserved; u64 base_address; u64 length; struct acpi_generic_address doorbell_register; u64 preserve_mask; u64 write_mask; u32 latency; u32 max_access_rate; u16 min_turnaround_time; struct acpi_generic_address platform_ack_register; u64 ack_preserve_mask; u64 ack_write_mask; } __attribute__((packed)); struct acpi_pcct_shared_memory { u32 signature; u16 command; u16 status; }; struct acpi_pcct_subspace { struct acpi_subtable_header header; u8 reserved[6]; u64 base_address; u64 length; struct acpi_generic_address doorbell_register; u64 preserve_mask; u64 write_mask; u32 latency; u32 max_access_rate; u16 min_turnaround_time; } __attribute__((packed)); struct acpi_pci_device { acpi_handle device; struct acpi_pci_device *next; }; struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_pci_ioapic { acpi_handle root_handle; acpi_handle handle; u32 gsi_base; 
struct resource res; struct pci_dev *pdev; struct list_head list; }; struct acpi_pci_link_irq { u32 active; u8 triggering; u8 polarity; u8 resource_type; u8 possible_count; u32 possible[16]; u8 initialized: 1; u8 reserved: 7; }; struct acpi_pci_link { struct list_head list; struct acpi_device *device; struct acpi_pci_link_irq irq; int refcnt; }; struct pci_bus; struct acpi_pci_root { struct acpi_device *device; struct pci_bus *bus; u16 segment; int bridge_type; struct resource secondary; u32 osc_support_set; u32 osc_control_set; u32 osc_ext_support_set; u32 osc_ext_control_set; phys_addr_t mcfg_addr; }; struct acpi_pci_root_ops; struct acpi_pci_root_info { struct acpi_pci_root *root; struct acpi_device *bridge; struct acpi_pci_root_ops *ops; struct list_head resources; char name[16]; }; struct pci_ops; struct acpi_pci_root_ops { struct pci_ops *pci_ops; int (*init_info)(struct acpi_pci_root_info *); void (*release_info)(struct acpi_pci_root_info *); int (*prepare_resources)(struct acpi_pci_root_info *); }; struct acpi_pci_routing_table { u32 length; u32 pin; u64 address; u32 source_index; union { char pad[4]; struct { struct {} __Empty_source; char source[0]; }; }; }; struct acpi_pct_register { u8 descriptor; u16 length; u8 space_id; u8 bit_width; u8 bit_offset; u8 reserved; u64 address; } __attribute__((packed)); struct acpi_pkg_info { u8 *free_space; acpi_size length; u32 object_space; u32 num_packages; }; struct acpi_platform_list { char oem_id[7]; char oem_table_id[9]; u32 oem_revision; char *table; enum acpi_predicate pred; char *reason; u32 data; }; struct acpi_pld_info { u8 revision; u8 ignore_color; u8 red; u8 green; u8 blue; u16 width; u16 height; u8 user_visible; u8 dock; u8 lid; u8 panel; u8 vertical_position; u8 horizontal_position; u8 shape; u8 group_orientation; u8 group_token; u8 group_position; u8 bay; u8 ejectable; u8 ospm_eject_required; u8 cabinet_number; u8 card_cage_number; u8 reference; u8 rotation; u8 order; u8 reserved; u16 vertical_offset; u16 horizontal_offset; }; struct acpi_port_info { char *name; u16 start; u16 end; u8 osi_dependency; }; struct acpi_power_dependent_device { struct device *dev; struct list_head node; }; struct acpi_power_register { u8 descriptor; u16 length; u8 space_id; u8 bit_width; u8 bit_offset; u8 access_size; u64 address; } __attribute__((packed)); struct acpi_power_resource { struct acpi_device device; struct list_head list_node; u32 system_level; u32 order; unsigned int ref_count; u8 state; struct mutex resource_lock; struct list_head dependents; }; struct acpi_power_resource_entry { struct list_head node; struct acpi_power_resource *resource; }; union acpi_predefined_info { struct acpi_name_info info; struct acpi_package_info ret_info; struct acpi_package_info2 ret_info2; struct acpi_package_info3 ret_info3; struct acpi_package_info4 ret_info4; }; struct acpi_predefined_names { const char *name; u8 type; char *val; }; struct acpi_prmt_handler_info { u16 revision; u16 length; u8 handler_guid[16]; u64 handler_address; u64 static_data_buffer_address; u64 acpi_param_buffer_address; } __attribute__((packed)); struct acpi_prmt_module_header { u16 revision; u16 length; }; struct acpi_prmt_module_info { u16 revision; u16 length; u8 module_guid[16]; u16 major_rev; u16 minor_rev; u16 handler_info_count; u32 handler_info_offset; u64 mmio_list_pointer; } __attribute__((packed)); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); struct acpi_table_header; typedef int 
(*acpi_tbl_table_handler)(struct acpi_table_header *); union acpi_subtable_headers; typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const long unsigned int); struct acpi_probe_entry { __u8 id[5]; __u8 type; acpi_probe_entry_validate_subtbl subtable_valid; union { acpi_tbl_table_handler probe_table; acpi_tbl_entry_handler probe_subtbl; }; kernel_ulong_t driver_data; }; struct acpi_processor_flags { u8 power: 1; u8 performance: 1; u8 throttling: 1; u8 limit: 1; u8 bm_control: 1; u8 bm_check: 1; u8 has_cst: 1; u8 has_lpi: 1; u8 power_setup_done: 1; u8 bm_rld_set: 1; u8 previously_online: 1; }; struct acpi_processor_cx { u8 valid; u8 type; u32 address; u8 entry_method; u8 index; u32 latency; u8 bm_sts_skip; char desc[32]; }; struct acpi_processor_power { int count; union { struct acpi_processor_cx states[8]; struct acpi_lpi_state lpi_states[8]; }; int timer_broadcast_on_state; }; struct acpi_tsd_package { u64 num_entries; u64 revision; u64 domain; u64 coord_type; u64 num_processors; }; struct acpi_processor_tx { u16 power; u16 performance; }; struct acpi_processor_tx_tss; struct acpi_processor; struct acpi_processor_throttling { unsigned int state; unsigned int platform_limit; struct acpi_pct_register control_register; struct acpi_pct_register status_register; unsigned int state_count; struct acpi_processor_tx_tss *states_tss; struct acpi_tsd_package domain_info; cpumask_var_t shared_cpu_map; int (*acpi_processor_get_throttling)(struct acpi_processor *); int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool); u32 address; u8 duty_offset; u8 duty_width; u8 tsd_valid_flag; unsigned int shared_type; struct acpi_processor_tx states[16]; }; struct acpi_processor_lx { int px; int tx; }; struct acpi_processor_limit { struct acpi_processor_lx state; struct acpi_processor_lx thermal; struct acpi_processor_lx user; }; struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; }; struct freq_constraints; struct freq_qos_request { enum freq_qos_req_type type; struct plist_node pnode; struct freq_constraints *qos; }; struct acpi_processor_performance; struct acpi_processor { acpi_handle handle; u32 acpi_id; phys_cpuid_t phys_id; u32 id; u32 pblk; int performance_platform_limit; int throttling_platform_limit; struct acpi_processor_flags flags; struct acpi_processor_power power; struct acpi_processor_performance *performance; struct acpi_processor_throttling throttling; struct acpi_processor_limit limit; struct thermal_cooling_device *cdev; struct device *dev; struct freq_qos_request perflib_req; struct freq_qos_request thermal_req; }; struct acpi_processor_errata { u8 smp; struct { u8 throttle: 1; u8 fdma: 1; u8 reserved: 6; u32 bmisx; } piix4; }; struct acpi_psd_package { u64 num_entries; u64 revision; u64 domain; u64 coord_type; u64 num_processors; }; struct acpi_processor_px; struct acpi_processor_performance { unsigned int state; unsigned int platform_limit; struct acpi_pct_register control_register; struct acpi_pct_register status_register; unsigned int state_count; struct acpi_processor_px *states; struct acpi_psd_package domain_info; cpumask_var_t shared_cpu_map; unsigned int shared_type; }; struct acpi_processor_px { u64 core_frequency; u64 power; u64 transition_latency; u64 bus_master_latency; u64 control; u64 status; }; struct acpi_processor_throttling_arg { struct acpi_processor *pr; int target_state; bool force; }; struct acpi_processor_tx_tss { u64 freqpercentage; u64 power; u64 transition_latency; u64 control; u64 status; }; 
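/*
 * A minimal, hedged sketch of how BTF-derived definitions like the
 * acpi_processor structs above are typically consumed from BPF C with
 * CO-RE relocations (libbpf's bpf_core_read.h), assuming this dump is
 * used as a vmlinux.h-style header. The kprobe attach point below is a
 * hypothetical placeholder, not a symbol confirmed by this file:
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *   #include <bpf/bpf_core_read.h>
 *
 *   SEC("kprobe/some_acpi_function")   // hypothetical attach point
 *   int BPF_KPROBE(trace_acpi, struct acpi_processor *pr)
 *   {
 *           // Field offsets are relocated at load time against the
 *           // running kernel's BTF, so these reads stay valid even if
 *           // the running kernel's struct layout differs from this dump.
 *           u32 cpu = BPF_CORE_READ(pr, id);
 *           u32 states = BPF_CORE_READ(pr, performance, state_count);
 *           bpf_printk("acpi cpu %u, %u P-states", cpu, states);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */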
struct acpi_prt_entry { struct acpi_pci_id id; u8 pin; acpi_handle link; u32 index; }; struct acpi_reg_walk_info { u32 function; u32 reg_run_count; acpi_adr_space_type space_id; }; typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **); struct acpi_repair_info { char name[4]; acpi_repair_function repair_function; }; struct acpi_resource_irq { u8 descriptor_length; u8 triggering; u8 polarity; u8 shareable; u8 wake_capable; u8 interrupt_count; union { u8 interrupt; struct { struct {} __Empty_interrupts; u8 interrupts[0]; }; }; }; struct acpi_resource_dma { u8 type; u8 bus_master; u8 transfer; u8 channel_count; union { u8 channel; struct { struct {} __Empty_channels; u8 channels[0]; }; }; }; struct acpi_resource_start_dependent { u8 descriptor_length; u8 compatibility_priority; u8 performance_robustness; }; struct acpi_resource_io { u8 io_decode; u8 alignment; u8 address_length; u16 minimum; u16 maximum; } __attribute__((packed)); struct acpi_resource_fixed_io { u16 address; u8 address_length; } __attribute__((packed)); struct acpi_resource_fixed_dma { u16 request_lines; u16 channels; u8 width; } __attribute__((packed)); struct acpi_resource_vendor { u16 byte_length; u8 byte_data[0]; }; struct acpi_resource_vendor_typed { u16 byte_length; u8 uuid_subtype; u8 uuid[16]; u8 byte_data[0]; } __attribute__((packed)); struct acpi_resource_end_tag { u8 checksum; }; struct acpi_resource_memory24 { u8 write_protect; u16 minimum; u16 maximum; u16 alignment; u16 address_length; } __attribute__((packed)); struct acpi_resource_memory32 { u8 write_protect; u32 minimum; u32 maximum; u32 alignment; u32 address_length; } __attribute__((packed)); struct acpi_resource_fixed_memory32 { u8 write_protect; u32 address; u32 address_length; } __attribute__((packed)); union acpi_resource_attribute { struct acpi_memory_attribute mem; struct acpi_io_attribute io; u8 type_specific; }; struct acpi_resource_source { u8 index; u16 string_length; char *string_ptr; } __attribute__((packed)); struct acpi_resource_address16 { u8 resource_type; u8 producer_consumer; u8 decode; u8 min_address_fixed; u8 max_address_fixed; union acpi_resource_attribute info; struct acpi_address16_attribute address; struct acpi_resource_source resource_source; } __attribute__((packed)); struct acpi_resource_address32 { u8 resource_type; u8 producer_consumer; u8 decode; u8 min_address_fixed; u8 max_address_fixed; union acpi_resource_attribute info; struct acpi_address32_attribute address; struct acpi_resource_source resource_source; } __attribute__((packed)); struct acpi_resource_address64 { u8 resource_type; u8 producer_consumer; u8 decode; u8 min_address_fixed; u8 max_address_fixed; union acpi_resource_attribute info; struct acpi_address64_attribute address; struct acpi_resource_source resource_source; } __attribute__((packed)); struct acpi_resource_extended_address64 { u8 resource_type; u8 producer_consumer; u8 decode; u8 min_address_fixed; u8 max_address_fixed; union acpi_resource_attribute info; u8 revision_ID; struct acpi_address64_attribute address; u64 type_specific; } __attribute__((packed)); struct acpi_resource_extended_irq { u8 producer_consumer; u8 triggering; u8 polarity; u8 shareable; u8 wake_capable; u8 interrupt_count; struct acpi_resource_source resource_source; union { u32 interrupt; struct { struct {} __Empty_interrupts; u32 interrupts[0]; }; }; } __attribute__((packed)); struct acpi_resource_generic_register { u8 space_id; u8 bit_width; u8 bit_offset; u8 access_size; u64 address; } 
__attribute__((packed)); struct acpi_resource_gpio { u8 revision_id; u8 connection_type; u8 producer_consumer; u8 pin_config; u8 shareable; u8 wake_capable; u8 io_restriction; u8 triggering; u8 polarity; u16 drive_strength; u16 debounce_timeout; u16 pin_table_length; u16 vendor_length; struct acpi_resource_source resource_source; u16 *pin_table; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_i2c_serialbus { u8 revision_id; u8 type; u8 producer_consumer; u8 slave_mode; u8 connection_sharing; u8 type_revision_id; u16 type_data_length; u16 vendor_length; struct acpi_resource_source resource_source; u8 *vendor_data; u8 access_mode; u16 slave_address; u32 connection_speed; } __attribute__((packed)); struct acpi_resource_spi_serialbus { u8 revision_id; u8 type; u8 producer_consumer; u8 slave_mode; u8 connection_sharing; u8 type_revision_id; u16 type_data_length; u16 vendor_length; struct acpi_resource_source resource_source; u8 *vendor_data; u8 wire_mode; u8 device_polarity; u8 data_bit_length; u8 clock_phase; u8 clock_polarity; u16 device_selection; u32 connection_speed; } __attribute__((packed)); struct acpi_resource_uart_serialbus { u8 revision_id; u8 type; u8 producer_consumer; u8 slave_mode; u8 connection_sharing; u8 type_revision_id; u16 type_data_length; u16 vendor_length; struct acpi_resource_source resource_source; u8 *vendor_data; u8 endian; u8 data_bits; u8 stop_bits; u8 flow_control; u8 parity; u8 lines_enabled; u16 rx_fifo_size; u16 tx_fifo_size; u32 default_baud_rate; } __attribute__((packed)); struct acpi_resource_csi2_serialbus { u8 revision_id; u8 type; u8 producer_consumer; u8 slave_mode; u8 connection_sharing; u8 type_revision_id; u16 type_data_length; u16 vendor_length; struct acpi_resource_source resource_source; u8 *vendor_data; u8 local_port_instance; u8 phy_type; } __attribute__((packed)); struct acpi_resource_common_serialbus { u8 revision_id; u8 type; u8 producer_consumer; u8 slave_mode; u8 connection_sharing; u8 type_revision_id; u16 type_data_length; u16 vendor_length; struct acpi_resource_source resource_source; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_pin_function { u8 revision_id; u8 pin_config; u8 shareable; u16 function_number; u16 pin_table_length; u16 vendor_length; struct acpi_resource_source resource_source; u16 *pin_table; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_pin_config { u8 revision_id; u8 producer_consumer; u8 shareable; u8 pin_config_type; u32 pin_config_value; u16 pin_table_length; u16 vendor_length; struct acpi_resource_source resource_source; u16 *pin_table; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_label { u16 string_length; char *string_ptr; } __attribute__((packed)); struct acpi_resource_pin_group { u8 revision_id; u8 producer_consumer; u16 pin_table_length; u16 vendor_length; u16 *pin_table; struct acpi_resource_label resource_label; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_pin_group_function { u8 revision_id; u8 producer_consumer; u8 shareable; u16 function_number; u16 vendor_length; struct acpi_resource_source resource_source; struct acpi_resource_label resource_source_label; u8 *vendor_data; } __attribute__((packed)); struct acpi_resource_pin_group_config { u8 revision_id; u8 producer_consumer; u8 shareable; u8 pin_config_type; u32 pin_config_value; u16 vendor_length; struct acpi_resource_source resource_source; struct acpi_resource_label resource_source_label; u8 *vendor_data; } __attribute__((packed)); struct 
acpi_resource_clock_input { u8 revision_id; u8 mode; u8 scale; u16 frequency_divisor; u32 frequency_numerator; struct acpi_resource_source resource_source; } __attribute__((packed)); struct acpi_resource_address { u8 resource_type; u8 producer_consumer; u8 decode; u8 min_address_fixed; u8 max_address_fixed; union acpi_resource_attribute info; }; union acpi_resource_data { struct acpi_resource_irq irq; struct acpi_resource_dma dma; struct acpi_resource_start_dependent start_dpf; struct acpi_resource_io io; struct acpi_resource_fixed_io fixed_io; struct acpi_resource_fixed_dma fixed_dma; struct acpi_resource_vendor vendor; struct acpi_resource_vendor_typed vendor_typed; struct acpi_resource_end_tag end_tag; struct acpi_resource_memory24 memory24; struct acpi_resource_memory32 memory32; struct acpi_resource_fixed_memory32 fixed_memory32; struct acpi_resource_address16 address16; struct acpi_resource_address32 address32; struct acpi_resource_address64 address64; struct acpi_resource_extended_address64 ext_address64; struct acpi_resource_extended_irq extended_irq; struct acpi_resource_generic_register generic_reg; struct acpi_resource_gpio gpio; struct acpi_resource_i2c_serialbus i2c_serial_bus; struct acpi_resource_spi_serialbus spi_serial_bus; struct acpi_resource_uart_serialbus uart_serial_bus; struct acpi_resource_csi2_serialbus csi2_serial_bus; struct acpi_resource_common_serialbus common_serial_bus; struct acpi_resource_pin_function pin_function; struct acpi_resource_pin_config pin_config; struct acpi_resource_pin_group pin_group; struct acpi_resource_pin_group_function pin_group_function; struct acpi_resource_pin_group_config pin_group_config; struct acpi_resource_clock_input clock_input; struct acpi_resource_address address; }; struct acpi_resource { u32 type; u32 length; union acpi_resource_data data; }; struct acpi_rsconvert_info { u8 opcode; u8 resource_offset; u8 aml_offset; u8 value; }; struct acpi_rw_lock { void *writer_mutex; void *reader_mutex; u32 num_readers; }; struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); void (*check)(void); void (*restore)(void); }; struct acpi_scan_clear_dep_work { struct work_struct work; struct acpi_device *adev; }; struct acpi_scan_handler { struct list_head list_node; const struct acpi_device_id *ids; bool (*match)(const char *, const struct acpi_device_id **); int (*attach)(struct acpi_device *, const struct acpi_device_id *); void (*detach)(struct acpi_device *); void (*post_eject)(struct acpi_device *); void (*bind)(struct device *); void (*unbind)(struct device *); struct acpi_hotplug_profile hotplug; }; typedef u32 (*acpi_sci_handler)(void *); struct acpi_sci_handler_info { struct acpi_sci_handler_info *next; acpi_sci_handler address; void *context; }; struct acpi_signal_fatal_info { u32 type; u32 code; u32 argument; }; typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **); struct acpi_simple_repair_info { char name[4]; u32 unexpected_btypes; u32 package_index; acpi_object_converter object_converter; }; struct acpi_srat_cpu_affinity { struct acpi_subtable_header header; u8 proximity_domain_lo; u8 apic_id; u32 flags; u8 local_sapic_eid; u8 proximity_domain_hi[3]; u32 clock_domain; }; struct acpi_srat_generic_affinity { struct acpi_subtable_header header; u8 reserved; u8 device_handle_type; u32 proximity_domain; u8 device_handle[16]; u32 flags; u32 reserved1; }; struct acpi_srat_gicc_affinity { struct acpi_subtable_header header; u32 
proximity_domain; u32 acpi_processor_uid; u32 flags; u32 clock_domain; } __attribute__((packed)); struct acpi_srat_mem_affinity { struct acpi_subtable_header header; u32 proximity_domain; u16 reserved; u64 base_address; u64 length; u32 reserved1; u32 flags; u64 reserved2; } __attribute__((packed)); struct acpi_srat_rintc_affinity { struct acpi_subtable_header header; u16 reserved; u32 proximity_domain; u32 acpi_processor_uid; u32 flags; u32 clock_domain; }; struct acpi_srat_x2apic_cpu_affinity { struct acpi_subtable_header header; u16 reserved; u32 proximity_domain; u32 apic_id; u32 flags; u32 clock_domain; u32 reserved2; }; struct acpi_subtable_entry { union acpi_subtable_headers *hdr; enum acpi_subtable_type type; }; union acpi_subtable_headers { struct acpi_subtable_header common; struct acpi_hmat_structure hmat; struct acpi_prmt_module_header prmt; struct acpi_cedt_header cedt; struct acpi_cdat_header cdat; }; typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *, void *, const long unsigned int); struct acpi_subtable_proc { int id; acpi_tbl_entry_handler handler; acpi_tbl_entry_handler_arg handler_arg; void *arg; int count; }; struct acpi_table_attr { struct bin_attribute attr; char name[4]; int instance; char filename[8]; struct list_head node; }; struct acpi_table_header { char signature[4]; u32 length; u8 revision; u8 checksum; char oem_id[6]; char oem_table_id[8]; u32 oem_revision; char asl_compiler_id[4]; u32 asl_compiler_revision; }; struct acpi_table_bert { struct acpi_table_header header; u32 region_length; u64 address; }; struct acpi_table_boot { struct acpi_table_header header; u8 cmos_index; u8 reserved[3]; }; struct acpi_table_ccel { struct acpi_table_header header; u8 CCtype; u8 Ccsub_type; u16 reserved; u64 log_area_minimum_length; u64 log_area_start_address; }; struct acpi_table_cdat { u32 length; u8 revision; u8 checksum; u8 reserved[6]; u32 sequence; }; struct acpi_table_desc { acpi_physical_address address; struct acpi_table_header *pointer; u32 length; union acpi_name_union signature; acpi_owner_id owner_id; u8 flags; u16 validation_count; }; struct acpi_table_ecdt { struct acpi_table_header header; struct acpi_generic_address control; struct acpi_generic_address data; u32 uid; u8 gpe; u8 id[0]; } __attribute__((packed)); struct acpi_table_facs { char signature[4]; u32 length; u32 hardware_signature; u32 firmware_waking_vector; u32 global_lock; u32 flags; u64 xfirmware_waking_vector; u8 version; u8 reserved[3]; u32 ospm_flags; u8 reserved1[24]; }; struct acpi_table_fadt { struct acpi_table_header header; u32 facs; u32 dsdt; u8 model; u8 preferred_profile; u16 sci_interrupt; u32 smi_command; u8 acpi_enable; u8 acpi_disable; u8 s4_bios_request; u8 pstate_control; u32 pm1a_event_block; u32 pm1b_event_block; u32 pm1a_control_block; u32 pm1b_control_block; u32 pm2_control_block; u32 pm_timer_block; u32 gpe0_block; u32 gpe1_block; u8 pm1_event_length; u8 pm1_control_length; u8 pm2_control_length; u8 pm_timer_length; u8 gpe0_block_length; u8 gpe1_block_length; u8 gpe1_base; u8 cst_control; u16 c2_latency; u16 c3_latency; u16 flush_size; u16 flush_stride; u8 duty_offset; u8 duty_width; u8 day_alarm; u8 month_alarm; u8 century; u16 boot_flags; u8 reserved; u32 flags; struct acpi_generic_address reset_register; u8 reset_value; u16 arm_boot_flags; u8 minor_revision; u64 Xfacs; u64 Xdsdt; struct acpi_generic_address xpm1a_event_block; struct acpi_generic_address xpm1b_event_block; struct acpi_generic_address xpm1a_control_block; struct acpi_generic_address 
xpm1b_control_block; struct acpi_generic_address xpm2_control_block; struct acpi_generic_address xpm_timer_block; struct acpi_generic_address xgpe0_block; struct acpi_generic_address xgpe1_block; struct acpi_generic_address sleep_control; struct acpi_generic_address sleep_status; u64 hypervisor_id; } __attribute__((packed)); struct acpi_table_hpet { struct acpi_table_header header; u32 id; struct acpi_generic_address address; u8 sequence; u16 minimum_tick; u8 flags; } __attribute__((packed)); struct acpi_table_list { struct acpi_table_desc *tables; u32 current_table_count; u32 max_table_count; u8 flags; }; struct acpi_table_lpit { struct acpi_table_header header; }; struct acpi_table_madt { struct acpi_table_header header; u32 address; u32 flags; }; struct acpi_table_mcfg { struct acpi_table_header header; u8 reserved[8]; }; struct acpi_table_pcct { struct acpi_table_header header; u32 flags; u64 reserved; }; struct acpi_table_rsdp { char signature[8]; u8 checksum; char oem_id[6]; u8 revision; u32 rsdt_physical_address; u32 length; u64 xsdt_physical_address; u8 extended_checksum; u8 reserved[3]; } __attribute__((packed)); struct acpi_table_slit { struct acpi_table_header header; u64 locality_count; u8 entry[0]; } __attribute__((packed)); struct acpi_table_spcr { struct acpi_table_header header; u8 interface_type; u8 reserved[3]; struct acpi_generic_address serial_port; u8 interrupt_type; u8 pc_interrupt; u32 interrupt; u8 baud_rate; u8 parity; u8 stop_bits; u8 flow_control; u8 terminal_type; u8 language; u16 pci_device_id; u16 pci_vendor_id; u8 pci_bus; u8 pci_device; u8 pci_function; u32 pci_flags; u8 pci_segment; u32 uart_clk_freq; u32 precise_baudrate; u16 name_space_string_length; u16 name_space_string_offset; char name_space_string[0]; } __attribute__((packed)); struct acpi_table_srat { struct acpi_table_header header; u32 table_revision; u64 reserved; }; struct acpi_table_stao { struct acpi_table_header header; u8 ignore_uart; } __attribute__((packed)); struct acpi_table_tpm2 { struct acpi_table_header header; u16 platform_class; u16 reserved; u64 control_address; u32 start_method; } __attribute__((packed)); struct client_hdr { u32 log_max_len; u64 log_start_addr; } __attribute__((packed)); struct server_hdr { u16 reserved; u64 log_max_len; u64 log_start_addr; } __attribute__((packed)); struct acpi_tcpa { struct acpi_table_header hdr; u16 platform_class; union { struct client_hdr client; struct server_hdr server; }; }; struct acpi_thermal_trip { long unsigned int temp_dk; struct acpi_handle_list devices; }; struct acpi_thermal_passive { struct acpi_thermal_trip trip; long unsigned int tc1; long unsigned int tc2; long unsigned int delay; }; struct acpi_thermal_active { struct acpi_thermal_trip trip; }; struct acpi_thermal_trips { struct acpi_thermal_passive passive; struct acpi_thermal_active active[10]; }; struct thermal_zone_device; struct acpi_thermal { struct acpi_device *device; acpi_bus_id name; long unsigned int temp_dk; long unsigned int last_temp_dk; long unsigned int polling_frequency; volatile u8 zombie; struct acpi_thermal_trips trips; struct thermal_zone_device *thermal_zone; int kelvin_offset; struct work_struct thermal_check_work; struct mutex thermal_check_lock; refcount_t thermal_check_count; }; struct acpi_tpm2_phy { u8 start_method_specific[12]; u32 log_area_minimum_length; u64 log_area_start_address; }; struct acpi_vendor_uuid { u8 subtype; u8 data[16]; }; struct acpi_vendor_walk_info { struct acpi_vendor_uuid *uuid; struct acpi_buffer *buffer; acpi_status status; 
}; struct acpi_wakeup_handler { struct list_head list_node; bool (*wakeup)(void *); void *context; }; typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **); typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *); struct acpi_walk_state { struct acpi_walk_state *next; u8 descriptor_type; u8 walk_type; u16 opcode; u8 next_op_info; u8 num_operands; u8 operand_index; acpi_owner_id owner_id; u8 last_predicate; u8 current_result; u8 return_used; u8 scope_depth; u8 pass_number; u8 namespace_override; u8 result_size; u8 result_count; u8 *aml; u32 arg_types; u32 method_breakpoint; u32 user_breakpoint; u32 parse_flags; struct acpi_parse_state parser_state; u32 prev_arg_types; u32 arg_count; u16 method_nesting_depth; u8 method_is_nested; struct acpi_namespace_node arguments[7]; struct acpi_namespace_node local_variables[8]; union acpi_operand_object *operands[9]; union acpi_operand_object **params; u8 *aml_last_while; union acpi_operand_object **caller_return_desc; union acpi_generic_state *control_state; struct acpi_namespace_node *deferred_node; union acpi_operand_object *implicit_return_obj; struct acpi_namespace_node *method_call_node; union acpi_parse_object *method_call_op; union acpi_operand_object *method_desc; struct acpi_namespace_node *method_node; char *method_pathname; union acpi_parse_object *op; const struct acpi_opcode_info *op_info; union acpi_parse_object *origin; union acpi_operand_object *result_obj; union acpi_generic_state *results; union acpi_operand_object *return_desc; union acpi_generic_state *scope_info; union acpi_parse_object *prev_op; union acpi_parse_object *next_op; struct acpi_thread_state *thread; acpi_parse_downwards descending_callback; acpi_parse_upwards ascending_callback; }; struct pnp_dev; struct acpipnp_parse_option_s { struct pnp_dev *dev; unsigned int option_flags; }; struct action_cache { long unsigned int allow_native[8]; }; struct action_devres { void *data; void (*action)(void *); }; struct action_gate_entry { u8 gate_state; u32 interval; s32 ipv; s32 maxoctets; }; struct mac_addr { u8 mac_addr_value[6]; }; struct ad_system { u16 sys_priority; struct mac_addr sys_mac_addr; }; struct bond_3ad_stats { atomic64_t lacpdu_rx; atomic64_t lacpdu_tx; atomic64_t lacpdu_unknown_rx; atomic64_t lacpdu_illegal_rx; atomic64_t marker_rx; atomic64_t marker_tx; atomic64_t marker_resp_rx; atomic64_t marker_resp_tx; atomic64_t marker_unknown_rx; }; struct ad_bond_info { struct ad_system system; struct bond_3ad_stats stats; atomic_t agg_select_timer; u16 aggregator_identifier; }; struct ad_info { __u16 aggregator_id; __u16 ports; __u16 actor_key; __u16 partner_key; __u8 partner_system[6]; }; struct port; struct slave; struct aggregator { struct mac_addr aggregator_mac_address; u16 aggregator_identifier; bool is_individual; u16 actor_admin_aggregator_key; u16 actor_oper_aggregator_key; struct mac_addr partner_system; u16 partner_system_priority; u16 partner_oper_aggregator_key; u16 receive_state; u16 transmit_state; struct port *lag_ports; struct slave *slave; u16 is_active; u16 num_of_ports; }; struct port_params { struct mac_addr system; u16 system_priority; u16 key; u16 port_number; u16 port_priority; u16 port_state; }; struct lacpdu { u8 subtype; u8 version_number; u8 tlv_type_actor_info; u8 actor_information_length; __be16 actor_system_priority; struct mac_addr actor_system; __be16 actor_key; __be16 actor_port_priority; __be16 actor_port; u8 actor_state; u8 reserved_3_1[3]; u8 tlv_type_partner_info; u8 
partner_information_length; __be16 partner_system_priority; struct mac_addr partner_system; __be16 partner_key; __be16 partner_port_priority; __be16 partner_port; u8 partner_state; u8 reserved_3_2[3]; u8 tlv_type_collector_info; u8 collector_information_length; __be16 collector_max_delay; u8 reserved_12[12]; u8 tlv_type_terminator; u8 terminator_length; u8 reserved_50[50]; }; struct port { u16 actor_port_number; u16 actor_port_priority; struct mac_addr actor_system; u16 actor_system_priority; u16 actor_port_aggregator_identifier; bool ntt; u16 actor_admin_port_key; u16 actor_oper_port_key; u8 actor_admin_port_state; u8 actor_oper_port_state; struct port_params partner_admin; struct port_params partner_oper; bool is_enabled; u16 sm_vars; rx_states_t sm_rx_state; u16 sm_rx_timer_counter; periodic_states_t sm_periodic_state; u16 sm_periodic_timer_counter; mux_states_t sm_mux_state; u16 sm_mux_timer_counter; tx_states_t sm_tx_state; u16 sm_tx_timer_counter; u16 sm_churn_actor_timer_counter; u16 sm_churn_partner_timer_counter; u32 churn_actor_count; u32 churn_partner_count; churn_state_t sm_churn_actor_state; churn_state_t sm_churn_partner_state; struct slave *slave; struct aggregator *aggregator; struct port *next_port_in_aggregator; u32 transaction_id; struct lacpdu lacpdu; }; struct ad_slave_info { struct aggregator aggregator; struct port port; struct bond_3ad_stats stats; u16 id; }; struct rb_root { struct rb_node *rb_node; }; struct rb_root_cached { struct rb_root rb_root; struct rb_node *rb_leftmost; }; struct address_space_operations; struct address_space { struct inode *host; struct xarray i_pages; struct rw_semaphore invalidate_lock; gfp_t gfp_mask; atomic_t i_mmap_writable; struct rb_root_cached i_mmap; long unsigned int nrpages; long unsigned int writeback_index; const struct address_space_operations *a_ops; long unsigned int flags; errseq_t wb_err; spinlock_t i_private_lock; struct list_head i_private_list; struct rw_semaphore i_mmap_rwsem; void *i_private_data; }; struct writeback_control; struct readahead_control; struct kiocb; struct iov_iter; struct swap_info_struct; struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*read_folio)(struct file *, struct folio *); int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio *, void *); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio)(struct folio *, size_t, size_t); bool (*release_folio)(struct folio *, gfp_t); void (*free_folio)(struct folio *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migrate_folio)(struct address_space *, struct folio *, struct folio *, enum migrate_mode); int (*launder_folio)(struct folio *); bool (*is_partially_uptodate)(struct folio *, size_t, size_t); void (*is_dirty_writeback)(struct folio *, bool *, bool *); int (*error_remove_folio)(struct address_space *, struct folio *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); int (*swap_rw)(struct kiocb *, struct iov_iter *); }; struct adjust_trip_data { struct acpi_thermal *tz; u32 event; }; struct advisor_ctx { ktime_t start_scan; long unsigned int scan_time; long 
unsigned int change; long long unsigned int cpu_time; }; struct crypto_aead; struct aead_request; struct aead_alg { int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); int (*setauthsize)(struct crypto_aead *, unsigned int); int (*encrypt)(struct aead_request *); int (*decrypt)(struct aead_request *); int (*init)(struct crypto_aead *); void (*exit)(struct crypto_aead *); unsigned int ivsize; unsigned int maxauthsize; unsigned int chunksize; struct crypto_alg base; }; struct crypto_sync_skcipher; struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; struct crypto_sync_skcipher *sknull; u8 salt[0]; }; struct crypto_template; struct crypto_spawn; struct crypto_instance { struct crypto_alg alg; struct crypto_template *tmpl; union { struct hlist_node list; struct crypto_spawn *spawns; }; struct work_struct free_work; void *__ctx[0]; }; struct aead_instance { void (*free)(struct aead_instance *); union { struct { char head[64]; struct crypto_instance base; } s; struct aead_alg alg; }; }; struct aead_request { struct crypto_async_request base; unsigned int assoclen; unsigned int cryptlen; u8 *iv; struct scatterlist *src; struct scatterlist *dst; void *__ctx[0]; }; struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; }; struct scatterlist { long unsigned int page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; unsigned int dma_flags; }; struct af_alg_sgl { struct sg_table sgt; struct scatterlist sgl[17]; bool need_unpin; }; struct af_alg_rsgl { struct af_alg_sgl sgl; struct list_head list; size_t sg_num_bytes; }; struct skcipher_request { unsigned int cryptlen; u8 *iv; struct scatterlist *src; struct scatterlist *dst; struct crypto_async_request base; void *__ctx[0]; }; struct sock; struct af_alg_async_req { struct kiocb *iocb; struct sock *sk; struct af_alg_rsgl first_rsgl; struct af_alg_rsgl *last_rsgl; struct list_head rsgl_list; struct scatterlist *tsgl; unsigned int tsgl_entries; unsigned int outlen; unsigned int areqlen; union { struct aead_request aead_req; struct skcipher_request skcipher_req; } cra_u; }; struct af_alg_iv; struct af_alg_control { struct af_alg_iv *iv; int op; unsigned int aead_assoclen; }; struct crypto_wait { struct completion completion; int err; }; struct af_alg_ctx { struct list_head tsgl_list; void *iv; void *state; size_t aead_assoclen; struct crypto_wait wait; size_t used; atomic_t rcvused; bool more; bool merge; bool enc; bool init; unsigned int len; unsigned int inflight; }; struct af_alg_iv { __u32 ivlen; __u8 iv[0]; }; struct af_alg_tsgl { struct list_head list; unsigned int cur; struct scatterlist sg[0]; }; struct proto_ops; struct af_alg_type { void * (*bind)(const char *, u32, u32); void (*release)(void *); int (*setkey)(void *, const u8 *, unsigned int); int (*setentropy)(void *, sockptr_t, unsigned int); int (*accept)(void *, struct sock *); int (*accept_nokey)(void *, struct sock *); int (*setauthsize)(void *, unsigned int); struct proto_ops *ops; struct proto_ops *ops_nokey; struct module *owner; char name[14]; }; struct af_vsockmon_hdr { __le64 src_cid; __le64 dst_cid; __le32 src_port; __le32 dst_port; __le16 op; __le16 transport; __le16 len; __u8 reserved[2]; }; struct affinity_context { const struct cpumask *new_mask; struct cpumask *user_mask; unsigned int flags; }; struct component_master_ops; struct component_match; struct aggregate_device { struct list_head node; bool bound; const struct component_master_ops *ops; struct device *parent; 
struct component_match *match; }; struct agp_3_5_dev { struct list_head list; u8 capndx; u32 maxbw; struct pci_dev *dev; }; struct agp_version; struct agp_bridge_driver; struct vm_operations_struct; struct agp_bridge_data { const struct agp_version *version; const struct agp_bridge_driver *driver; const struct vm_operations_struct *vm_ops; void *previous_size; void *current_size; void *dev_private_data; struct pci_dev *dev; u32 *gatt_table; u32 *gatt_table_real; long unsigned int scratch_page; struct page *scratch_page_page; dma_addr_t scratch_page_dma; long unsigned int gart_bus_addr; long unsigned int gatt_bus_addr; u32 mode; long unsigned int *key_list; atomic_t current_memory_agp; atomic_t agp_in_use; int max_memory_agp; int aperture_size_idx; int capndx; int flags; char major_version; char minor_version; struct list_head list; u32 apbase_config; struct list_head mapped_list; spinlock_t mapped_lock; }; struct gatt_mask; struct agp_memory; struct agp_bridge_driver { struct module *owner; const void *aperture_sizes; int num_aperture_sizes; enum aper_size_type size_type; bool cant_use_aperture; bool needs_scratch_page; const struct gatt_mask *masks; int (*fetch_size)(void); int (*configure)(void); void (*agp_enable)(struct agp_bridge_data *, u32); void (*cleanup)(void); void (*tlb_flush)(struct agp_memory *); long unsigned int (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int); void (*cache_flush)(void); int (*create_gatt_table)(struct agp_bridge_data *); int (*free_gatt_table)(struct agp_bridge_data *); int (*insert_memory)(struct agp_memory *, off_t, int); int (*remove_memory)(struct agp_memory *, off_t, int); struct agp_memory * (*alloc_by_type)(size_t, int); void (*free_by_type)(struct agp_memory *); struct page * (*agp_alloc_page)(struct agp_bridge_data *); int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t); void (*agp_destroy_page)(struct page *, int); void (*agp_destroy_pages)(struct agp_memory *); int (*agp_type_to_mask_type)(struct agp_bridge_data *, int); }; struct agp_device_ids { short unsigned int device_id; enum chipset_type chipset; const char *chipset_name; int (*chipset_setup)(struct pci_dev *); }; struct agp_version { u16 major; u16 minor; }; struct agp_kern_info { struct agp_version version; struct pci_dev *device; enum chipset_type chipset; long unsigned int mode; long unsigned int aper_base; size_t aper_size; int max_memory; int current_memory; bool cant_use_aperture; long unsigned int page_mask; const struct vm_operations_struct *vm_ops; }; struct agp_memory { struct agp_memory *next; struct agp_memory *prev; struct agp_bridge_data *bridge; struct page **pages; size_t page_count; int key; int num_scratch_pages; off_t pg_start; u32 type; u32 physical; bool is_bound; bool is_flushed; struct list_head mapped_list; struct scatterlist *sg_list; int num_sg; }; struct hash_alg_common { unsigned int digestsize; unsigned int statesize; struct crypto_alg base; }; struct ahash_request; struct crypto_ahash; struct ahash_alg { int (*init)(struct ahash_request *); int (*update)(struct ahash_request *); int (*final)(struct ahash_request *); int (*finup)(struct ahash_request *); int (*digest)(struct ahash_request *); int (*export)(struct ahash_request *, void *); int (*import)(struct ahash_request *, const void *); int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); int (*init_tfm)(struct crypto_ahash *); void (*exit_tfm)(struct crypto_ahash *); int (*clone_tfm)(struct crypto_ahash *, struct crypto_ahash *); struct hash_alg_common halg; 
}; struct ahash_instance { void (*free)(struct ahash_instance *); union { struct { char head[96]; struct crypto_instance base; } s; struct ahash_alg alg; }; }; struct ahash_request { struct crypto_async_request base; unsigned int nbytes; struct scatterlist *src; u8 *result; void *priv; void *__ctx[0]; }; struct wait_page_queue; struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long int); void *private; int ki_flags; u16 ki_ioprio; union { struct wait_page_queue *ki_waitq; ssize_t (*dio_complete)(void *); }; }; struct cred; struct fsync_iocb { struct file *file; struct work_struct work; bool datasync; struct cred *creds; }; struct wait_queue_entry; typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; struct list_head entry; }; struct poll_iocb { struct file *file; struct wait_queue_head *head; __poll_t events; bool cancelled; bool work_scheduled; bool work_need_resched; struct wait_queue_entry wait; struct work_struct work; }; typedef int kiocb_cancel_fn(struct kiocb *); struct io_event { __u64 data; __u64 obj; __s64 res; __s64 res2; }; struct kioctx; struct eventfd_ctx; struct aio_kiocb { union { struct file *ki_filp; struct kiocb rw; struct fsync_iocb fsync; struct poll_iocb poll; }; struct kioctx *ki_ctx; kiocb_cancel_fn *ki_cancel; struct io_event ki_res; struct list_head ki_list; refcount_t ki_refcnt; struct eventfd_ctx *ki_eventfd; }; struct poll_table_struct; typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); struct poll_table_struct { poll_queue_proc _qproc; __poll_t _key; }; struct aio_poll_table { struct poll_table_struct pt; struct aio_kiocb *iocb; bool queued; int error; }; struct aio_ring { unsigned int id; unsigned int nr; unsigned int head; unsigned int tail; unsigned int magic; unsigned int compat_features; unsigned int incompat_features; unsigned int header_length; struct io_event io_events[0]; }; struct aio_waiter { struct wait_queue_entry w; size_t min_nr; }; struct akcipher_request; struct crypto_akcipher; struct akcipher_alg { int (*sign)(struct akcipher_request *); int (*verify)(struct akcipher_request *); int (*encrypt)(struct akcipher_request *); int (*decrypt)(struct akcipher_request *); int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); unsigned int (*max_size)(struct crypto_akcipher *); int (*init)(struct crypto_akcipher *); void (*exit)(struct crypto_akcipher *); struct crypto_alg base; }; struct akcipher_instance { void (*free)(struct akcipher_instance *); union { struct { char head[72]; struct crypto_instance base; } s; struct akcipher_alg alg; }; }; struct akcipher_request { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int src_len; unsigned int dst_len; void *__ctx[0]; }; struct alarm { struct timerqueue_node node; struct hrtimer timer; enum alarmtimer_restart (*function)(struct alarm *, ktime_t); enum alarmtimer_type type; int state; void *data; }; struct timerqueue_head { struct rb_root_cached rb_root; }; struct timespec64; struct alarm_base { spinlock_t lock; struct timerqueue_head timerqueue; ktime_t (*get_ktime)(void); void (*get_timespec)(struct timespec64 *); clockid_t base_clockid; }; struct tlb_client_info; struct rlb_client_info; struct alb_bond_info { struct tlb_client_info *tx_hashtbl; u32 
unbalanced_load; atomic_t tx_rebalance_counter; int lp_counter; int rlb_enabled; struct rlb_client_info *rx_hashtbl; u32 rx_hashtbl_used_head; u8 rx_ntt; struct slave *rx_slave; u8 primary_is_promisc; u32 rlb_promisc_timeout_counter; u32 rlb_update_delay_counter; u32 rlb_update_retry_counter; u8 rlb_rebalance; }; struct bonding; struct alb_walk_data { struct bonding *bond; struct slave *slave; const u8 *mac_addr; bool strict_match; }; struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; }; struct proto; struct inet_timewait_death_row; struct sock_common { union { __addrpair skc_addrpair; struct { __be32 skc_daddr; __be32 skc_rcv_saddr; }; }; union { unsigned int skc_hash; __u16 skc_u16hashes[2]; }; union { __portpair skc_portpair; struct { __be16 skc_dport; __u16 skc_num; }; }; short unsigned int skc_family; volatile unsigned char skc_state; unsigned char skc_reuse: 4; unsigned char skc_reuseport: 1; unsigned char skc_ipv6only: 1; unsigned char skc_net_refcnt: 1; int skc_bound_dev_if; union { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; }; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union { long unsigned int skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; }; int skc_dontcopy_begin[0]; union { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; short unsigned int skc_tx_queue_mapping; short unsigned int skc_rx_queue_mapping; union { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; }; refcount_t skc_refcnt; int skc_dontcopy_end[0]; union { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; }; }; struct page_frag { struct page *page; __u32 offset; __u32 size; }; struct sock_cgroup_data { struct cgroup *cgroup; u32 classid; }; struct dst_entry; struct sk_filter; struct socket_wq; struct socket; struct mem_cgroup; struct xfrm_policy; struct pid; struct sock_reuseport; struct bpf_local_storage; struct sock { struct sock_common __sk_common; __u8 __cacheline_group_begin__sock_write_rx[0]; atomic_t sk_drops; __s32 sk_peek_off; struct sk_buff_head sk_error_queue; struct sk_buff_head sk_receive_queue; struct { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } sk_backlog; __u8 __cacheline_group_end__sock_write_rx[0]; __u8 __cacheline_group_begin__sock_read_rx[0]; struct dst_entry *sk_rx_dst; int sk_rx_dst_ifindex; u32 sk_rx_dst_cookie; unsigned int sk_ll_usec; unsigned int sk_napi_id; u16 sk_busy_poll_budget; u8 sk_prefer_busy_poll; u8 sk_userlocks; int sk_rcvbuf; struct sk_filter *sk_filter; union { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; }; void (*sk_data_ready)(struct sock *); long int sk_rcvtimeo; int sk_rcvlowat; __u8 __cacheline_group_end__sock_read_rx[0]; __u8 __cacheline_group_begin__sock_read_rxtx[0]; int sk_err; struct socket *sk_socket; struct mem_cgroup *sk_memcg; struct xfrm_policy *sk_policy[2]; __u8 __cacheline_group_end__sock_read_rxtx[0]; __u8 __cacheline_group_begin__sock_write_rxtx[0]; socket_lock_t sk_lock; u32 sk_reserved_mem; int sk_forward_alloc; u32 sk_tsflags; __u8 __cacheline_group_end__sock_write_rxtx[0]; __u8 __cacheline_group_begin__sock_write_tx[0]; int sk_write_pending; atomic_t sk_omem_alloc; int sk_sndbuf; int sk_wmem_queued; refcount_t sk_wmem_alloc; long unsigned int sk_tsq_flags; union { struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; struct sk_buff_head sk_write_queue; u32 
sk_dst_pending_confirm; u32 sk_pacing_status; struct page_frag sk_frag; struct timer_list sk_timer; long unsigned int sk_pacing_rate; atomic_t sk_zckey; atomic_t sk_tskey; __u8 __cacheline_group_end__sock_write_tx[0]; __u8 __cacheline_group_begin__sock_read_tx[0]; long unsigned int sk_max_pacing_rate; long int sk_sndtimeo; u32 sk_priority; u32 sk_mark; struct dst_entry *sk_dst_cache; netdev_features_t sk_route_caps; u16 sk_gso_type; u16 sk_gso_max_segs; unsigned int sk_gso_max_size; gfp_t sk_allocation; u32 sk_txhash; u8 sk_pacing_shift; bool sk_use_task_frag; __u8 __cacheline_group_end__sock_read_tx[0]; u8 sk_gso_disabled: 1; u8 sk_kern_sock: 1; u8 sk_no_check_tx: 1; u8 sk_no_check_rx: 1; u8 sk_shutdown; u16 sk_type; u16 sk_protocol; long unsigned int sk_lingertime; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; kuid_t sk_uid; spinlock_t sk_peer_lock; int sk_bind_phc; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; ktime_t sk_stamp; int sk_disconnects; u8 sk_txrehash; u8 sk_clockid; u8 sk_txtime_deadline_mode: 1; u8 sk_txtime_report_errors: 1; u8 sk_txtime_unused: 6; void *sk_user_data; void *sk_security; struct sock_cgroup_data sk_cgrp_data; void (*sk_state_change)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct bpf_local_storage *sk_bpf_storage; struct callback_head sk_rcu; netns_tracker ns_tracker; struct xarray sk_user_frags; }; struct alg_sock { struct sock sk; struct sock *parent; atomic_t refcnt; atomic_t nokey_refcnt; const struct af_alg_type *type; void *private; }; struct alg_type_list { const struct af_alg_type *type; struct list_head list; }; struct zonelist; struct zoneref; struct alloc_context { struct zonelist *zonelist; nodemask_t *nodemask; struct zoneref *preferred_zoneref; int migratetype; enum zone_type highest_zoneidx; bool spread_dirty_pages; }; struct codetag { unsigned int flags; unsigned int lineno; const char *modname; const char *function; const char *filename; }; struct alloc_tag_counters; struct alloc_tag { struct codetag ct; struct alloc_tag_counters *counters; }; struct alloc_tag_counters { u64 bytes; u64 calls; }; struct alps_bitmap_point { int start_bit; int num_bits; }; struct input_mt_pos { s16 x; s16 y; }; struct alps_fields { unsigned int x_map; unsigned int y_map; unsigned int fingers; int pressure; struct input_mt_pos st; struct input_mt_pos mt[4]; unsigned int first_mp: 1; unsigned int is_mp: 1; unsigned int left: 1; unsigned int right: 1; unsigned int middle: 1; unsigned int ts_left: 1; unsigned int ts_right: 1; unsigned int ts_middle: 1; }; struct psmouse; struct alps_nibble_commands; struct alps_data { struct psmouse *psmouse; struct input_dev *dev2; struct input_dev *dev3; char phys2[32]; char phys3[32]; struct delayed_work dev3_register_work; const struct alps_nibble_commands *nibble_commands; int addr_command; u16 proto_version; u8 byte0; u8 mask0; u8 dev_id[3]; u8 fw_ver[3]; int flags; int x_max; int y_max; int x_bits; int y_bits; unsigned int x_res; unsigned int y_res; int (*hw_init)(struct psmouse *); void (*process_packet)(struct psmouse *); int (*decode_fields)(struct alps_fields *, unsigned char *, struct psmouse *); void (*set_abs_params)(struct alps_data *, struct input_dev *); int prev_fin; int multi_packet; int second_touch; unsigned char multi_data[6]; struct 
alps_fields f; u8 quirks; struct timer_list timer; }; struct alps_protocol_info { u16 version; u8 byte0; u8 mask0; unsigned int flags; }; struct alps_model_info { u8 signature[3]; struct alps_protocol_info protocol_info; }; struct alps_nibble_commands { int command; unsigned char data; }; struct alt_instr { s32 instr_offset; s32 repl_offset; union { struct { u32 cpuid: 16; u32 flags: 16; }; u32 ft_flags; }; u8 instrlen; u8 replacementlen; } __attribute__((packed)); struct amd_aperf_mperf { u64 aperf; u64 mperf; u64 tsc; }; struct amd_chipset_type { enum amd_chipset_gen gen; u8 rev; }; struct amd_chipset_info { struct pci_dev *nb_dev; struct pci_dev *smbus_dev; int nb_type; struct amd_chipset_type sb_type; int isoc_reqs; int probe_count; bool need_pll_quirk; }; struct amd_cpudata { int cpu; struct freq_qos_request req[2]; u64 cppc_req_cached; u32 highest_perf; u32 nominal_perf; u32 lowest_nonlinear_perf; u32 lowest_perf; u32 prefcore_ranking; u32 min_limit_perf; u32 max_limit_perf; u32 min_limit_freq; u32 max_limit_freq; u32 max_freq; u32 min_freq; u32 nominal_freq; u32 lowest_nonlinear_freq; struct amd_aperf_mperf cur; struct amd_aperf_mperf prev; u64 freq; bool boost_supported; bool hw_prefcore; s16 epp_policy; s16 epp_cached; u32 policy; u64 cppc_cap1_cached; bool suspended; s16 epp_default; bool boost_state; }; struct amd_hostbridge { u32 bus; u32 slot; u32 device; }; struct amd_l3_cache { unsigned int indices; u8 subcaches[4]; }; struct amd_lps0_hid_device_data { const bool check_off_by_one; }; struct event_constraint { union { long unsigned int idxmsk[1]; u64 idxmsk64; }; u64 code; u64 cmask; int weight; int overlap; int flags; unsigned int size; }; struct perf_event; struct amd_nb { int nb_id; int refcnt; struct perf_event *owners[64]; struct event_constraint event_constraints[64]; }; struct amd_nb_bus_dev_range { u8 bus; u8 dev_base; u8 dev_limit; }; struct threshold_bank; struct amd_northbridge { struct pci_dev *root; struct pci_dev *misc; struct pci_dev *link; struct amd_l3_cache l3_cache; struct threshold_bank *bank4; }; struct amd_northbridge_info { u16 num; u64 flags; struct amd_northbridge *nb; }; union amd_uncore_info; struct amd_uncore_pmu; struct amd_uncore { union amd_uncore_info *info; struct amd_uncore_pmu *pmus; unsigned int num_pmus; bool init_done; void (*scan)(struct amd_uncore *, unsigned int); int (*init)(struct amd_uncore *, unsigned int); void (*move)(struct amd_uncore *, unsigned int); void (*free)(struct amd_uncore *, unsigned int); }; struct amd_uncore_ctx { int refcnt; int cpu; struct perf_event **events; struct hlist_node node; }; union amd_uncore_info { struct { u64 aux_data: 32; u64 num_pmcs: 8; u64 gid: 8; u64 cid: 8; } split; u64 full; }; typedef struct cpumask cpumask_t; struct perf_cpu_pmu_context; struct perf_event_pmu_context; struct kmem_cache; struct perf_output_handle; struct pmu { struct list_head entry; struct module *module; struct device *dev; struct device *parent; const struct attribute_group **attr_groups; const struct attribute_group **attr_update; const char *name; int type; int capabilities; unsigned int scope; int *pmu_disable_count; struct perf_cpu_pmu_context *cpu_pmu_context; atomic_t exclusive_cnt; int task_ctx_nr; int hrtimer_interval_ms; unsigned int nr_addr_filters; void (*pmu_enable)(struct pmu *); void (*pmu_disable)(struct pmu *); int (*event_init)(struct perf_event *); void (*event_mapped)(struct perf_event *, struct mm_struct *); void (*event_unmapped)(struct perf_event *, struct mm_struct *); int (*add)(struct perf_event *, 
int); void (*del)(struct perf_event *, int); void (*start)(struct perf_event *, int); void (*stop)(struct perf_event *, int); void (*read)(struct perf_event *); void (*start_txn)(struct pmu *, unsigned int); int (*commit_txn)(struct pmu *); void (*cancel_txn)(struct pmu *); int (*event_idx)(struct perf_event *); void (*sched_task)(struct perf_event_pmu_context *, bool); struct kmem_cache *task_ctx_cache; void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); void * (*setup_aux)(struct perf_event *, void **, int, bool); void (*free_aux)(void *); long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); int (*addr_filters_validate)(struct list_head *); void (*addr_filters_sync)(struct perf_event *); int (*aux_output_match)(struct perf_event *); bool (*filter)(struct pmu *, int); int (*check_period)(struct perf_event *, u64); }; struct amd_uncore_pmu { char name[16]; int num_counters; int rdpmc_base; u32 msr_base; int group; cpumask_t active_mask; struct pmu pmu; struct amd_uncore_ctx **ctx; }; struct aml_resource_small_header { u8 descriptor_type; }; struct aml_resource_large_header { u8 descriptor_type; u16 resource_length; } __attribute__((packed)); struct aml_resource_irq { u8 descriptor_type; u16 irq_mask; u8 flags; } __attribute__((packed)); struct aml_resource_dma { u8 descriptor_type; u8 dma_channel_mask; u8 flags; }; struct aml_resource_start_dependent { u8 descriptor_type; u8 flags; }; struct aml_resource_end_dependent { u8 descriptor_type; }; struct aml_resource_io { u8 descriptor_type; u8 flags; u16 minimum; u16 maximum; u8 alignment; u8 address_length; }; struct aml_resource_fixed_io { u8 descriptor_type; u16 address; u8 address_length; } __attribute__((packed)); struct aml_resource_fixed_dma { u8 descriptor_type; u16 request_lines; u16 channels; u8 width; } __attribute__((packed)); struct aml_resource_vendor_small { u8 descriptor_type; }; struct aml_resource_end_tag { u8 descriptor_type; u8 checksum; }; struct aml_resource_memory24 { u8 descriptor_type; u16 resource_length; u8 flags; u16 minimum; u16 maximum; u16 alignment; u16 address_length; } __attribute__((packed)); struct aml_resource_generic_register { u8 descriptor_type; u16 resource_length; u8 address_space_id; u8 bit_width; u8 bit_offset; u8 access_size; u64 address; } __attribute__((packed)); struct aml_resource_vendor_large { u8 descriptor_type; u16 resource_length; } __attribute__((packed)); struct aml_resource_memory32 { u8 descriptor_type; u16 resource_length; u8 flags; u32 minimum; u32 maximum; u32 alignment; u32 address_length; } __attribute__((packed)); struct aml_resource_fixed_memory32 { u8 descriptor_type; u16 resource_length; u8 flags; u32 address; u32 address_length; } __attribute__((packed)); struct aml_resource_address16 { u8 descriptor_type; u16 resource_length; u8 resource_type; u8 flags; u8 specific_flags; u16 granularity; u16 minimum; u16 maximum; u16 translation_offset; u16 address_length; } __attribute__((packed)); struct aml_resource_address32 { u8 descriptor_type; u16 resource_length; u8 resource_type; u8 flags; u8 specific_flags; u32 granularity; u32 minimum; u32 maximum; u32 translation_offset; u32 address_length; } __attribute__((packed)); struct aml_resource_address64 { u8 descriptor_type; u16 resource_length; u8 resource_type; u8 flags; u8 specific_flags; u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; } __attribute__((packed)); struct aml_resource_extended_address64 { u8 
descriptor_type; u16 resource_length; u8 resource_type; u8 flags; u8 specific_flags; u8 revision_ID; u8 reserved; u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; u64 type_specific; } __attribute__((packed)); struct aml_resource_extended_irq { u8 descriptor_type; u16 resource_length; u8 flags; u8 interrupt_count; union { u32 interrupt; struct { struct {} __Empty_interrupts; u32 interrupts[0]; }; }; } __attribute__((packed)); struct aml_resource_gpio { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 connection_type; u16 flags; u16 int_flags; u8 pin_config; u16 drive_strength; u16 debounce_timeout; u16 pin_table_offset; u8 res_source_index; u16 res_source_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_i2c_serialbus { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 res_source_index; u8 type; u8 flags; u16 type_specific_flags; u8 type_revision_id; u16 type_data_length; u32 connection_speed; u16 slave_address; } __attribute__((packed)); struct aml_resource_spi_serialbus { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 res_source_index; u8 type; u8 flags; u16 type_specific_flags; u8 type_revision_id; u16 type_data_length; u32 connection_speed; u8 data_bit_length; u8 clock_phase; u8 clock_polarity; u16 device_selection; } __attribute__((packed)); struct aml_resource_uart_serialbus { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 res_source_index; u8 type; u8 flags; u16 type_specific_flags; u8 type_revision_id; u16 type_data_length; u32 default_baud_rate; u16 rx_fifo_size; u16 tx_fifo_size; u8 parity; u8 lines_enabled; } __attribute__((packed)); struct aml_resource_csi2_serialbus { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 res_source_index; u8 type; u8 flags; u16 type_specific_flags; u8 type_revision_id; u16 type_data_length; } __attribute__((packed)); struct aml_resource_common_serialbus { u8 descriptor_type; u16 resource_length; u8 revision_id; u8 res_source_index; u8 type; u8 flags; u16 type_specific_flags; u8 type_revision_id; u16 type_data_length; } __attribute__((packed)); struct aml_resource_pin_function { u8 descriptor_type; u16 resource_length; u8 revision_id; u16 flags; u8 pin_config; u16 function_number; u16 pin_table_offset; u8 res_source_index; u16 res_source_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_pin_config { u8 descriptor_type; u16 resource_length; u8 revision_id; u16 flags; u8 pin_config_type; u32 pin_config_value; u16 pin_table_offset; u8 res_source_index; u16 res_source_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_pin_group { u8 descriptor_type; u16 resource_length; u8 revision_id; u16 flags; u16 pin_table_offset; u16 label_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_pin_group_function { u8 descriptor_type; u16 resource_length; u8 revision_id; u16 flags; u16 function_number; u8 res_source_index; u16 res_source_offset; u16 res_source_label_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_pin_group_config { u8 descriptor_type; u16 resource_length; u8 revision_id; u16 flags; u8 pin_config_type; u32 pin_config_value; u8 res_source_index; u16 res_source_offset; u16 res_source_label_offset; u16 vendor_offset; u16 vendor_length; } __attribute__((packed)); struct aml_resource_clock_input { u8 descriptor_type; u16 resource_length; u8 revision_id; 
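/* remaining ClockInput fields: flags, then the input clock frequency expressed as a numerator/divisor pair */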
u16 flags; u16 frequency_divisor; u32 frequency_numerator; } __attribute__((packed)); struct aml_resource_address { u8 descriptor_type; u16 resource_length; u8 resource_type; u8 flags; u8 specific_flags; } __attribute__((packed)); union aml_resource { u8 descriptor_type; struct aml_resource_small_header small_header; struct aml_resource_large_header large_header; struct aml_resource_irq irq; struct aml_resource_dma dma; struct aml_resource_start_dependent start_dpf; struct aml_resource_end_dependent end_dpf; struct aml_resource_io io; struct aml_resource_fixed_io fixed_io; struct aml_resource_fixed_dma fixed_dma; struct aml_resource_vendor_small vendor_small; struct aml_resource_end_tag end_tag; struct aml_resource_memory24 memory24; struct aml_resource_generic_register generic_reg; struct aml_resource_vendor_large vendor_large; struct aml_resource_memory32 memory32; struct aml_resource_fixed_memory32 fixed_memory32; struct aml_resource_address16 address16; struct aml_resource_address32 address32; struct aml_resource_address64 address64; struct aml_resource_extended_address64 ext_address64; struct aml_resource_extended_irq extended_irq; struct aml_resource_gpio gpio; struct aml_resource_i2c_serialbus i2c_serial_bus; struct aml_resource_spi_serialbus spi_serial_bus; struct aml_resource_uart_serialbus uart_serial_bus; struct aml_resource_csi2_serialbus csi2_serial_bus; struct aml_resource_common_serialbus common_serial_bus; struct aml_resource_pin_function pin_function; struct aml_resource_pin_config pin_config; struct aml_resource_pin_group pin_group; struct aml_resource_pin_group_function pin_group_function; struct aml_resource_pin_group_config pin_group_config; struct aml_resource_clock_input clock_input; struct aml_resource_address address; u32 dword_item; u16 word_item; u8 byte_item; }; struct kobj_uevent_env; struct kobj_ns_type_operations; struct class { const char *name; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; int (*dev_uevent)(const struct device *, struct kobj_uevent_env *); char * (*devnode)(const struct device *, umode_t *); void (*class_release)(const struct class *); void (*dev_release)(struct device *); int (*shutdown_pre)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(const struct device *); void (*get_ownership)(const struct device *, kuid_t *, kgid_t *); const struct dev_pm_ops *pm; }; struct transport_container; struct transport_class { struct class class; int (*setup)(struct transport_container *, struct device *, struct device *); int (*configure)(struct transport_container *, struct device *, struct device *); int (*remove)(struct transport_container *, struct device *, struct device *); }; struct klist_node; struct klist { spinlock_t k_lock; struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); }; struct attribute_container { struct list_head node; struct klist containers; struct class *class; const struct attribute_group *grp; struct device_attribute **attrs; int (*match)(struct attribute_container *, struct device *); long unsigned int flags; }; struct anon_transport_class { struct transport_class tclass; struct attribute_container container; }; struct anon_vma { struct anon_vma *root; struct rw_semaphore rwsem; atomic_t refcount; long unsigned int num_children; long unsigned int num_active_vmas; struct anon_vma *parent; struct rb_root_cached rb_root; }; struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma *anon_vma; struct 
list_head same_vma; struct rb_node rb; long unsigned int rb_subtree_last; }; struct anon_vma_name { struct kref kref; char name[0]; }; struct apd_private_data; struct apd_device_desc { unsigned int fixed_clk_rate; struct property_entry *properties; int (*setup)(struct apd_private_data *); }; struct clk; struct apd_private_data { struct clk *clk; struct acpi_device *adev; const struct apd_device_desc *dev_desc; }; struct aper_size_info_16 { int size; int num_entries; int page_order; u16 size_value; }; struct aper_size_info_32 { int size; int num_entries; int page_order; u32 size_value; }; struct aper_size_info_8 { int size; int num_entries; int page_order; u8 size_value; }; struct aper_size_info_fixed { int size; int num_entries; int page_order; }; struct aper_size_info_lvl2 { int size; int num_entries; u32 size_value; }; struct aperfmperf { seqcount_t seq; long unsigned int last_update; u64 acnt; u64 mcnt; u64 aperf; u64 mperf; }; struct aperture_range { struct device *dev; resource_size_t base; resource_size_t size; struct list_head lh; void (*detach)(struct device *); }; struct apic { void (*eoi)(void); void (*native_eoi)(void); void (*write)(u32, u32); u32 (*read)(u32); void (*wait_icr_idle)(void); u32 (*safe_wait_icr_idle)(void); void (*send_IPI)(int, int); void (*send_IPI_mask)(const struct cpumask *, int); void (*send_IPI_mask_allbutself)(const struct cpumask *, int); void (*send_IPI_allbutself)(int); void (*send_IPI_all)(int); void (*send_IPI_self)(int); u32 disable_esr: 1; u32 dest_mode_logical: 1; u32 x2apic_set_max_apicid: 1; u32 nmi_to_offline_cpu: 1; u32 (*calc_dest_apicid)(unsigned int); u64 (*icr_read)(void); void (*icr_write)(u32, u32); u32 max_apic_id; int (*probe)(void); int (*acpi_madt_oem_check)(char *, char *); void (*init_apic_ldr)(void); u32 (*cpu_present_to_apicid)(int); u32 (*get_apic_id)(u32); int (*wakeup_secondary_cpu)(u32, long unsigned int); int (*wakeup_secondary_cpu_64)(u32, long unsigned int); char *name; }; struct irq_cfg { unsigned int dest_apicid; unsigned int vector; }; struct apic_chip_data { struct irq_cfg hw_irq_cfg; unsigned int vector; unsigned int prev_vector; unsigned int cpu; unsigned int prev_cpu; unsigned int irq; struct hlist_node clist; unsigned int move_in_progress: 1; unsigned int is_managed: 1; unsigned int can_reserve: 1; unsigned int has_reserved: 1; }; union apic_ir { long unsigned int map[4]; u32 regs[8]; }; struct apic_override { void (*eoi)(void); void (*native_eoi)(void); void (*write)(u32, u32); u32 (*read)(u32); void (*send_IPI)(int, int); void (*send_IPI_mask)(const struct cpumask *, int); void (*send_IPI_mask_allbutself)(const struct cpumask *, int); void (*send_IPI_allbutself)(int); void (*send_IPI_all)(int); void (*send_IPI_self)(int); u64 (*icr_read)(void); void (*icr_write)(u32, u32); int (*wakeup_secondary_cpu)(u32, long unsigned int); int (*wakeup_secondary_cpu_64)(u32, long unsigned int); }; struct apm_bios_info { __u16 version; __u16 cseg; __u32 offset; __u16 cseg_16; __u16 dseg; __u16 flags; __u16 cseg_len; __u16 cseg_16_len; __u16 dseg_len; }; struct workqueue_attrs; struct pool_workqueue; struct apply_wqattrs_ctx { struct workqueue_struct *wq; struct workqueue_attrs *attrs; struct list_head list; struct pool_workqueue *dfl_pwq; struct pool_workqueue *pwq_tbl[0]; }; struct arch_elf_state {}; struct arch_hw_breakpoint { long unsigned int address; long unsigned int mask; u8 len; u8 type; }; struct arch_hybrid_cpu_scale { long unsigned int capacity; long unsigned int freq_ratio; }; struct arch_io_reserve_memtype_wc_devres 
{ resource_size_t start; resource_size_t size; }; struct lbr_entry { u64 from; u64 to; u64 info; }; struct arch_lbr_state { u64 lbr_ctl; u64 lbr_depth; u64 ler_from; u64 ler_to; u64 ler_info; struct lbr_entry entries[0]; }; struct arch_optimized_insn { kprobe_opcode_t copied_insn[4]; kprobe_opcode_t *insn; size_t size; }; struct kprobe; struct pt_regs; struct arch_specific_insn { kprobe_opcode_t *insn; unsigned int boostable: 1; unsigned char size; union { unsigned char opcode; struct { unsigned char type; } jcc; struct { unsigned char type; unsigned char asize; } loop; struct { unsigned char reg; } indirect; }; s32 rel32; void (*emulate_op)(struct kprobe *, struct pt_regs *); int tp_len; }; struct arch_tlbflush_unmap_batch { struct cpumask cpumask; }; struct uprobe_xol_ops; struct arch_uprobe { union { u8 insn[16]; u8 ixol[16]; }; const struct uprobe_xol_ops *ops; union { struct { s32 offs; u8 ilen; u8 opc1; } branch; struct { u8 fixups; u8 ilen; } defparam; struct { u8 reg_offset; u8 ilen; } push; }; }; struct arch_uprobe_task { long unsigned int saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; }; struct arch_vdso_data {}; struct arg_dev_net_ip { struct net *net; struct in6_addr *addr; }; struct arg_netdev_event { const struct net_device *dev; union { unsigned char nh_flags; long unsigned int event; }; }; struct arp_pkt { __be16 hw_addr_space; __be16 prot_addr_space; u8 hw_addr_len; u8 prot_addr_len; __be16 op_code; u8 mac_src[6]; __be32 ip_src; u8 mac_dst[6]; __be32 ip_dst; } __attribute__((packed)); struct arphdr { __be16 ar_hrd; __be16 ar_pro; unsigned char ar_hln; unsigned char ar_pln; __be16 ar_op; }; struct sockaddr { sa_family_t sa_family; union { char sa_data_min[14]; struct { struct {} __empty_sa_data; char sa_data[0]; }; }; }; struct arpreq { struct sockaddr arp_pa; struct sockaddr arp_ha; int arp_flags; struct sockaddr arp_netmask; char arp_dev[16]; }; struct trace_array; struct trace_buffer; struct trace_array_cpu; struct array_buffer { struct trace_array *tr; struct trace_buffer *buffer; struct trace_array_cpu *data; u64 time_start; int cpu; }; typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); struct asn1_decoder { const unsigned char *machine; size_t machlen; const asn1_action_t *actions; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root; long unsigned int nr_leaves_on_tree; }; struct assoc_array_node; struct assoc_array_delete_collapse_context { struct assoc_array_node *node; const void *skip_leaf; int slot; }; struct assoc_array_ops; struct assoc_array_edit { struct callback_head rcu; struct assoc_array *array; const struct assoc_array_ops *ops; const struct assoc_array_ops *ops_for_excised_subtree; struct assoc_array_ptr *leaf; struct assoc_array_ptr **leaf_p; struct assoc_array_ptr *dead_leaf; struct assoc_array_ptr *new_meta[3]; struct assoc_array_ptr *excised_meta[1]; struct assoc_array_ptr *excised_subtree; struct assoc_array_ptr **set_backpointers[16]; struct assoc_array_ptr *set_backpointers_to; struct assoc_array_node *adjust_count_on; long int adjust_count_by; struct { struct assoc_array_ptr **ptr; struct assoc_array_ptr *to; } set[2]; struct { u8 *p; u8 to; } set_parent_slot[1]; u8 segment_cache[17]; }; struct assoc_array_node { struct assoc_array_ptr *back_pointer; u8 parent_slot; struct assoc_array_ptr *slots[16]; long unsigned int nr_leaves_on_branch; }; struct assoc_array_ops { long unsigned int (*get_key_chunk)(const void *, int); long unsigned int 
(*get_object_key_chunk)(const void *, int); bool (*compare_object)(const void *, const void *); int (*diff_objects)(const void *, const void *); void (*free_object)(void *); }; struct assoc_array_shortcut { struct assoc_array_ptr *back_pointer; int parent_slot; int skip_to_level; struct assoc_array_ptr *next_node; long unsigned int index_key[0]; }; struct assoc_array_walk_result { struct { struct assoc_array_node *node; int level; int slot; } terminal_node; struct { struct assoc_array_shortcut *shortcut; int level; int sc_level; long unsigned int sc_segments; long unsigned int dissimilarity; } wrong_shortcut; }; struct asym_cap_data { struct list_head link; struct callback_head rcu; long unsigned int capacity; long unsigned int cpus[0]; }; struct asymmetric_key_id { short unsigned int len; unsigned char data[0]; }; struct asymmetric_key_ids { void *id[3]; }; struct key_preparsed_payload; struct asymmetric_key_parser { struct list_head link; struct module *owner; const char *name; int (*parse)(struct key_preparsed_payload *); }; struct key; struct seq_file; struct kernel_pkey_params; struct kernel_pkey_query; struct public_key_signature; struct asymmetric_key_subtype { struct module *owner; const char *name; short unsigned int name_len; void (*describe)(const struct key *, struct seq_file *); void (*destroy)(void *, void *); int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); int (*eds_op)(struct kernel_pkey_params *, const void *, void *); int (*verify_signature)(const struct key *, const struct public_key_signature *); }; struct async_domain { struct list_head pending; unsigned int registered: 1; }; typedef void (*async_func_t)(void *, async_cookie_t); struct async_entry { struct list_head domain_list; struct list_head global_list; struct work_struct work; async_cookie_t cookie; async_func_t func; void *data; struct async_domain *domain; }; struct io_poll { struct file *file; struct wait_queue_head *head; __poll_t events; int retries; struct wait_queue_entry wait; }; struct async_poll { struct io_poll poll; struct io_poll *double_poll; }; struct ps2dev; typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8, unsigned int); typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8); struct serio; struct ps2dev { struct serio *serio; struct mutex cmd_mutex; wait_queue_head_t wait; long unsigned int flags; u8 cmdbuf[8]; u8 cmdcnt; u8 nak; ps2_pre_receive_handler_t pre_receive_handler; ps2_receive_handler_t receive_handler; }; struct vivaldi_data { u32 function_row_physmap[24]; unsigned int num_function_row_keys; }; struct atkbd { struct ps2dev ps2dev; struct input_dev *dev; char name[64]; char phys[32]; short unsigned int id; short unsigned int keycode[512]; long unsigned int force_release_mask[8]; unsigned char set; bool translated; bool extra; bool write; bool softrepeat; bool softraw; bool scroll; bool enabled; unsigned char emul; bool resend; bool release; long unsigned int xl_bit; unsigned int last; long unsigned int time; long unsigned int err_count; struct delayed_work event_work; long unsigned int event_jiffies; long unsigned int event_mask; struct mutex mutex; struct vivaldi_data vdata; }; struct atomic_notifier_head { spinlock_t lock; struct notifier_block *head; }; struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; }; struct audit_aux_data { 
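/* common header for auxiliary audit records: chained off an audit_context via 'next', with 'type' discriminating the concrete record (see audit_aux_data_bprm_fcaps and audit_aux_data_pids below) */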
struct audit_aux_data *next; int type; }; struct audit_cap_data { kernel_cap_t permitted; kernel_cap_t inheritable; union { unsigned int fE; kernel_cap_t effective; }; kernel_cap_t ambient; kuid_t rootid; }; struct audit_aux_data_bprm_fcaps { struct audit_aux_data d; struct audit_cap_data fcap; unsigned int fcap_ver; struct audit_cap_data old_pcap; struct audit_cap_data new_pcap; }; struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[16]; kuid_t target_auid[16]; kuid_t target_uid[16]; unsigned int target_sessionid[16]; u32 target_sid[16]; char target_comm[256]; int pid_count; }; struct audit_context; struct audit_buffer { struct sk_buff *skb; struct audit_context *ctx; gfp_t gfp_mask; }; struct audit_tree; struct audit_node { struct list_head list; struct audit_tree *owner; unsigned int index; }; struct fsnotify_mark; struct audit_chunk { struct list_head hash; long unsigned int key; struct fsnotify_mark *mark; struct list_head trees; int count; atomic_long_t refs; struct callback_head head; struct audit_node owners[0]; }; struct timespec64 { time64_t tv_sec; long int tv_nsec; }; struct filename; struct audit_names { struct list_head list; struct filename *name; int name_len; bool hidden; long unsigned int ino; dev_t dev; umode_t mode; kuid_t uid; kgid_t gid; dev_t rdev; u32 osid; struct audit_cap_data fcap; unsigned int fcap_ver; unsigned char type; bool should_free; }; struct vfsmount; struct path { struct vfsmount *mnt; struct dentry *dentry; }; struct mq_attr { __kernel_long_t mq_flags; __kernel_long_t mq_maxmsg; __kernel_long_t mq_msgsize; __kernel_long_t mq_curmsgs; __kernel_long_t __reserved[4]; }; struct open_how { __u64 flags; __u64 mode; __u64 resolve; }; struct audit_ntp_val { long long int oldval; long long int newval; }; struct audit_ntp_data { struct audit_ntp_val vals[6]; }; struct audit_proctitle { int len; char *value; }; struct audit_tree_refs; struct audit_context { int dummy; enum { AUDIT_CTX_UNUSED = 0, AUDIT_CTX_SYSCALL = 1, AUDIT_CTX_URING = 2, } context; enum audit_state state; enum audit_state current_state; unsigned int serial; int major; int uring_op; struct timespec64 ctime; long unsigned int argv[4]; long int return_code; u64 prio; int return_valid; struct audit_names preallocated_names[5]; int name_count; struct list_head names_list; char *filterkey; struct path pwd; struct audit_aux_data *aux; struct audit_aux_data *aux_pids; struct __kernel_sockaddr_storage *sockaddr; size_t sockaddr_len; pid_t ppid; kuid_t uid; kuid_t euid; kuid_t suid; kuid_t fsuid; kgid_t gid; kgid_t egid; kgid_t sgid; kgid_t fsgid; long unsigned int personality; int arch; pid_t target_pid; kuid_t target_auid; kuid_t target_uid; unsigned int target_sessionid; u32 target_sid; char target_comm[16]; struct audit_tree_refs *trees; struct audit_tree_refs *first_trees; struct list_head killed_trees; int tree_count; int type; union { struct { int nargs; long int args[6]; } socketcall; struct { kuid_t uid; kgid_t gid; umode_t mode; u32 osid; int has_perm; uid_t perm_uid; gid_t perm_gid; umode_t perm_mode; long unsigned int qbytes; } ipc; struct { mqd_t mqdes; struct mq_attr mqstat; } mq_getsetattr; struct { mqd_t mqdes; int sigev_signo; } mq_notify; struct { mqd_t mqdes; size_t msg_len; unsigned int msg_prio; struct timespec64 abs_timeout; } mq_sendrecv; struct { int oflag; umode_t mode; struct mq_attr attr; } mq_open; struct { pid_t pid; struct audit_cap_data cap; } capset; struct { int fd; int flags; } mmap; struct open_how openat2; struct { int argc; } execve; struct { char 
*name; } module; struct { struct audit_ntp_data ntp_data; struct timespec64 tk_injoffset; } time; }; int fds[2]; struct audit_proctitle proctitle; }; struct audit_ctl_mutex { struct mutex lock; void *owner; }; struct audit_field; struct audit_watch; struct audit_fsnotify_mark; struct audit_krule { u32 pflags; u32 flags; u32 listnr; u32 action; u32 mask[64]; u32 buflen; u32 field_count; char *filterkey; struct audit_field *fields; struct audit_field *arch_f; struct audit_field *inode_f; struct audit_watch *watch; struct audit_tree *tree; struct audit_fsnotify_mark *exe; struct list_head rlist; struct list_head list; u64 prio; }; struct audit_entry { struct list_head list; struct callback_head rcu; struct audit_krule rule; }; struct audit_features { __u32 vers; __u32 mask; __u32 features; __u32 lock; }; struct audit_field { u32 type; union { u32 val; kuid_t uid; kgid_t gid; struct { char *lsm_str; void *lsm_rule; }; }; u32 op; }; struct fsnotify_group; struct fsnotify_mark_connector; struct fsnotify_mark { __u32 mask; refcount_t refcnt; struct fsnotify_group *group; struct list_head g_list; spinlock_t lock; struct hlist_node obj_list; struct fsnotify_mark_connector *connector; __u32 ignore_mask; unsigned int flags; }; struct audit_fsnotify_mark { dev_t dev; long unsigned int ino; char *path; struct fsnotify_mark mark; struct audit_krule *rule; }; struct audit_net { struct sock *sk; }; struct audit_netlink_list { __u32 portid; struct net *net; struct sk_buff_head q; }; struct audit_nfcfgop_tab { enum audit_nfcfgop op; const char *s; }; struct audit_parent { struct list_head watches; struct fsnotify_mark mark; }; struct audit_reply { __u32 portid; struct net *net; struct sk_buff *skb; }; struct audit_rule_data { __u32 flags; __u32 action; __u32 field_count; __u32 mask[64]; __u32 fields[64]; __u32 values[64]; __u32 fieldflags[64]; __u32 buflen; char buf[0]; }; struct audit_sig_info { uid_t uid; pid_t pid; char ctx[0]; }; struct audit_status { __u32 mask; __u32 enabled; __u32 failure; __u32 pid; __u32 rate_limit; __u32 backlog_limit; __u32 lost; __u32 backlog; union { __u32 version; __u32 feature_bitmap; }; __u32 backlog_wait_time; __u32 backlog_wait_time_actual; }; struct audit_tree { refcount_t count; int goner; struct audit_chunk *root; struct list_head chunks; struct list_head rules; struct list_head list; struct list_head same_root; struct callback_head head; char pathname[0]; }; struct audit_tree_mark { struct fsnotify_mark mark; struct audit_chunk *chunk; }; struct audit_tree_refs { struct audit_tree_refs *next; struct audit_chunk *c[31]; }; struct audit_tty_status { __u32 enabled; __u32 log_passwd; }; struct audit_watch { refcount_t count; dev_t dev; char *path; long unsigned int ino; struct audit_parent *parent; struct list_head wlist; struct list_head rules; }; struct auditd_connection { struct pid *pid; u32 portid; struct net *net; struct callback_head rcu; }; struct crypto_spawn { struct list_head list; struct crypto_alg *alg; union { struct crypto_instance *inst; struct crypto_spawn *next; }; const struct crypto_type *frontend; u32 mask; bool dead; bool registered; }; struct crypto_ahash_spawn { struct crypto_spawn base; }; struct crypto_skcipher_spawn { struct crypto_spawn base; }; struct authenc_esn_instance_ctx { struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; }; struct authenc_esn_request_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; char tail[0]; }; struct authenc_instance_ctx { struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; 
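/* reqoff: byte offset of the inner transform's request area within the per-request context (the tail[] of authenc_request_ctx below) */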
unsigned int reqoff; }; struct authenc_request_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; char tail[0]; }; struct auto_mode_param { int qp_type; }; struct av_decision { u32 allowed; u32 auditallow; u32 auditdeny; u32 seqno; u32 flags; }; struct hlist_head { struct hlist_node *first; }; struct avc_cache { struct hlist_head slots[512]; spinlock_t slots_lock[512]; atomic_t lru_hint; atomic_t active_nodes; u32 latest_notif; }; struct avc_cache_stats { unsigned int lookups; unsigned int misses; unsigned int allocations; unsigned int reclaims; unsigned int frees; }; struct avc_callback_node { int (*callback)(u32); u32 events; struct avc_callback_node *next; }; struct avc_xperms_node; struct avc_entry { u32 ssid; u32 tsid; u16 tclass; struct av_decision avd; struct avc_xperms_node *xp_node; }; struct avc_node { struct avc_entry ae; struct hlist_node list; struct callback_head rhead; }; struct extended_perms_data; struct extended_perms_decision { u8 used; u8 driver; struct extended_perms_data *allowed; struct extended_perms_data *auditallow; struct extended_perms_data *dontaudit; }; struct avc_xperms_decision_node { struct extended_perms_decision xpd; struct list_head xpd_list; }; struct extended_perms_data { u32 p[8]; }; struct extended_perms { u16 len; struct extended_perms_data drivers; }; struct avc_xperms_node { struct extended_perms xp; struct list_head xpd_head; }; struct avtab_node; struct avtab { struct avtab_node **htable; u32 nel; u32 nslot; u32 mask; }; struct avtab_extended_perms; struct avtab_datum { union { u32 data; struct avtab_extended_perms *xperms; } u; }; struct avtab_extended_perms { u8 specified; u8 driver; struct extended_perms_data perms; }; struct avtab_key { u16 source_type; u16 target_type; u16 target_class; u16 specified; }; struct avtab_node { struct avtab_key key; struct avtab_datum datum; struct avtab_node *next; }; struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; }; struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; }; struct percpu_ref_data; struct percpu_ref { long unsigned int percpu_count_ptr; struct percpu_ref_data *data; }; struct backing_dev_info; struct cgroup_subsys_state; struct bdi_writeback { struct backing_dev_info *bdi; long unsigned int state; long unsigned int last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; atomic_t writeback_inodes; struct percpu_counter stat[4]; long unsigned int bw_time_stamp; long unsigned int dirtied_stamp; long unsigned int written_stamp; long unsigned int write_bandwidth; long unsigned int avg_write_bandwidth; long unsigned int dirty_ratelimit; long unsigned int balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; enum wb_reason start_all_reason; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct delayed_work bw_dwork; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; struct list_head b_attached; struct list_head offline_node; union { struct work_struct release_work; struct callback_head rcu; }; }; struct backing_dev_info { u64 id; struct rb_node rb_node; struct list_head bdi_list; long unsigned int ra_pages; long unsigned int io_pages; struct kref refcnt; unsigned int 
capabilities; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; long unsigned int last_bdp_sleep; struct bdi_writeback wb; struct list_head wb_list; struct xarray cgwb_tree; struct mutex cgwb_release_mutex; struct rw_semaphore wb_switch_rwsem; wait_queue_head_t wb_waitq; struct device *dev; char dev_name[64]; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; }; struct file_ra_state { long unsigned int start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; }; struct file_operations; struct fown_struct; struct file { atomic_long_t f_count; spinlock_t f_lock; fmode_t f_mode; const struct file_operations *f_op; struct address_space *f_mapping; void *private_data; struct inode *f_inode; unsigned int f_flags; unsigned int f_iocb_flags; const struct cred *f_cred; struct path f_path; union { struct mutex f_pos_lock; u64 f_pipe; }; loff_t f_pos; void *f_security; struct fown_struct *f_owner; errseq_t f_wb_err; errseq_t f_sb_err; struct hlist_head *f_ep; union { struct callback_head f_task_work; struct llist_node f_llist; struct file_ra_state f_ra; freeptr_t f_freeptr; }; }; struct backing_file { struct file file; struct path user_path; }; struct backlight_properties { int brightness; int max_brightness; int power; enum backlight_type type; unsigned int state; enum backlight_scale scale; }; struct backlight_ops; struct backlight_device { struct backlight_properties props; struct mutex update_lock; struct mutex ops_lock; const struct backlight_ops *ops; struct notifier_block fb_notif; struct list_head entry; struct device dev; bool fb_bl_on[32]; int use_count; }; struct backlight_ops { unsigned int options; int (*update_status)(struct backlight_device *); int (*get_brightness)(struct backlight_device *); bool (*controls_device)(struct backlight_device *, struct device *); }; struct bpf_verifier_env; struct backtrack_state { struct bpf_verifier_env *env; u32 frame; u32 reg_masks[8]; u64 stack_masks[8]; }; struct badblocks { struct device *dev; int count; int unacked_exist; int shift; u64 *page; int changed; seqlock_t lock; sector_t sector; sector_t size; }; struct badblocks_context { sector_t start; sector_t len; int ack; }; struct balance_callback { struct balance_callback *next; void (*func)(struct rq *); }; struct batadv_unicast_packet { __u8 packet_type; __u8 version; __u8 ttl; __u8 ttvn; __u8 dest[6]; }; struct batch_u16 { u16 entropy[48]; local_lock_t lock; long unsigned int generation; unsigned int position; }; struct batch_u32 { u32 entropy[24]; local_lock_t lock; long unsigned int generation; unsigned int position; }; struct batch_u64 { u64 entropy[12]; local_lock_t lock; long unsigned int generation; unsigned int position; }; struct batch_u8 { u8 entropy[96]; local_lock_t lock; long unsigned int generation; unsigned int position; }; struct minmax_sample { u32 t; u32 v; }; struct minmax { struct minmax_sample s[3]; }; struct bbr { u32 min_rtt_us; u32 min_rtt_stamp; u32 probe_rtt_done_stamp; struct minmax bw; u32 rtt_cnt; u32 next_rtt_delivered; u64 cycle_mstamp; u32 mode: 3; u32 prev_ca_state: 3; u32 packet_conservation: 1; u32 round_start: 1; u32 idle_restart: 1; u32 probe_rtt_round_done: 1; u32 unused: 13; u32 lt_is_sampling: 1; u32 lt_rtt_cnt: 7; u32 lt_use_bw: 1; u32 lt_bw; u32 lt_last_delivered; u32 lt_last_stamp; u32 lt_last_lost; u32 pacing_gain: 10; u32 cwnd_gain: 10; u32 full_bw_reached: 1; u32 full_bw_cnt: 2; u32 cycle_idx: 3; 
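/* has_seen_rtt and the 5 unused bits below complete this packed 32-bit word (10+10+1+2+3+1+5 = 32) */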
u32 has_seen_rtt: 1; u32 unused_b: 5; u32 prior_cwnd; u32 full_bw; u64 ack_epoch_mstamp; u16 extra_acked[2]; u32 ack_epoch_acked: 20; u32 extra_acked_win_rtts: 5; u32 extra_acked_win_idx: 1; u32 unused_c: 6; }; struct gendisk; struct request_queue; struct disk_stats; struct blk_holder_ops; struct partition_meta_info; struct block_device { sector_t bd_start_sect; sector_t bd_nr_sectors; struct gendisk *bd_disk; struct request_queue *bd_queue; struct disk_stats *bd_stats; long unsigned int bd_stamp; atomic_t __bd_flags; dev_t bd_dev; struct address_space *bd_mapping; atomic_t bd_openers; spinlock_t bd_size_lock; void *bd_claiming; void *bd_holder; const struct blk_holder_ops *bd_holder_ops; struct mutex bd_holder_lock; int bd_holders; struct kobject *bd_holder_dir; atomic_t bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; struct partition_meta_info *bd_meta_info; int bd_writers; void *bd_security; struct device bd_device; }; struct posix_acl; struct inode_operations; struct super_block; struct file_lock_context; struct pipe_inode_info; struct cdev; struct fsverity_info; struct inode { umode_t i_mode; short unsigned int i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; long unsigned int i_ino; union { const unsigned int i_nlink; unsigned int __i_nlink; }; dev_t i_rdev; loff_t i_size; time64_t i_atime_sec; time64_t i_mtime_sec; time64_t i_ctime_sec; u32 i_atime_nsec; u32 i_mtime_nsec; u32 i_ctime_nsec; u32 i_generation; spinlock_t i_lock; short unsigned int i_bytes; u8 i_blkbits; enum rw_hint i_write_hint; blkcnt_t i_blocks; u32 i_state; struct rw_semaphore i_rwsem; long unsigned int dirtied_when; long unsigned int dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union { struct hlist_head i_dentry; struct callback_head i_rcu; }; atomic64_t i_version; atomic64_t i_sequence; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; union { const struct file_operations *i_fop; void (*free_inode)(struct inode *); }; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; }; __u32 i_fsnotify_mask; struct fsnotify_mark_connector *i_fsnotify_marks; struct fsverity_info *i_verity_info; void *i_private; }; struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; struct bgl_lock { spinlock_t lock; }; struct bh_accounting { int nr; int ratelimit; }; struct bh_lru { struct buffer_head *bhs[16]; }; struct bictcp { u32 cnt; u32 last_max_cwnd; u32 last_cwnd; u32 last_time; u32 bic_origin_point; u32 bic_K; u32 delay_min; u32 epoch_start; u32 ack_cnt; u32 tcp_cwnd; u16 unused; u8 sample_cnt; u8 found; u32 round_start; u32 end_seq; u32 last_ack; u32 curr_rtt; }; struct binfmt_misc { struct list_head entries; rwlock_t entries_lock; bool enabled; }; struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } __attribute__((packed)); struct bio; typedef void bio_end_io_t(struct bio *); struct bio_issue { u64 value; }; struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; }; struct 
blkcg_gq; struct bio_set; struct bio { struct bio *bi_next; struct block_device *bi_bdev; blk_opf_t bi_opf; short unsigned int bi_flags; short unsigned int bi_ioprio; enum rw_hint bi_write_hint; blk_status_t bi_status; atomic_t __bi_remaining; struct bvec_iter bi_iter; union { blk_qc_t bi_cookie; unsigned int __bi_nr_segments; }; bio_end_io_t *bi_end_io; void *bi_private; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; short unsigned int bi_vcnt; short unsigned int bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0]; }; struct bio_alloc_cache { struct bio *free_list; struct bio *free_list_irq; unsigned int nr; unsigned int nr_irq; }; struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; short unsigned int bip_vcnt; short unsigned int bip_max_vcnt; short unsigned int bip_flags; int: 0; struct bvec_iter bio_iter; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0]; }; struct bio_list { struct bio *head; struct bio *tail; }; struct iovec { void *iov_base; __kernel_size_t iov_len; }; struct kvec; struct folio_queue; struct iov_iter { u8 iter_type; bool nofault; bool data_source; size_t iov_offset; union { struct iovec __ubuf_iovec; struct { union { const struct iovec *__iov; const struct kvec *kvec; const struct bio_vec *bvec; const struct folio_queue *folioq; struct xarray *xarray; void *ubuf; }; size_t count; }; }; union { long unsigned int nr_segs; u8 folioq_slot; loff_t xarray_start; }; }; struct bio_map_data { bool is_our_pages: 1; bool is_null_mapped: 1; struct iov_iter iter; struct iovec iov[0]; }; struct bio_post_read_ctx { struct bio *bio; struct work_struct work; unsigned int cur_step; unsigned int enabled_steps; }; typedef void *mempool_alloc_t(gfp_t, void *); typedef void mempool_free_t(void *, void *); struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; }; typedef struct mempool_s mempool_t; struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; struct bio_alloc_cache *cache; mempool_t bio_pool; mempool_t bvec_pool; unsigned int back_pad; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; struct hlist_node cpuhp_dead; }; struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; char name[8]; }; struct biovec_slab { int nr_vecs; char *name; struct kmem_cache *slab; }; struct blacklist_entry { struct list_head next; char *buf; }; struct blake2b_state { u64 h[8]; u64 t[2]; u64 f[2]; u8 buf[128]; unsigned int buflen; unsigned int outlen; }; struct blake2b_tfm_ctx { u8 key[64]; unsigned int keylen; }; struct blake2s_state { u32 h[8]; u32 t[2]; u32 f[2]; u8 buf[64]; unsigned int buflen; unsigned int outlen; }; struct blk_expired_data { bool has_timedout_rq; long unsigned int next; long unsigned int timeout_start; }; struct request; struct blk_flush_queue { spinlock_t mq_flush_lock; unsigned int flush_pending_idx: 1; unsigned int flush_running_idx: 1; blk_status_t rq_status; long unsigned int flush_pending_since; struct list_head flush_queue[2]; long unsigned int flush_data_in_flight; struct request *flush_rq; }; struct blk_holder_ops { void (*mark_dead)(struct block_device *, bool); void (*sync)(struct block_device *); int (*freeze)(struct block_device *); int (*thaw)(struct block_device *); }; struct 
blk_independent_access_range; struct blk_ia_range_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_independent_access_range *, char *); }; struct blk_independent_access_range { struct kobject kobj; sector_t sector; sector_t nr_sectors; }; struct blk_independent_access_ranges { struct kobject kobj; bool sysfs_registered; unsigned int nr_ia_ranges; struct blk_independent_access_range ia_range[0]; }; struct blk_integrity { unsigned char flags; enum blk_integrity_checksum csum_type; unsigned char tuple_size; unsigned char pi_offset; unsigned char interval_exp; unsigned char tag_size; }; struct blk_io_trace { __u32 magic; __u32 sequence; __u64 time; __u64 sector; __u32 bytes; __u32 action; __u32 pid; __u32 device; __u32 cpu; __u16 error; __u16 pdu_len; }; struct blk_io_trace_remap { __be32 device_from; __be32 device_to; __be64 sector_from; }; struct rq_qos_ops; struct rq_qos { const struct rq_qos_ops *ops; struct gendisk *disk; enum rq_qos_id id; struct rq_qos *next; struct dentry *debugfs_dir; }; struct blk_iolatency { struct rq_qos rqos; struct timer_list timer; bool enabled; atomic_t enable_cnt; struct work_struct enable_work; }; struct blk_iou_cmd { int res; bool nowait; }; struct blk_major_name { struct blk_major_name *next; int major; char name[16]; void (*probe)(dev_t); }; struct blk_mq_ctx; struct blk_mq_hw_ctx; struct blk_mq_alloc_data { struct request_queue *q; blk_mq_req_flags_t flags; unsigned int shallow_depth; blk_opf_t cmd_flags; req_flags_t rq_flags; unsigned int nr_tags; struct request **cached_rq; struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; }; struct blk_mq_ctxs; struct blk_mq_ctx { struct { spinlock_t lock; struct list_head rq_lists[3]; long: 64; long: 64; }; unsigned int cpu; short unsigned int index_hw[3]; struct blk_mq_hw_ctx *hctxs[3]; struct request_queue *queue; struct blk_mq_ctxs *ctxs; struct kobject kobj; long: 64; }; struct blk_mq_ctxs { struct kobject kobj; struct blk_mq_ctx *queue_ctx; }; struct seq_operations; struct blk_mq_debugfs_attr { const char *name; umode_t mode; int (*show)(void *, struct seq_file *); ssize_t (*write)(void *, const char *, size_t, loff_t *); const struct seq_operations *seq_ops; }; struct sbitmap_word; struct sbitmap { unsigned int depth; unsigned int shift; unsigned int map_nr; bool round_robin; struct sbitmap_word *map; unsigned int *alloc_hint; }; typedef struct wait_queue_entry wait_queue_entry_t; struct blk_mq_tags; struct blk_mq_hw_ctx { struct { spinlock_t lock; struct list_head dispatch; long unsigned int state; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct delayed_work run_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; long unsigned int flags; void *sched_data; struct request_queue *queue; struct blk_flush_queue *fq; void *driver_data; struct sbitmap ctx_map; struct blk_mq_ctx *dispatch_from; unsigned int dispatch_busy; short unsigned int type; short unsigned int nr_ctx; struct blk_mq_ctx **ctxs; spinlock_t dispatch_wait_lock; wait_queue_entry_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; struct blk_mq_tags *sched_tags; unsigned int numa_node; unsigned int queue_num; atomic_t nr_active; struct hlist_node cpuhp_online; struct hlist_node cpuhp_dead; struct kobject kobj; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; struct list_head hctx_list; long: 64; long: 64; }; struct blk_mq_hw_ctx_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_mq_hw_ctx *, char *); }; struct blk_mq_queue_data; struct io_comp_batch; struct blk_mq_tag_set; 
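/*
 * Illustrative sketch of how the two-pointer bio_list defined above is
 * appended to. The helper name is hypothetical; the body mirrors the
 * kernel's own bio_list_add() tail-append logic and uses only the
 * struct bio and struct bio_list definitions given earlier.
 */
static inline void example_bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;			/* new element terminates the chain */
	if (bl->tail)
		bl->tail->bi_next = bio;	/* non-empty list: link after current tail */
	else
		bl->head = bio;			/* empty list: element becomes the head */
	bl->tail = bio;				/* either way, it is the new tail */
}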
struct blk_mq_ops { blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); void (*commit_rqs)(struct blk_mq_hw_ctx *); void (*queue_rqs)(struct request **); int (*get_budget)(struct request_queue *); void (*put_budget)(struct request_queue *, int); void (*set_rq_budget_token)(struct request *, int); int (*get_rq_budget_token)(struct request *); enum blk_eh_timer_return (*timeout)(struct request *); int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); void (*complete)(struct request *); int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); void (*cleanup_rq)(struct request *); bool (*busy)(struct request_queue *); void (*map_queues)(struct blk_mq_tag_set *); void (*show_rq)(struct seq_file *, struct request *); }; struct elevator_type; struct blk_mq_qe_pair { struct list_head node; struct request_queue *q; struct elevator_type *type; }; struct blk_mq_queue_data { struct request *rq; bool last; }; struct blk_mq_queue_map { unsigned int *mq_map; unsigned int nr_queues; unsigned int queue_offset; }; struct srcu_struct; struct blk_mq_tag_set { const struct blk_mq_ops *ops; struct blk_mq_queue_map map[3]; unsigned int nr_maps; unsigned int nr_hw_queues; unsigned int queue_depth; unsigned int reserved_tags; unsigned int cmd_size; int numa_node; unsigned int timeout; unsigned int flags; void *driver_data; struct blk_mq_tags **tags; struct blk_mq_tags *shared_tags; struct mutex tag_list_lock; struct list_head tag_list; struct srcu_struct *srcu; }; struct sbq_wait_state; struct sbitmap_queue { struct sbitmap sb; unsigned int wake_batch; atomic_t wake_index; struct sbq_wait_state *ws; atomic_t ws_active; unsigned int min_shallow_depth; atomic_t completion_cnt; atomic_t wakeup_cnt; }; struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; unsigned int active_queues; struct sbitmap_queue bitmap_tags; struct sbitmap_queue breserved_tags; struct request **rqs; struct request **static_rqs; struct list_head page_list; spinlock_t lock; }; struct blk_plug { struct request *mq_list; struct request *cached_rq; u64 cur_ktime; short unsigned int nr_ios; short unsigned int rq_count; bool multiple_queues; bool has_elevator; struct list_head cb_list; }; struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); struct blk_plug_cb { struct list_head list; blk_plug_cb_fn callback; void *data; }; struct blk_queue_stats { struct list_head callbacks; spinlock_t lock; int accounting; }; struct blk_rq_stat { u64 mean; u64 min; u64 max; u32 nr_samples; u64 batch; }; struct blk_rq_wait { struct completion done; blk_status_t ret; }; struct blk_stat_callback { struct list_head list; struct timer_list timer; struct blk_rq_stat *cpu_stat; int (*bucket_fn)(const struct request *); unsigned int buckets; struct blk_rq_stat *stat; void (*timer_fn)(struct blk_stat_callback *); void *data; struct callback_head rcu; }; struct rchan; struct blk_trace { int trace_state; struct rchan *rchan; long unsigned int *sequence; unsigned char *msg_data; u16 act_mask; u64 start_lba; u64 end_lba; u32 pid; u32 dev; struct dentry *dir; struct list_head running_list; atomic_t dropped; }; struct blk_user_trace_setup { char name[32]; __u16 act_mask; __u32 buf_size; __u32 buf_nr; __u64 start_lba; __u64 end_lba; __u32 pid; }; struct blk_zone { 
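/* zone descriptor as reported to user space via BLKREPORTZONE; start, len, wp and capacity are all in 512-byte sectors */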
__u64 start; __u64 len; __u64 wp; __u8 type; __u8 cond; __u8 non_seq; __u8 reset; __u8 resv[4]; __u64 capacity; __u8 reserved[24]; }; struct cgroup_subsys; struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct list_head sibling; struct list_head children; struct list_head rstat_css_node; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct work_struct destroy_work; struct rcu_work destroy_rwork; struct cgroup_subsys_state *parent; int nr_descendants; }; struct blkcg_policy_data; struct llist_head; struct blkcg { struct cgroup_subsys_state css; spinlock_t lock; refcount_t online_pin; atomic_t congestion_count; struct xarray blkg_tree; struct blkcg_gq *blkg_hint; struct hlist_head blkg_list; struct blkcg_policy_data *cpd[6]; struct list_head all_blkcgs_node; struct llist_head *lhead; struct list_head cgwb_list; }; struct blkg_iostat { u64 bytes[3]; u64 ios[3]; }; struct blkg_iostat_set { struct u64_stats_sync sync; struct blkcg_gq *blkg; struct llist_node lnode; int lqueued; struct blkg_iostat cur; struct blkg_iostat last; }; struct blkg_policy_data; struct blkcg_gq { struct request_queue *q; struct list_head q_node; struct hlist_node blkcg_node; struct blkcg *blkcg; struct blkcg_gq *parent; struct percpu_ref refcnt; bool online; struct blkg_iostat_set *iostat_cpu; struct blkg_iostat_set iostat; struct blkg_policy_data *pd[6]; union { struct work_struct async_bio_work; struct work_struct free_work; }; atomic_t use_delay; atomic64_t delay_nsec; atomic64_t delay_start; u64 last_delay; int last_use; struct callback_head callback_head; }; typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(struct gendisk *, struct blkcg *, gfp_t); typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); typedef void blkcg_pol_stat_pd_fn(struct blkg_policy_data *, struct seq_file *); struct cftype; struct blkcg_policy { int plid; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; blkcg_pol_free_cpd_fn *cpd_free_fn; blkcg_pol_alloc_pd_fn *pd_alloc_fn; blkcg_pol_init_pd_fn *pd_init_fn; blkcg_pol_online_pd_fn *pd_online_fn; blkcg_pol_offline_pd_fn *pd_offline_fn; blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; blkcg_pol_stat_pd_fn *pd_stat_fn; }; struct blkcg_policy_data { struct blkcg *blkcg; int plid; }; struct blkdev_dio { union { struct kiocb *iocb; struct task_struct *waiter; }; size_t size; atomic_t ref; unsigned int flags; long: 64; long: 64; long: 64; long: 64; long: 64; struct bio bio; long: 64; long: 64; }; struct blkg_conf_ctx { char *input; char *body; struct block_device *bdev; struct blkcg_gq *blkg; }; struct blkg_policy_data { struct blkcg_gq *blkg; int plid; bool online; }; struct blkg_rwstat { struct percpu_counter cpu_cnt[5]; atomic64_t aux_cnt[5]; }; struct blkg_rwstat_sample { u64 cnt[5]; }; struct blkpg_ioctl_arg { int op; int flags; int datalen; void *data; }; struct blkpg_partition { long long int start; long long int length; int pno; char devname[64]; char volname[64]; }; struct block_buffer { u32 filled; bool is_root_hash; u8 *data; }; typedef int 
(*report_zones_cb)(struct blk_zone *, unsigned int, void *); struct hd_geometry; struct pr_ops; struct block_device_operations { void (*submit_bio)(struct bio *); int (*poll_bio)(struct bio *, struct io_comp_batch *, unsigned int); int (*open)(struct gendisk *, blk_mode_t); void (*release)(struct gendisk *); int (*ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); int (*compat_ioctl)(struct block_device *, blk_mode_t, unsigned int, long unsigned int); unsigned int (*check_events)(struct gendisk *, unsigned int); void (*unlock_native_capacity)(struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); int (*set_read_only)(struct block_device *, bool); void (*free_disk)(struct gendisk *); void (*swap_slot_free_notify)(struct block_device *, long unsigned int); int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); char * (*devnode)(struct gendisk *, umode_t *); int (*get_unique_id)(struct gendisk *, u8 *, enum blk_unique_id); struct module *owner; const struct pr_ops *pr_ops; int (*alternative_gpt_sector)(struct gendisk *, sector_t *); }; struct blockgroup_lock { struct bgl_lock locks[128]; }; struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; }; struct bond_marker { u8 subtype; u8 version_number; u8 tlv_type; u8 marker_length; u16 requester_port; struct mac_addr requester_system; u32 requester_transaction_id; u16 pad; u8 tlv_type_terminator; u8 terminator_length; u8 reserved_90[90]; } __attribute__((packed)); struct ethhdr { unsigned char h_dest[6]; unsigned char h_source[6]; __be16 h_proto; }; struct bond_marker_header { struct ethhdr hdr; struct bond_marker marker; }; struct class_attribute { struct attribute attr; ssize_t (*show)(const struct class *, const struct class_attribute *, char *); ssize_t (*store)(const struct class *, const struct class_attribute *, const char *, size_t); }; struct bond_net { struct net *net; struct list_head dev_list; struct proc_dir_entry *proc_dir; struct class_attribute class_attr_bonding_masters; }; struct bond_opt_value { char *string; u64 value; u32 flags; union { char extra[16]; struct net_device *slave_dev; }; }; struct bond_option { int id; const char *name; const char *desc; u32 flags; long unsigned int unsuppmodes; const struct bond_opt_value *values; int (*set)(struct bonding *, const struct bond_opt_value *); }; struct reciprocal_value { u32 m; u8 sh1; u8 sh2; }; struct bond_params { int mode; int xmit_policy; int miimon; u8 num_peer_notif; u8 missed_max; int arp_interval; int arp_validate; int arp_all_targets; int use_carrier; int fail_over_mac; int updelay; int downdelay; int peer_notif_delay; int lacp_active; int lacp_fast; unsigned int min_links; int ad_select; char primary[16]; int primary_reselect; __be32 arp_targets[16]; int tx_queues; int all_slaves_active; int resend_igmp; int lp_interval; int packets_per_slave; int tlb_dynamic_lb; struct reciprocal_value reciprocal_packets_per_slave; u16 ad_actor_sys_prio; u16 ad_user_port_key; struct in6_addr ns_targets[16]; int coupled_control; u8 ad_actor_system[8]; }; struct bond_up_slave { unsigned int count; struct callback_head rcu; struct slave *arr[0]; }; struct bond_vlan_tag { __be16 vlan_proto; short unsigned int vlan_id; }; struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 
rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; __u64 rx_otherhost_dropped; }; struct bpf_prog; struct bonding { struct net_device *dev; struct slave *curr_active_slave; struct slave *current_arp_slave; struct slave *primary_slave; struct bond_up_slave *usable_slaves; struct bond_up_slave *all_slaves; bool force_primary; bool notifier_ctx; s32 slave_cnt; int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); spinlock_t mode_lock; spinlock_t stats_lock; u32 send_peer_notif; u8 igmp_retrans; struct proc_dir_entry *proc_entry; char proc_file_name[16]; struct list_head bond_list; u32 *rr_tx_counter; struct ad_bond_info ad_info; struct alb_bond_info alb_info; struct bond_params params; struct workqueue_struct *wq; struct delayed_work mii_work; struct delayed_work arp_work; struct delayed_work alb_work; struct delayed_work ad_work; struct delayed_work mcast_work; struct delayed_work slave_arr_work; struct dentry *debug_dir; struct rtnl_link_stats64 bond_stats; struct bpf_prog *xdp_prog; }; struct boot_e820_entry { __u64 addr; __u64 size; __u32 type; } __attribute__((packed)); struct screen_info { __u8 orig_x; __u8 orig_y; __u16 ext_mem_k; __u16 orig_video_page; __u8 orig_video_mode; __u8 orig_video_cols; __u8 flags; __u8 unused2; __u16 orig_video_ega_bx; __u16 unused3; __u8 orig_video_lines; __u8 orig_video_isVGA; __u16 orig_video_points; __u16 lfb_width; __u16 lfb_height; __u16 lfb_depth; __u32 lfb_base; __u32 lfb_size; __u16 cl_magic; __u16 cl_offset; __u16 lfb_linelength; __u8 red_size; __u8 red_pos; __u8 green_size; __u8 green_pos; __u8 blue_size; __u8 blue_pos; __u8 rsvd_size; __u8 rsvd_pos; __u16 vesapm_seg; __u16 vesapm_off; __u16 pages; __u16 vesa_attributes; __u32 capabilities; __u32 ext_lfb_base; __u8 _reserved[2]; } __attribute__((packed)); struct ist_info { __u32 signature; __u32 command; __u32 event; __u32 perf_level; }; struct sys_desc_table { __u16 length; __u8 table[14]; }; struct olpc_ofw_header { __u32 ofw_magic; __u32 ofw_version; __u32 cif_handler; __u32 irq_desc_table; }; struct edid_info { unsigned char dummy[128]; }; struct efi_info { __u32 efi_loader_signature; __u32 efi_systab; __u32 efi_memdesc_size; __u32 efi_memdesc_version; __u32 efi_memmap; __u32 efi_memmap_size; __u32 efi_systab_hi; __u32 efi_memmap_hi; }; struct setup_header { __u8 setup_sects; __u16 root_flags; __u32 syssize; __u16 ram_size; __u16 vid_mode; __u16 root_dev; __u16 boot_flag; __u16 jump; __u32 header; __u16 version; __u32 realmode_swtch; __u16 start_sys_seg; __u16 kernel_version; __u8 type_of_loader; __u8 loadflags; __u16 setup_move_size; __u32 code32_start; __u32 ramdisk_image; __u32 ramdisk_size; __u32 bootsect_kludge; __u16 heap_end_ptr; __u8 ext_loader_ver; __u8 ext_loader_type; __u32 cmd_line_ptr; __u32 initrd_addr_max; __u32 kernel_alignment; __u8 relocatable_kernel; __u8 min_alignment; __u16 xloadflags; __u32 cmdline_size; __u32 hardware_subarch; __u64 hardware_subarch_data; __u32 payload_offset; __u32 payload_length; __u64 setup_data; __u64 pref_address; __u32 init_size; __u32 handover_offset; __u32 kernel_info_offset; } __attribute__((packed)); struct edd_device_params { __u16 length; __u16 info_flags; __u32 num_default_cylinders; __u32 num_default_heads; __u32 sectors_per_track; __u64 number_of_sectors; __u16 bytes_per_sector; __u32 dpte_ptr; __u16 
key; __u8 device_path_info_length; __u8 reserved2; __u16 reserved3; __u8 host_bus_type[4]; __u8 interface_type[8]; union { struct { __u16 base_address; __u16 reserved1; __u32 reserved2; } isa; struct { __u8 bus; __u8 slot; __u8 function; __u8 channel; __u32 reserved; } pci; struct { __u64 reserved; } ibnd; struct { __u64 reserved; } xprs; struct { __u64 reserved; } htpt; struct { __u64 reserved; } unknown; } interface_path; union { struct { __u8 device; __u8 reserved1; __u16 reserved2; __u32 reserved3; __u64 reserved4; } ata; struct { __u8 device; __u8 lun; __u8 reserved1; __u8 reserved2; __u32 reserved3; __u64 reserved4; } atapi; struct { __u16 id; __u64 lun; __u16 reserved1; __u32 reserved2; } __attribute__((packed)) scsi; struct { __u64 serial_number; __u64 reserved; } usb; struct { __u64 eui; __u64 reserved; } i1394; struct { __u64 wwid; __u64 lun; } fibre; struct { __u64 identity_tag; __u64 reserved; } i2o; struct { __u32 array_number; __u32 reserved1; __u64 reserved2; } raid; struct { __u8 device; __u8 reserved1; __u16 reserved2; __u32 reserved3; __u64 reserved4; } sata; struct { __u64 reserved1; __u64 reserved2; } unknown; } device_path; __u8 reserved4; __u8 checksum; } __attribute__((packed)); struct edd_info { __u8 device; __u8 version; __u16 interface_support; __u16 legacy_max_cylinder; __u8 legacy_max_head; __u8 legacy_sectors_per_track; struct edd_device_params params; }; struct boot_params { struct screen_info screen_info; struct apm_bios_info apm_bios_info; __u8 _pad2[4]; __u64 tboot_addr; struct ist_info ist_info; __u64 acpi_rsdp_addr; __u8 _pad3[8]; __u8 hd0_info[16]; __u8 hd1_info[16]; struct sys_desc_table sys_desc_table; struct olpc_ofw_header olpc_ofw_header; __u32 ext_ramdisk_image; __u32 ext_ramdisk_size; __u32 ext_cmd_line_ptr; __u8 _pad4[112]; __u32 cc_blob_address; struct edid_info edid_info; struct efi_info efi_info; __u32 alt_mem_k; __u32 scratch; __u8 e820_entries; __u8 eddbuf_entries; __u8 edd_mbr_sig_buf_entries; __u8 kbd_status; __u8 secure_boot; __u8 _pad5[2]; __u8 sentinel; __u8 _pad6[1]; struct setup_header hdr; __u8 _pad7[36]; __u32 edd_mbr_sig_buffer[16]; struct boot_e820_entry e820_table[128]; __u8 _pad8[48]; struct edd_info eddbuf[6]; __u8 _pad9[276]; }; struct boot_params_to_save { unsigned int start; unsigned int len; }; struct boot_triggers { const char *event; char *trigger; }; struct bp_slots_histogram { atomic_t count[4]; }; struct bp_cpuinfo { unsigned int cpu_pinned; struct bp_slots_histogram tsk_pinned; }; struct text_poke_loc; struct bp_patching_desc { struct text_poke_loc *vec; int nr_entries; atomic_t refs; }; struct bpf_active_lock { void *ptr; u32 id; }; struct bpf_map_ops; struct btf_record; struct btf; struct obj_cgroup; struct btf_type; struct bpf_map { const struct bpf_map_ops *ops; struct bpf_map *inner_map_meta; void *security; enum bpf_map_type map_type; u32 key_size; u32 value_size; u32 max_entries; u64 map_extra; u32 map_flags; u32 id; struct btf_record *record; int numa_node; u32 btf_key_type_id; u32 btf_value_type_id; u32 btf_vmlinux_value_type_id; struct btf *btf; struct obj_cgroup *objcg; char name[16]; struct mutex freeze_mutex; atomic64_t refcnt; atomic64_t usercnt; union { struct work_struct work; struct callback_head rcu; }; atomic64_t writecnt; struct { const struct btf_type *attach_func_proto; spinlock_t lock; enum bpf_prog_type type; bool jited; bool xdp_has_frags; } owner; bool bypass_spec_v1; bool frozen; bool free_after_mult_rcu_gp; bool free_after_rcu_gp; atomic64_t sleepable_refcnt; s64 *elem_count; }; typedef 
struct lockdep_map *lockdep_map_p; struct maple_tree { union { spinlock_t ma_lock; lockdep_map_p ma_external_lock; }; unsigned int ma_flags; void *ma_root; }; struct vm_struct; struct bpf_arena { struct bpf_map map; u64 user_vm_start; u64 user_vm_end; struct vm_struct *kern_vm; struct maple_tree mt; struct list_head vma_list; struct mutex lock; }; struct bpf_array_aux; struct bpf_array { struct bpf_map map; u32 elem_size; u32 index_mask; struct bpf_array_aux *aux; union { struct { struct {} __empty_value; char value[0]; }; struct { struct {} __empty_ptrs; void *ptrs[0]; }; struct { struct {} __empty_pptrs; void *pptrs[0]; }; }; }; struct bpf_array_aux { struct list_head poke_progs; struct bpf_map *map; struct mutex poke_mutex; struct work_struct work; }; struct bpf_async_cb { struct bpf_map *map; struct bpf_prog *prog; void *callback_fn; void *value; union { struct callback_head rcu; struct work_struct delete_work; }; u64 flags; }; struct bpf_spin_lock { __u32 val; }; struct bpf_hrtimer; struct bpf_work; struct bpf_async_kern { union { struct bpf_async_cb *cb; struct bpf_hrtimer *timer; struct bpf_work *work; }; struct bpf_spin_lock lock; }; struct btf_func_model { u8 ret_size; u8 ret_flags; u8 nr_args; u8 arg_size[12]; u8 arg_flags[12]; }; struct bpf_attach_target_info { struct btf_func_model fmodel; long int tgt_addr; struct module *tgt_mod; const char *tgt_name; const struct btf_type *tgt_type; }; union bpf_attr { struct { __u32 map_type; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; __u32 inner_map_fd; __u32 numa_node; char map_name[16]; __u32 map_ifindex; __u32 btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; __u64 map_extra; __s32 value_type_btf_obj_fd; __s32 map_token_fd; }; struct { __u32 map_fd; __u64 key; union { __u64 value; __u64 next_key; }; __u64 flags; }; struct { __u64 in_batch; __u64 out_batch; __u64 keys; __u64 values; __u32 count; __u32 map_fd; __u64 elem_flags; __u64 flags; } batch; struct { __u32 prog_type; __u32 insn_cnt; __u64 insns; __u64 license; __u32 log_level; __u32 log_size; __u64 log_buf; __u32 kern_version; __u32 prog_flags; char prog_name[16]; __u32 prog_ifindex; __u32 expected_attach_type; __u32 prog_btf_fd; __u32 func_info_rec_size; __u64 func_info; __u32 func_info_cnt; __u32 line_info_rec_size; __u64 line_info; __u32 line_info_cnt; __u32 attach_btf_id; union { __u32 attach_prog_fd; __u32 attach_btf_obj_fd; }; __u32 core_relo_cnt; __u64 fd_array; __u64 core_relos; __u32 core_relo_rec_size; __u32 log_true_size; __s32 prog_token_fd; }; struct { __u64 pathname; __u32 bpf_fd; __u32 file_flags; __s32 path_fd; }; struct { union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_bpf_fd; __u32 attach_type; __u32 attach_flags; __u32 replace_bpf_fd; union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; }; struct { __u32 prog_fd; __u32 retval; __u32 data_size_in; __u32 data_size_out; __u64 data_in; __u64 data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; __u32 ctx_size_out; __u64 ctx_in; __u64 ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; } test; struct { union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; __u32 link_id; }; __u32 next_id; __u32 open_flags; }; struct { __u32 bpf_fd; __u32 info_len; __u64 info; } info; struct { union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_type; __u32 query_flags; __u32 attach_flags; __u64 prog_ids; union { __u32 prog_cnt; __u32 count; }; __u64 prog_attach_flags; __u64 link_ids; __u64 link_attach_flags; 
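/*
 * union bpf_attr (opened above) is the argument block for the bpf(2)
 * syscall; each anonymous struct corresponds to one BPF_* command.
 * A minimal sketch of creating an array map through the raw syscall,
 * using the fields of the first anonymous struct (illustration only;
 * assumes <linux/bpf.h>, <sys/syscall.h> and <unistd.h>):
 *
 *   union bpf_attr attr = {
 *       .map_type    = BPF_MAP_TYPE_ARRAY,
 *       .key_size    = sizeof(__u32),
 *       .value_size  = sizeof(__u64),
 *       .max_entries = 64,
 *   };
 *   int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */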
__u64 revision; } query; struct { __u64 name; __u32 prog_fd; __u64 cookie; } raw_tracepoint; struct { __u64 btf; __u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; __u32 btf_log_true_size; __u32 btf_flags; __s32 btf_token_fd; }; struct { __u32 pid; __u32 fd; __u32 flags; __u32 buf_len; __u64 buf; __u32 prog_id; __u32 fd_type; __u64 probe_offset; __u64 probe_addr; } task_fd_query; struct { union { __u32 prog_fd; __u32 map_fd; }; union { __u32 target_fd; __u32 target_ifindex; }; __u32 attach_type; __u32 flags; union { __u32 target_btf_id; struct { __u64 iter_info; __u32 iter_info_len; }; struct { __u64 bpf_cookie; } perf_event; struct { __u32 flags; __u32 cnt; __u64 syms; __u64 addrs; __u64 cookies; } kprobe_multi; struct { __u32 target_btf_id; __u64 cookie; } tracing; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; } tcx; struct { __u64 path; __u64 offsets; __u64 ref_ctr_offsets; __u64 cookies; __u32 cnt; __u32 flags; __u32 pid; } uprobe_multi; struct { union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; } netkit; }; } link_create; struct { __u32 link_fd; union { __u32 new_prog_fd; __u32 new_map_fd; }; __u32 flags; union { __u32 old_prog_fd; __u32 old_map_fd; }; } link_update; struct { __u32 link_fd; } link_detach; struct { __u32 type; } enable_stats; struct { __u32 link_fd; __u32 flags; } iter_create; struct { __u32 prog_fd; __u32 map_fd; __u32 flags; } prog_bind_map; struct { __u32 flags; __u32 bpffs_fd; } token_create; }; struct bpf_binary_header { u32 size; long: 0; u8 image[0]; }; struct bpf_bloom_filter { struct bpf_map map; u32 bitset_mask; u32 hash_seed; u32 nr_hash_funcs; long unsigned int bitset[0]; }; struct bpf_bprintf_buffers { char bin_args[512]; char buf[1024]; }; struct bpf_bprintf_data { u32 *bin_args; char *buf; bool get_bin_args; bool get_buf; }; struct bpf_btf_info { __u64 btf; __u32 btf_size; __u32 id; __u64 name; __u32 name_len; __u32 kernel_btf; }; struct btf_field; struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; u8 release_regno; int regno; int access_size; int mem_size; u64 msize_max_value; int ref_obj_id; int dynptr_id; int map_uid; int func_id; struct btf *btf; u32 btf_id; struct btf *ret_btf; u32 ret_btf_id; u32 subprogno; struct btf_field *kptr_field; }; struct bpf_cand_cache { const char *name; u32 name_len; u16 kind; u16 cnt; struct { const struct btf *btf; u32 id; } cands[0]; }; struct bpf_run_ctx {}; struct bpf_prog_array_item; struct bpf_cg_run_ctx { struct bpf_run_ctx run_ctx; const struct bpf_prog_array_item *prog_item; int retval; }; struct bpf_cgroup_dev_ctx { __u32 access_type; __u32 major; __u32 minor; }; struct bpf_link_ops; struct bpf_link { atomic64_t refcnt; u32 id; enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; union { struct callback_head rcu; struct work_struct work; }; }; struct bpf_cgroup_link { struct bpf_link link; struct cgroup *cgroup; enum bpf_attach_type type; }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; __u32 attach_type; }; struct bpf_storage_buffer; struct bpf_cgroup_storage_map; struct bpf_cgroup_storage { union { struct bpf_storage_buffer *buf; void *percpu_buf; }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list_map; struct list_head list_cg; struct rb_node node; struct callback_head rcu; }; struct bpf_cgroup_storage_map { struct bpf_map map; 
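/*
 * As with every map implementation, struct bpf_map is embedded as the
 * first member, so generic code can pass a struct bpf_map * around and
 * the implementation recovers its container, e.g.
 * container_of(map, struct bpf_cgroup_storage_map, map).
 */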
spinlock_t lock; struct rb_root root; struct list_head list; }; struct bpf_lru_list { struct list_head lists[3]; unsigned int counts[2]; struct list_head *next_inactive_rotation; raw_spinlock_t lock; }; struct bpf_lru_locallist; struct bpf_common_lru { struct bpf_lru_list lru_list; struct bpf_lru_locallist *local_list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_core_accessor { __u32 type_id; __u32 idx; const char *name; }; struct bpf_core_cand { const struct btf *btf; __u32 id; }; struct bpf_core_cand_list { struct bpf_core_cand *cands; int len; }; struct bpf_verifier_log; struct bpf_core_ctx { struct bpf_verifier_log *log; const struct btf *btf; }; struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; enum bpf_core_relo_kind kind; }; struct bpf_core_relo_res { __u64 orig_val; __u64 new_val; bool poison; bool validate; bool fail_memsz_adjust; __u32 orig_sz; __u32 orig_type_id; __u32 new_sz; __u32 new_type_id; }; struct bpf_core_spec { const struct btf *btf; struct bpf_core_accessor spec[64]; __u32 root_type_id; enum bpf_core_relo_kind relo_kind; int len; int raw_spec[64]; int raw_len; __u32 bit_offset; }; struct bpf_cpu_map_entry; struct bpf_cpu_map { struct bpf_map map; struct bpf_cpu_map_entry **cpu_map; }; struct bpf_cpumap_val { __u32 qsize; union { int fd; __u32 id; } bpf_prog; }; struct xdp_bulk_queue; struct ptr_ring; struct bpf_cpu_map_entry { u32 cpu; int map_id; struct xdp_bulk_queue *bulkq; struct ptr_ring *queue; struct task_struct *kthread; struct bpf_cpumap_val value; struct bpf_prog *prog; struct completion kthread_running; struct rcu_work free_work; }; struct bpf_cpumask { cpumask_t cpumask; refcount_t usage; }; struct bpf_crypto_type; struct bpf_crypto_ctx { const struct bpf_crypto_type *type; void *tfm; u32 siv_len; struct callback_head rcu; refcount_t usage; }; struct bpf_crypto_params { char type[14]; u8 reserved[2]; char algo[128]; u8 key[256]; u32 key_len; u32 authsize; }; struct bpf_crypto_type { void * (*alloc_tfm)(const char *); void (*free_tfm)(void *); int (*has_algo)(const char *); int (*setkey)(void *, const u8 *, unsigned int); int (*setauthsize)(void *, unsigned int); int (*encrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); int (*decrypt)(void *, const u8 *, u8 *, unsigned int, u8 *); unsigned int (*ivsize)(void *); unsigned int (*statesize)(void *); u32 (*get_flags)(void *); struct module *owner; char name[14]; }; struct bpf_crypto_type_list { const struct bpf_crypto_type *type; struct list_head list; }; struct bpf_ct_opts { s32 netns_id; s32 error; u8 l4proto; u8 dir; u16 ct_zone_id; u8 ct_zone_dir; u8 reserved[3]; }; struct bpf_ctx_arg_aux { u32 offset; enum bpf_reg_type reg_type; struct btf *btf; u32 btf_id; }; struct skb_ext; struct sk_buff { union { struct { struct sk_buff *next; struct sk_buff *prev; union { struct net_device *dev; long unsigned int dev_scratch; }; }; struct rb_node rbnode; struct list_head list; struct llist_node ll_node; }; struct sock *sk; union { ktime_t tstamp; u64 skb_mstamp_ns; }; char cb[48]; union { struct { long unsigned int _skb_refdst; void (*destructor)(struct sk_buff *); }; struct list_head tcp_tsorted_anchor; long unsigned int _sk_redir; }; long unsigned int _nfct; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; __u8 __cloned_offset[0]; __u8 cloned: 1; __u8 nohdr: 1; __u8 fclone: 2; __u8 peeked: 1; __u8 head_frag: 1; __u8 pfmemalloc: 1; __u8 pp_recycle: 1; __u8 active_extensions; union { struct { __u8 
__pkt_type_offset[0]; __u8 pkt_type: 3; __u8 ignore_df: 1; __u8 dst_pending_confirm: 1; __u8 ip_summed: 2; __u8 ooo_okay: 1; __u8 __mono_tc_offset[0]; __u8 tstamp_type: 2; __u8 tc_at_ingress: 1; __u8 tc_skip_classify: 1; __u8 remcsum_offload: 1; __u8 csum_complete_sw: 1; __u8 csum_level: 2; __u8 inner_protocol_type: 1; __u8 l4_hash: 1; __u8 sw_hash: 1; __u8 wifi_acked_valid: 1; __u8 wifi_acked: 1; __u8 no_fcs: 1; __u8 encapsulation: 1; __u8 encap_hdr_csum: 1; __u8 csum_valid: 1; __u8 ndisc_nodetype: 2; __u8 nf_trace: 1; __u8 redirected: 1; __u8 nf_skip_egress: 1; __u8 slow_gro: 1; __u8 unreadable: 1; __u16 tc_index; u16 alloc_cpu; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; union { unsigned int napi_id; unsigned int sender_cpu; }; __u32 secmark; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; }; struct { __u8 __pkt_type_offset[0]; __u8 pkt_type: 3; __u8 ignore_df: 1; __u8 dst_pending_confirm: 1; __u8 ip_summed: 2; __u8 ooo_okay: 1; __u8 __mono_tc_offset[0]; __u8 tstamp_type: 2; __u8 tc_at_ingress: 1; __u8 tc_skip_classify: 1; __u8 remcsum_offload: 1; __u8 csum_complete_sw: 1; __u8 csum_level: 2; __u8 inner_protocol_type: 1; __u8 l4_hash: 1; __u8 sw_hash: 1; __u8 wifi_acked_valid: 1; __u8 wifi_acked: 1; __u8 no_fcs: 1; __u8 encapsulation: 1; __u8 encap_hdr_csum: 1; __u8 csum_valid: 1; __u8 ndisc_nodetype: 2; __u8 nf_trace: 1; __u8 redirected: 1; __u8 nf_skip_egress: 1; __u8 slow_gro: 1; __u8 unreadable: 1; __u16 tc_index; u16 alloc_cpu; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; union { u32 vlan_all; struct { __be16 vlan_proto; __u16 vlan_tci; }; }; union { unsigned int napi_id; unsigned int sender_cpu; }; __u32 secmark; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; } headers; }; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; refcount_t users; struct skb_ext *extensions; }; struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; __u32 ingress_ifindex; __u32 rx_queue_index; __u32 egress_ifindex; }; struct xdp_rxq_info; struct xdp_txq_info; struct xdp_buff { void *data; void *data_end; void *data_meta; void *data_hard_start; struct xdp_rxq_info *rxq; struct xdp_txq_info *txq; u32 frame_sz; u32 flags; }; struct bpf_sock { __u32 bound_dev_if; __u32 family; __u32 type; __u32 protocol; __u32 mark; __u32 priority; __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; __be16 dst_port; __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; __s32 rx_queue_mapping; }; struct bpf_sock_addr { __u32 user_family; __u32 user_ip4; __u32 user_ip6[4]; __u32 user_port; __u32 family; __u32 type; __u32 protocol; __u32 msg_src_ip4; __u32 msg_src_ip6[4]; union { struct bpf_sock *sk; }; }; struct bpf_sock_addr_kern { struct sock *sk; struct sockaddr *uaddr; u64 tmp_reg; void *t_ctx; u32 uaddrlen; }; struct bpf_sock_ops { __u32 op; union { __u32 args[4]; __u32 reply; __u32 replylong[4]; }; __u32 family; __u32 remote_ip4; __u32 
local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 is_fullsock; __u32 snd_cwnd; __u32 srtt_us; __u32 bpf_sock_ops_cb_flags; __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; union { struct bpf_sock *sk; }; union { void *skb_data; }; union { void *skb_data_end; }; __u32 skb_len; __u32 skb_tcp_flags; __u64 skb_hwtstamp; }; struct bpf_sock_ops_kern { struct sock *sk; union { u32 args[4]; u32 reply; u32 replylong[4]; }; struct sk_buff *syn_skb; struct sk_buff *skb; void *skb_data_end; u8 op; u8 is_fullsock; u8 remaining_opt_len; u64 temp; }; struct sk_msg_md { union { void *data; }; union { void *data_end; }; __u32 family; __u32 remote_ip4; __u32 local_ip4; __u32 remote_ip6[4]; __u32 local_ip6[4]; __u32 remote_port; __u32 local_port; __u32 size; union { struct bpf_sock *sk; }; }; struct sk_msg_sg { u32 start; u32 curr; u32 end; u32 size; u32 copybreak; long unsigned int copy[1]; struct scatterlist data[19]; }; struct sk_msg { struct sk_msg_sg sg; void *data; void *data_end; u32 apply_bytes; u32 cork_bytes; u32 flags; struct sk_buff *skb; struct sock *sk_redir; struct sock *sk; struct list_head list; }; struct bpf_flow_dissector { struct bpf_flow_keys *flow_keys; const struct sk_buff *skb; const void *data; const void *data_end; }; struct fred_cs { u64 cs: 16; u64 sl: 2; u64 wfe: 1; }; struct fred_ss { u64 ss: 16; u64 sti: 1; u64 swevent: 1; u64 nmi: 1; int: 13; u64 vector: 8; short: 8; u64 type: 4; char: 4; u64 enclave: 1; u64 lm: 1; u64 nested: 1; char: 1; u64 insnlen: 4; }; struct pt_regs { long unsigned int r15; long unsigned int r14; long unsigned int r13; long unsigned int r12; long unsigned int bp; long unsigned int bx; long unsigned int r11; long unsigned int r10; long unsigned int r9; long unsigned int r8; long unsigned int ax; long unsigned int cx; long unsigned int dx; long unsigned int si; long unsigned int di; long unsigned int orig_ax; long unsigned int ip; union { u16 cs; u64 csx; struct fred_cs fred_cs; }; long unsigned int flags; long unsigned int sp; union { u16 ss; u64 ssx; struct fred_ss fred_ss; }; }; typedef struct pt_regs bpf_user_pt_regs_t; struct bpf_perf_event_data { bpf_user_pt_regs_t regs; __u64 sample_period; __u64 addr; }; struct perf_sample_data; struct bpf_perf_event_data_kern { bpf_user_pt_regs_t *regs; struct perf_sample_data *data; struct perf_event *event; }; struct bpf_raw_tracepoint_args { __u64 args[0]; }; struct bpf_sysctl { __u32 write; __u32 file_pos; }; struct ctl_table_header; struct ctl_table; struct bpf_sysctl_kern { struct ctl_table_header *head; const struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; size_t new_len; int new_updated; int write; loff_t *ppos; u64 tmp_reg; }; struct bpf_sockopt { union { struct bpf_sock *sk; }; union { void *optval; }; union { void *optval_end; }; __s32 level; __s32 optname; __s32 optlen; __s32 retval; }; struct bpf_sockopt_kern { struct sock *sk; u8 *optval; u8 *optval_end; s32 level; s32 optname; s32 optlen; struct task_struct *current_task; u64 tmp_reg; }; struct sk_reuseport_md { union { void *data; }; union { void *data_end; }; __u32 len; __u32 eth_protocol; __u32 ip_protocol; __u32 bind_inany; __u32 hash; 
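/*
 * sk_reuseport_md is the context passed to BPF_PROG_TYPE_SK_REUSEPORT
 * programs (attached with setsockopt(SO_ATTACH_REUSEPORT_EBPF)); such a
 * program typically picks the receiving socket out of a
 * SOCKMAP/SOCKHASH.  Sketch -- the key scheme and map name are made up
 * for illustration:
 *
 *   __u32 key = reuse_md->hash % 4;
 *   bpf_sk_select_reuseport(reuse_md, &sock_map, &key, 0);
 */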
union { struct bpf_sock *sk; }; union { struct bpf_sock *migrating_sk; }; }; struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; struct sock *migrating_sk; void *data_end; u32 hash; u32 reuseport_id; bool bind_inany; }; struct bpf_sk_lookup { union { union { struct bpf_sock *sk; }; __u64 cookie; }; __u32 family; __u32 protocol; __u32 remote_ip4; __u32 remote_ip6[4]; __be16 remote_port; __u32 local_ip4; __u32 local_ip6[4]; __u32 local_port; __u32 ingress_ifindex; }; struct bpf_sk_lookup_kern { u16 family; u16 protocol; __be16 sport; u16 dport; struct { __be32 saddr; __be32 daddr; } v4; struct { const struct in6_addr *saddr; const struct in6_addr *daddr; } v6; struct sock *selected_sk; u32 ingress_ifindex; bool no_reuseport; }; struct nf_hook_state; struct bpf_nf_ctx { const struct nf_hook_state *state; struct sk_buff *skb; }; struct bpf_ctx_convert { struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; struct xdp_md BPF_PROG_TYPE_XDP_prog; struct xdp_buff BPF_PROG_TYPE_XDP_kern; struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; struct pt_regs BPF_PROG_TYPE_KPROBE_kern; __u64 BPF_PROG_TYPE_TRACEPOINT_prog; u64 BPF_PROG_TYPE_TRACEPOINT_kern; struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; void *BPF_PROG_TYPE_TRACING_prog; void *BPF_PROG_TYPE_TRACING_kern; struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; __u32 BPF_PROG_TYPE_LIRC_MODE2_prog; u32 BPF_PROG_TYPE_LIRC_MODE2_kern; struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; void 
*BPF_PROG_TYPE_STRUCT_OPS_prog; void *BPF_PROG_TYPE_STRUCT_OPS_kern; void *BPF_PROG_TYPE_EXT_prog; void *BPF_PROG_TYPE_EXT_kern; void *BPF_PROG_TYPE_LSM_prog; void *BPF_PROG_TYPE_LSM_kern; void *BPF_PROG_TYPE_SYSCALL_prog; void *BPF_PROG_TYPE_SYSCALL_kern; struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_prog; struct bpf_nf_ctx BPF_PROG_TYPE_NETFILTER_kern; }; struct bpf_devmap_val { __u32 ifindex; union { int fd; __u32 id; } bpf_prog; }; struct bpf_dispatcher_prog { struct bpf_prog *prog; refcount_t users; }; struct latch_tree_node { struct rb_node node[2]; }; struct bpf_ksym { long unsigned int start; long unsigned int end; char name[512]; struct list_head lnode; struct latch_tree_node tnode; bool prog; }; struct static_call_key; struct bpf_dispatcher { struct mutex mutex; void *func; struct bpf_dispatcher_prog progs[48]; int num_progs; void *image; void *rw_image; u32 image_off; struct bpf_ksym ksym; struct static_call_key *sc_key; void *sc_tramp; }; struct bpf_dtab_netdev; struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev **netdev_map; struct list_head list; struct hlist_head *dev_index_head; spinlock_t index_lock; unsigned int items; u32 n_buckets; }; struct bpf_dtab_netdev { struct net_device *dev; struct hlist_node index_hlist; struct bpf_prog *xdp_prog; struct callback_head rcu; unsigned int idx; struct bpf_devmap_val val; }; struct bpf_dummy_ops_state; struct bpf_dummy_ops { int (*test_1)(struct bpf_dummy_ops_state *); int (*test_2)(struct bpf_dummy_ops_state *, int, short unsigned int, char, long unsigned int); int (*test_sleepable)(struct bpf_dummy_ops_state *); }; struct bpf_dummy_ops_state { int val; }; struct bpf_dummy_ops_test_args { u64 args[12]; struct bpf_dummy_ops_state state; }; struct bpf_dynptr { __u64 __opaque[2]; }; struct bpf_dynptr_kern { void *data; u32 size; u32 offset; }; struct bpf_prog_array_item { struct bpf_prog *prog; union { struct bpf_cgroup_storage *cgroup_storage[2]; u64 bpf_cookie; }; }; struct bpf_prog_array { struct callback_head rcu; struct bpf_prog_array_item items[0]; }; struct bpf_empty_prog_array { struct bpf_prog_array hdr; struct bpf_prog *null_prog; }; struct bpf_event_entry { struct perf_event *event; struct file *perf_file; struct file *map_file; struct callback_head rcu; }; struct bpf_fentry_test_t { struct bpf_fentry_test_t *a; }; struct bpf_fib_lookup { __u8 family; __u8 l4_protocol; __be16 sport; __be16 dport; union { __u16 tot_len; __u16 mtu_result; }; __u32 ifindex; union { __u8 tos; __be32 flowinfo; __u32 rt_metric; }; union { __be32 ipv4_src; __u32 ipv6_src[4]; }; union { __be32 ipv4_dst; __u32 ipv6_dst[4]; }; union { struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; __u32 tbid; }; union { struct { __u32 mark; }; struct { __u8 smac[6]; __u8 dmac[6]; }; }; }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; __u32 ipv6_dst[4]; }; }; __u32 flags; __be32 flow_label; }; struct bpf_flowtable_opts { s32 error; }; struct bpf_fou_encap { __be16 sport; __be16 dport; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; struct bpf_func_info_aux { u16 linkage; bool unreliable; bool called: 1; bool verified: 1; }; struct bpf_func_proto { u64 (*func)(u64, u64, u64, u64, u64); bool gpl_only; bool pkt_access; bool might_sleep; bool allow_fastcall; enum bpf_return_type ret_type; union { struct { enum bpf_arg_type arg1_type; enum 
bpf_arg_type arg2_type; enum bpf_arg_type arg3_type; enum bpf_arg_type arg4_type; enum bpf_arg_type arg5_type; }; enum bpf_arg_type arg_type[5]; }; union { struct { u32 *arg1_btf_id; u32 *arg2_btf_id; u32 *arg3_btf_id; u32 *arg4_btf_id; u32 *arg5_btf_id; }; u32 *arg_btf_id[5]; struct { size_t arg1_size; size_t arg2_size; size_t arg3_size; size_t arg4_size; size_t arg5_size; }; size_t arg_size[5]; }; int *ret_btf_id; bool (*allowed)(const struct bpf_prog *); }; struct tnum { u64 value; u64 mask; }; struct bpf_reg_state { enum bpf_reg_type type; s32 off; union { int range; struct { struct bpf_map *map_ptr; u32 map_uid; }; struct { struct btf *btf; u32 btf_id; }; struct { u32 mem_size; u32 dynptr_id; }; struct { enum bpf_dynptr_type type; bool first_slot; } dynptr; struct { struct btf *btf; u32 btf_id; enum bpf_iter_state state: 2; int depth: 30; } iter; struct { long unsigned int raw1; long unsigned int raw2; } raw; u32 subprogno; }; struct tnum var_off; s64 smin_value; s64 smax_value; u64 umin_value; u64 umax_value; s32 s32_min_value; s32 s32_max_value; u32 u32_min_value; u32 u32_max_value; u32 id; u32 ref_obj_id; struct bpf_reg_state *parent; u32 frameno; s32 subreg_def; enum bpf_reg_liveness live; bool precise; }; struct bpf_retval_range { s32 minval; s32 maxval; }; struct bpf_reference_state; struct bpf_stack_state; struct bpf_func_state { struct bpf_reg_state regs[11]; int callsite; u32 frameno; u32 subprogno; u32 async_entry_cnt; struct bpf_retval_range callback_ret_range; bool in_callback_fn; bool in_async_callback_fn; bool in_exception_callback_fn; u32 callback_depth; int acquired_refs; struct bpf_reference_state *refs; struct bpf_stack_state *stack; int allocated_stack; }; struct bpf_hrtimer { struct bpf_async_cb cb; struct hrtimer timer; atomic_t cancelling; }; struct bpf_mem_caches; struct bpf_mem_cache; struct bpf_mem_alloc { struct bpf_mem_caches *caches; struct bpf_mem_cache *cache; struct obj_cgroup *objcg; bool percpu; struct work_struct work; }; struct pcpu_freelist_node; struct pcpu_freelist_head { struct pcpu_freelist_node *first; raw_spinlock_t lock; }; struct pcpu_freelist { struct pcpu_freelist_head *freelist; struct pcpu_freelist_head extralist; }; struct bpf_lru_node; typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); struct bpf_lru { union { struct bpf_common_lru common_lru; struct bpf_lru_list *percpu_lru; }; del_from_htab_func del_from_htab; void *del_arg; unsigned int hash_offset; unsigned int nr_scans; bool percpu; long: 64; long: 64; long: 64; long: 64; }; struct bucket; struct htab_elem; struct bpf_htab { struct bpf_map map; struct bpf_mem_alloc ma; struct bpf_mem_alloc pcpu_ma; struct bucket *buckets; void *elems; long: 64; long: 64; long: 64; union { struct pcpu_freelist freelist; struct bpf_lru lru; }; struct htab_elem **extra_elems; struct percpu_counter pcount; atomic_t count; bool use_percpu_counter; u32 n_buckets; u32 elem_size; u32 hashrnd; struct lock_class_key lockdep_key; int *map_locked[8]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_id_pair { u32 old; u32 cur; }; struct bpf_idmap { u32 tmp_id_gen; struct bpf_id_pair map[600]; }; struct bpf_idset { u32 count; u32 ids[600]; }; struct bpf_insn { __u8 code; __u8 dst_reg: 4; __u8 src_reg: 4; __s16 off; __s32 imm; }; struct bpf_insn_access_aux { enum bpf_reg_type reg_type; bool is_ldsx; union { int ctx_field_size; struct { struct btf *btf; u32 btf_id; }; }; struct bpf_verifier_log *log; bool is_retval; }; struct bpf_map_ptr_state { struct bpf_map *map_ptr; 
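/*
 * struct bpf_insn, a few definitions up, is the fixed 8-byte eBPF
 * instruction word.  For example, "r0 = 42" encodes as:
 *
 *   struct bpf_insn mov = {
 *       .code    = BPF_ALU64 | BPF_MOV | BPF_K,
 *       .dst_reg = BPF_REG_0,
 *       .imm     = 42,
 *   };
 */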
bool poison; bool unpriv; }; struct bpf_loop_inline_state { unsigned int initialized: 1; unsigned int fit_for_inline: 1; u32 callback_subprogno; }; struct btf_struct_meta; struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; struct bpf_map_ptr_state map_ptr_state; s32 call_imm; u32 alu_limit; struct { u32 map_index; u32 map_off; }; struct { enum bpf_reg_type reg_type; union { struct { struct btf *btf; u32 btf_id; }; u32 mem_size; }; } btf_var; struct bpf_loop_inline_state loop_inline_state; }; union { u64 obj_new_size; u64 insert_off; }; struct btf_struct_meta *kptr_struct_meta; u64 map_key_state; int ctx_field_size; u32 seen; bool sanitize_stack_spill; bool zext_dst; bool needs_zext; bool storage_get_func_atomic; bool is_iter_next; bool call_with_percpu_alloc_ptr; u8 alu_state; u8 fastcall_pattern: 1; u8 fastcall_spills_num: 3; unsigned int orig_idx; bool jmp_point; bool prune_point; bool force_checkpoint; bool calls_callback; }; typedef void (*bpf_insn_print_t)(void *, const char *, ...); typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); struct bpf_insn_cbs { bpf_insn_print_t cb_print; bpf_insn_revmap_call_t cb_call; bpf_insn_print_imm_t cb_imm; void *private_data; }; struct bpf_iter_meta; struct bpf_iter__bpf_link { union { struct bpf_iter_meta *meta; }; union { struct bpf_link *link; }; }; struct bpf_iter__bpf_map { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; }; struct bpf_iter__bpf_map_elem { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { void *key; }; union { void *value; }; }; struct bpf_iter__bpf_prog { union { struct bpf_iter_meta *meta; }; union { struct bpf_prog *prog; }; }; struct bpf_iter__bpf_sk_storage_map { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { struct sock *sk; }; union { void *value; }; }; struct bpf_iter__cgroup { union { struct bpf_iter_meta *meta; }; union { struct cgroup *cgroup; }; }; struct fib6_info; struct bpf_iter__ipv6_route { union { struct bpf_iter_meta *meta; }; union { struct fib6_info *rt; }; }; struct kallsym_iter; struct bpf_iter__ksym { union { struct bpf_iter_meta *meta; }; union { struct kallsym_iter *ksym; }; }; struct netlink_sock; struct bpf_iter__netlink { union { struct bpf_iter_meta *meta; }; union { struct netlink_sock *sk; }; }; struct bpf_iter__sockmap { union { struct bpf_iter_meta *meta; }; union { struct bpf_map *map; }; union { void *key; }; union { struct sock *sk; }; }; struct bpf_iter__task { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; }; struct bpf_iter__task__safe_trusted { struct bpf_iter_meta *meta; struct task_struct *task; }; struct bpf_iter__task_file { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; u32 fd; union { struct file *file; }; }; struct bpf_iter__task_vma { union { struct bpf_iter_meta *meta; }; union { struct task_struct *task; }; union { struct vm_area_struct *vma; }; }; struct bpf_iter__tcp { union { struct bpf_iter_meta *meta; }; union { struct sock_common *sk_common; }; uid_t uid; }; struct udp_sock; struct bpf_iter__udp { union { struct bpf_iter_meta *meta; }; union { struct udp_sock *udp_sk; }; uid_t uid; long: 0; int bucket; }; struct unix_sock; struct bpf_iter__unix { union { struct bpf_iter_meta *meta; }; union { struct unix_sock *unix_sk; }; uid_t uid; }; struct bpf_iter_aux_info { struct bpf_map *map; struct { struct cgroup 
*start; enum bpf_cgroup_iter_order order; } cgroup; struct { enum bpf_iter_task_type type; u32 pid; } task; }; struct bpf_iter_bits { __u64 __opaque[2]; }; struct bpf_iter_bits_kern { union { long unsigned int *bits; long unsigned int bits_copy; }; u32 nr_bits; int bit; }; struct bpf_iter_css { __u64 __opaque[3]; }; struct bpf_iter_css_kern { struct cgroup_subsys_state *start; struct cgroup_subsys_state *pos; unsigned int flags; }; struct bpf_iter_css_task { __u64 __opaque[1]; }; struct css_task_iter; struct bpf_iter_css_task_kern { struct css_task_iter *css_it; }; struct bpf_iter_target_info; struct bpf_iter_link { struct bpf_link link; struct bpf_iter_aux_info aux; struct bpf_iter_target_info *tinfo; }; union bpf_iter_link_info { struct { __u32 map_fd; } map; struct { enum bpf_cgroup_iter_order order; __u32 cgroup_fd; __u64 cgroup_id; } cgroup; struct { __u32 tid; __u32 pid; __u32 pid_fd; } task; }; struct bpf_iter_meta { union { struct seq_file *seq; }; u64 session_id; u64 seq_num; }; struct bpf_iter_meta__safe_trusted { struct seq_file *seq; }; struct bpf_iter_num { __u64 __opaque[1]; }; struct bpf_iter_num_kern { int cur; int end; }; struct bpf_iter_seq_info; struct bpf_iter_priv_data { struct bpf_iter_target_info *tinfo; const struct bpf_iter_seq_info *seq_info; struct bpf_prog *prog; u64 session_id; u64 seq_num; bool done_stop; long: 0; u8 target_private[0]; }; typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); struct bpf_link_info; typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); typedef const struct bpf_func_proto * (*bpf_iter_get_func_proto_t)(enum bpf_func_id, const struct bpf_prog *); struct bpf_iter_reg { const char *target; bpf_iter_attach_target_t attach_target; bpf_iter_detach_target_t detach_target; bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; bpf_iter_get_func_proto_t get_func_proto; u32 ctx_arg_info_size; u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[2]; const struct bpf_iter_seq_info *seq_info; }; struct bpf_iter_seq_array_map_info { struct bpf_map *map; void *percpu_value_buf; u32 index; }; struct bpf_iter_seq_hash_map_info { struct bpf_map *map; struct bpf_htab *htab; void *percpu_value_buf; u32 bucket_id; u32 skip_elems; }; typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); typedef void (*bpf_iter_fini_seq_priv_t)(void *); struct bpf_iter_seq_info { const struct seq_operations *seq_ops; bpf_iter_init_seq_priv_t init_seq_private; bpf_iter_fini_seq_priv_t fini_seq_private; u32 seq_priv_size; }; struct bpf_iter_seq_link_info { u32 link_id; }; struct bpf_iter_seq_map_info { u32 map_id; }; struct bpf_iter_seq_prog_info { u32 prog_id; }; struct bpf_iter_seq_sk_storage_map_info { struct bpf_map *map; unsigned int bucket_id; unsigned int skip_elems; }; struct pid_namespace; struct bpf_iter_seq_task_common { struct pid_namespace *ns; enum bpf_iter_task_type type; u32 pid; u32 pid_visiting; }; struct bpf_iter_seq_task_file_info { struct bpf_iter_seq_task_common common; struct task_struct *task; u32 tid; u32 fd; }; struct bpf_iter_seq_task_info { struct bpf_iter_seq_task_common common; u32 tid; }; struct bpf_iter_seq_task_vma_info { struct bpf_iter_seq_task_common common; struct task_struct *task; struct mm_struct *mm; struct vm_area_struct 
*vma; u32 tid; long unsigned int prev_vm_start; long unsigned int prev_vm_end; }; struct bpf_iter_target_info { struct list_head list; const struct bpf_iter_reg *reg_info; u32 btf_id; }; struct bpf_iter_task { __u64 __opaque[3]; }; struct bpf_iter_task_kern { struct task_struct *task; struct task_struct *pos; unsigned int flags; }; struct bpf_iter_task_vma { __u64 __opaque[1]; }; struct bpf_iter_task_vma_kern_data; struct bpf_iter_task_vma_kern { struct bpf_iter_task_vma_kern_data *data; }; struct maple_enode; struct maple_alloc; struct ma_state { struct maple_tree *tree; long unsigned int index; long unsigned int last; struct maple_enode *node; long unsigned int min; long unsigned int max; struct maple_alloc *alloc; enum maple_status status; unsigned char depth; unsigned char offset; unsigned char mas_flags; unsigned char end; enum store_type store_type; }; struct vma_iterator { struct ma_state mas; }; struct mmap_unlock_irq_work; struct bpf_iter_task_vma_kern_data { struct task_struct *task; struct mm_struct *mm; struct mmap_unlock_irq_work *work; struct vma_iterator vmi; }; struct bpf_jit_poke_descriptor { void *tailcall_target; void *tailcall_bypass; void *bypass_addr; void *aux; union { struct { struct bpf_map *map; u32 key; } tail_call; }; bool tailcall_target_stable; u8 adj_off; u16 reason; u32 insn_idx; }; struct bpf_jmp_history_entry { u32 idx; u32 prev_idx: 22; u32 flags: 10; u64 linked_regs; }; struct bpf_key { struct key *key; bool has_ref; }; struct bpf_kfunc_btf { struct btf *btf; struct module *module; u16 offset; }; struct bpf_kfunc_btf_tab { struct bpf_kfunc_btf descs[256]; u32 nr_descs; }; struct bpf_kfunc_call_arg_meta { struct btf *btf; u32 func_id; u32 kfunc_flags; const struct btf_type *func_proto; const char *func_name; u32 ref_obj_id; u8 release_regno; bool r0_rdonly; u32 ret_btf_id; u64 r0_size; u32 subprogno; struct { u64 value; bool found; } arg_constant; struct btf *arg_btf; u32 arg_btf_id; bool arg_owning_ref; struct { struct btf_field *field; } arg_list_head; struct { struct btf_field *field; } arg_rbtree_root; struct { enum bpf_dynptr_type type; u32 id; u32 ref_obj_id; } initialized_dynptr; struct { u8 spi; u8 frameno; } iter; struct { struct bpf_map *ptr; int uid; } map; u64 mem_size; }; struct bpf_kfunc_desc { struct btf_func_model func_model; u32 func_id; s32 imm; u16 offset; long unsigned int addr; }; struct bpf_kfunc_desc_tab { struct bpf_kfunc_desc descs[256]; u32 nr_descs; }; struct ftrace_ops; struct ftrace_regs; typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct ftrace_regs *); struct ftrace_hash; struct ftrace_ops_hash { struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; struct mutex regex_lock; }; typedef int (*ftrace_ops_func_t)(struct ftrace_ops *, enum ftrace_ops_cmd); struct ftrace_ops { ftrace_func_t func; struct ftrace_ops *next; long unsigned int flags; void *private; ftrace_func_t saved_func; struct ftrace_ops_hash local_hash; struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; long unsigned int trampoline; long unsigned int trampoline_size; struct list_head list; struct list_head subop_list; ftrace_ops_func_t ops_func; struct ftrace_ops *managed; long unsigned int direct_call; }; struct fprobe; typedef int (*fprobe_entry_cb)(struct fprobe *, long unsigned int, long unsigned int, struct pt_regs *, void *); typedef void (*fprobe_exit_cb)(struct fprobe *, long unsigned int, long unsigned int, struct pt_regs *, void *); struct rethook; struct fprobe { struct 
ftrace_ops ops; long unsigned int nmissed; unsigned int flags; struct rethook *rethook; size_t entry_data_size; int nr_maxactive; fprobe_entry_cb entry_handler; fprobe_exit_cb exit_handler; }; struct bpf_kprobe_multi_link { struct bpf_link link; struct fprobe fp; long unsigned int *addrs; u64 *cookies; u32 cnt; u32 mods_cnt; struct module **mods; u32 flags; }; struct bpf_session_run_ctx { struct bpf_run_ctx run_ctx; bool is_return; void *data; }; struct bpf_kprobe_multi_run_ctx { struct bpf_session_run_ctx session_ctx; struct bpf_kprobe_multi_link *link; long unsigned int entry_ip; }; struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; struct bpf_link_info { __u32 type; __u32 id; __u32 prog_id; union { struct { __u64 tp_name; __u32 tp_name_len; } raw_tracepoint; struct { __u32 attach_type; __u32 target_obj_id; __u32 target_btf_id; } tracing; struct { __u64 cgroup_id; __u32 attach_type; } cgroup; struct { __u64 target_name; __u32 target_name_len; union { struct { __u32 map_id; } map; }; union { struct { __u64 cgroup_id; __u32 order; } cgroup; struct { __u32 tid; __u32 pid; } task; }; } iter; struct { __u32 netns_ino; __u32 attach_type; } netns; struct { __u32 ifindex; } xdp; struct { __u32 map_id; } struct_ops; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { __u64 addrs; __u32 count; __u32 flags; __u64 missed; __u64 cookies; } kprobe_multi; struct { __u64 path; __u64 offsets; __u64 ref_ctr_offsets; __u64 cookies; __u32 path_size; __u32 count; __u32 flags; __u32 pid; } uprobe_multi; struct { __u32 type; union { struct { __u64 file_name; __u32 name_len; __u32 offset; __u64 cookie; } uprobe; struct { __u64 func_name; __u32 name_len; __u32 offset; __u64 addr; __u64 missed; __u64 cookie; } kprobe; struct { __u64 tp_name; __u32 name_len; __u64 cookie; } tracepoint; struct { __u64 config; __u32 type; __u64 cookie; } event; }; } perf_event; struct { __u32 ifindex; __u32 attach_type; } tcx; struct { __u32 ifindex; __u32 attach_type; } netkit; struct { __u32 map_id; __u32 attach_type; } sockmap; }; }; struct bpf_link_ops { void (*release)(struct bpf_link *); void (*dealloc)(struct bpf_link *); void (*dealloc_deferred)(struct bpf_link *); int (*detach)(struct bpf_link *); int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); int (*update_map)(struct bpf_link *, struct bpf_map *, struct bpf_map *); __poll_t (*poll)(struct file *, struct poll_table_struct *); }; struct bpf_link_primer { struct bpf_link *link; struct file *file; int fd; u32 id; }; struct bpf_list_head { __u64 __opaque[2]; }; struct bpf_list_node { __u64 __opaque[3]; }; struct bpf_list_node_kern { struct list_head list_head; void *owner; }; struct bpf_loader_ctx; struct bpf_load_and_run_opts { struct bpf_loader_ctx *ctx; const void *data; const void *insns; __u32 data_sz; __u32 insns_sz; const char *errstr; }; struct bpf_loader_ctx { __u32 sz; __u32 flags; __u32 log_level; __u32 log_size; __u64 log_buf; }; struct bpf_local_storage_data; struct bpf_local_storage_map; struct bpf_local_storage { struct bpf_local_storage_data *cache[16]; struct bpf_local_storage_map *smap; struct hlist_head list; void *owner; struct callback_head rcu; raw_spinlock_t lock; }; struct bpf_local_storage_cache { spinlock_t idx_lock; u64 idx_usage_counts[16]; }; struct bpf_local_storage_data { struct bpf_local_storage_map *smap; 
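/*
 * data[0] below is a zero-length (flexible) array: the value bytes of a
 * local-storage entry are laid out directly after this header, so the
 * lookup path can return a pointer into the element without a second
 * allocation.
 */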
u8 data[0]; }; struct bpf_local_storage_elem { struct hlist_node map_node; struct hlist_node snode; struct bpf_local_storage *local_storage; struct callback_head rcu; long: 64; struct bpf_local_storage_data sdata; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_local_storage_map_bucket; struct bpf_local_storage_map { struct bpf_map map; struct bpf_local_storage_map_bucket *buckets; u32 bucket_log; u16 elem_size; u16 cache_idx; struct bpf_mem_alloc selem_ma; struct bpf_mem_alloc storage_ma; bool bpf_ma; }; struct bpf_local_storage_map_bucket { struct hlist_head list; raw_spinlock_t lock; }; struct bpf_lpm_trie_key_hdr { __u32 prefixlen; }; struct bpf_lpm_trie_key_u8 { union { struct bpf_lpm_trie_key_hdr hdr; __u32 prefixlen; }; __u8 data[0]; }; struct bpf_lru_locallist { struct list_head lists[2]; u16 next_steal; raw_spinlock_t lock; }; struct bpf_lru_node { struct list_head list; u16 cpu; u8 type; u8 ref; }; struct bpf_lwt_prog { struct bpf_prog *prog; char *name; }; struct bpf_lwt { struct bpf_lwt_prog in; struct bpf_lwt_prog out; struct bpf_lwt_prog xmit; int family; }; struct bpf_map_desc { int map_fd; __u32 max_entries; __u64 initial_value; }; struct bpf_offloaded_map; struct bpf_map_dev_ops { int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); int (*map_delete_elem)(struct bpf_offloaded_map *, void *); }; struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[16]; __u32 ifindex; __u32 btf_vmlinux_value_type_id; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_id; __u64 map_extra; }; typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); struct bpf_prog_aux; struct bpf_map_ops { int (*map_alloc_check)(union bpf_attr *); struct bpf_map * (*map_alloc)(union bpf_attr *); void (*map_release)(struct bpf_map *, struct file *); void (*map_free)(struct bpf_map *); int (*map_get_next_key)(struct bpf_map *, void *, void *); void (*map_release_uref)(struct bpf_map *); void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); int (*map_lookup_and_delete_elem)(struct bpf_map *, void *, void *, u64); int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); int (*map_update_batch)(struct bpf_map *, struct file *, const union bpf_attr *, union bpf_attr *); int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); void * (*map_lookup_elem)(struct bpf_map *, void *); long int (*map_update_elem)(struct bpf_map *, void *, void *, u64); long int (*map_delete_elem)(struct bpf_map *, void *); long int (*map_push_elem)(struct bpf_map *, void *, u64); long int (*map_pop_elem)(struct bpf_map *, void *); long int (*map_peek_elem)(struct bpf_map *, void *); void * (*map_lookup_percpu_elem)(struct bpf_map *, void *, u32); void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); void (*map_fd_put_ptr)(struct bpf_map *, void *, bool); int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); u32 (*map_fd_sys_lookup_elem)(void *); void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); int 
(*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); long unsigned int (*map_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); long int (*map_redirect)(struct bpf_map *, u64, u64); bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); long int (*map_for_each_callback)(struct bpf_map *, bpf_callback_t, void *, u64); u64 (*map_mem_usage)(const struct bpf_map *); int *map_btf_id; const struct bpf_iter_seq_info *iter_seq_info; }; struct llist_head { struct llist_node *first; }; struct rcuwait { struct task_struct *task; }; struct irq_work { struct __call_single_node node; void (*func)(struct irq_work *); struct rcuwait irqwait; }; struct bpf_mem_cache { struct llist_head free_llist; local_t active; struct llist_head free_llist_extra; struct irq_work refill_work; struct obj_cgroup *objcg; int unit_size; int free_cnt; int low_watermark; int high_watermark; int batch; int percpu_size; bool draining; struct bpf_mem_cache *tgt; struct llist_head free_by_rcu; struct llist_node *free_by_rcu_tail; struct llist_head waiting_for_gp; struct llist_node *waiting_for_gp_tail; struct callback_head rcu; atomic_t call_rcu_in_progress; struct llist_head free_llist_extra_rcu; struct llist_head free_by_rcu_ttrace; struct llist_head waiting_for_gp_ttrace; struct callback_head rcu_ttrace; atomic_t call_rcu_ttrace_in_progress; }; struct bpf_mem_caches { struct bpf_mem_cache cache[11]; }; struct bpf_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; u64 delegate_cmds; u64 delegate_maps; u64 delegate_progs; u64 delegate_attachs; }; struct bpf_mprog_fp { struct bpf_prog *prog; }; struct bpf_mprog_bundle; struct bpf_mprog_entry { struct bpf_mprog_fp fp_items[64]; struct bpf_mprog_bundle *parent; }; struct bpf_mprog_cp { struct bpf_link *link; }; struct bpf_mprog_bundle { struct bpf_mprog_entry a; struct bpf_mprog_entry b; struct bpf_mprog_cp cp_items[64]; struct bpf_prog *ref; atomic64_t revision; u32 count; }; struct bpf_nested_pt_regs { struct pt_regs regs[3]; }; struct bpf_nh_params { u32 nh_family; union { u32 ipv4_nh; struct in6_addr ipv6_nh; }; }; struct bpf_redirect_info { u64 tgt_index; void *tgt_value; struct bpf_map *map; u32 flags; u32 map_id; enum bpf_map_type map_type; struct bpf_nh_params nh; u32 kern_flags; }; struct bpf_net_context { struct bpf_redirect_info ri; struct list_head cpu_map_flush_list; struct list_head dev_map_flush_list; struct list_head xskmap_map_flush_list; }; struct bpf_netns_link { struct bpf_link link; enum bpf_attach_type type; enum netns_bpf_attach_type netns_type; struct net *net; struct list_head node; }; typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); struct nf_hook_ops { nf_hookfn *hook; struct net_device 
*dev; void *priv; u8 pf; enum nf_hook_ops_type hook_ops_type: 8; unsigned int hooknum; int priority; }; struct nf_defrag_hook; struct bpf_nf_link { struct bpf_link link; struct nf_hook_ops hook_ops; struct net *net; u32 dead; const struct nf_defrag_hook *defrag_hook; }; struct bpf_prog_offload_ops; struct bpf_offload_dev { const struct bpf_prog_offload_ops *ops; struct list_head netdevs; void *priv; }; struct rhash_head { struct rhash_head *next; }; struct bpf_offload_netdev { struct rhash_head l; struct net_device *netdev; struct bpf_offload_dev *offdev; struct list_head progs; struct list_head maps; struct list_head offdev_netdevs; }; struct bpf_offloaded_map { struct bpf_map map; struct net_device *netdev; const struct bpf_map_dev_ops *dev_ops; void *dev_priv; struct list_head offloads; }; struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; struct bpf_perf_link { struct bpf_link link; struct file *perf_file; }; struct bpf_pidns_info { __u32 pid; __u32 tgid; }; struct bpf_preload_info { char link_name[16]; struct bpf_link *link; }; struct bpf_preload_ops { int (*preload)(struct bpf_preload_info *); struct module *owner; }; struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; }; struct bpf_prog_stats; struct sock_fprog_kern; struct bpf_prog { u16 pages; u16 jited: 1; u16 jit_requested: 1; u16 gpl_compatible: 1; u16 cb_access: 1; u16 dst_needed: 1; u16 blinding_requested: 1; u16 blinded: 1; u16 is_func: 1; u16 kprobe_override: 1; u16 has_callchain_buf: 1; u16 enforce_expected_attach_type: 1; u16 call_get_stack: 1; u16 call_get_func_ip: 1; u16 tstamp_type_access: 1; u16 sleepable: 1; enum bpf_prog_type type; enum bpf_attach_type expected_attach_type; u32 len; u32 jited_len; u8 tag[8]; struct bpf_prog_stats *stats; int *active; unsigned int (*bpf_func)(const void *, const struct bpf_insn *); struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; union { struct { struct {} __empty_insns; struct sock_filter insns[0]; }; struct { struct {} __empty_insnsi; struct bpf_insn insnsi[0]; }; }; }; struct bpf_trampoline; struct bpf_prog_ops; struct btf_mod_pair; struct user_struct; struct bpf_token; struct bpf_prog_offload; struct exception_table_entry; struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; u32 used_btf_cnt; u32 max_ctx_offset; u32 max_pkt_offset; u32 max_tp_access; u32 stack_depth; u32 id; u32 func_cnt; u32 real_func_cnt; u32 func_idx; u32 attach_btf_id; u32 ctx_arg_info_size; u32 max_rdonly_access; u32 max_rdwr_access; struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; struct mutex dst_mutex; struct bpf_prog *dst_prog; struct bpf_trampoline *dst_trampoline; enum bpf_prog_type saved_dst_prog_type; enum bpf_attach_type saved_dst_attach_type; bool verifier_zext; bool dev_bound; bool offload_requested; bool attach_btf_trace; bool attach_tracing_prog; bool func_proto_unreliable; bool tail_call_reachable; bool xdp_has_frags; bool exception_cb; bool exception_boundary; struct bpf_arena *arena; const struct btf_type *attach_func_proto; const char *attach_func_name; struct bpf_prog **func; void *jit_data; struct bpf_jit_poke_descriptor *poke_tab; struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; struct mutex used_maps_mutex; struct btf_mod_pair *used_btfs; struct bpf_prog *prog; struct user_struct *user; u64 load_time; u32 verified_insns; int cgroup_atype; struct bpf_map *cgroup_storage[2]; char name[16]; 
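/*
 * bpf_prog_aux gathers the slow-path bookkeeping for a loaded program
 * (ID, used maps and BTF, function/line info, JIT data, ...), kept in a
 * separate allocation so that struct bpf_prog itself stays compact for
 * the run-time fast path.
 */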
u64 (*bpf_exception_cb)(u64, u64, u64, u64, u64); void *security; struct bpf_token *token; struct bpf_prog_offload *offload; struct btf *btf; struct bpf_func_info *func_info; struct bpf_func_info_aux *func_info_aux; struct bpf_line_info *linfo; void **jited_linfo; u32 func_info_cnt; u32 nr_linfo; u32 linfo_idx; struct module *mod; u32 num_exentries; struct exception_table_entry *extable; union { struct work_struct work; struct callback_head rcu; }; }; struct bpf_prog_desc { int prog_fd; }; struct bpf_prog_dummy { struct bpf_prog prog; }; struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[8]; __u32 jited_prog_len; __u32 xlated_prog_len; __u64 jited_prog_insns; __u64 xlated_prog_insns; __u64 load_time; __u32 created_by_uid; __u32 nr_map_ids; __u64 map_ids; char name[16]; __u32 ifindex; __u32 gpl_compatible: 1; __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __u64 jited_ksyms; __u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __u64 line_info; __u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; __u32 verified_insns; __u32 attach_btf_obj_id; __u32 attach_btf_id; }; struct bpf_prog_kstats { u64 nsecs; u64 cnt; u64 misses; }; struct bpf_prog_list { struct hlist_node node; struct bpf_prog *prog; struct bpf_cgroup_link *link; struct bpf_cgroup_storage *storage[2]; }; struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; struct bpf_offload_dev *offdev; void *dev_priv; struct list_head offloads; bool dev_state; bool opt_failed; void *jited_image; u32 jited_len; }; struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *, int, int); int (*finalize)(struct bpf_verifier_env *); int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); int (*remove_insns)(struct bpf_verifier_env *, u32, u32); int (*prepare)(struct bpf_prog *); int (*translate)(struct bpf_prog *); void (*destroy)(struct bpf_prog *); }; struct bpf_prog_ops { int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); }; struct bpf_prog_pack { struct list_head list; void *ptr; long unsigned int bitmap[0]; }; struct bpf_prog_stats { u64_stats_t cnt; u64_stats_t nsecs; u64_stats_t misses; struct u64_stats_sync syncp; long: 64; }; struct bpf_queue_stack { struct bpf_map map; raw_spinlock_t lock; u32 head; u32 tail; u32 size; long: 0; char elements[0]; }; struct bpf_raw_event_map { struct tracepoint *tp; void *bpf_func; u32 num_args; u32 writable_size; long: 64; }; struct bpf_raw_tp_link { struct bpf_link link; struct bpf_raw_event_map *btp; u64 cookie; }; struct bpf_raw_tp_regs { struct pt_regs regs[3]; }; struct bpf_raw_tp_test_run_info { struct bpf_prog *prog; void *ctx; u32 retval; }; struct bpf_rb_node { __u64 __opaque[4]; }; struct bpf_rb_node_kern { struct rb_node rb_node; void *owner; }; struct bpf_rb_root { __u64 __opaque[2]; }; struct bpf_redir_neigh { __u32 nh_family; union { __be32 ipv4_nh; __u32 ipv6_nh[4]; }; }; struct bpf_refcount { __u32 __opaque[1]; }; struct bpf_reference_state { int id; int insn_idx; int callback_ref; }; struct bpf_reg_types { const enum bpf_reg_type types[10]; u32 *btf_id; }; struct bpf_ringbuf { wait_queue_head_t waitq; struct irq_work work; u64 mask; struct page **pages; int nr_pages; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t spinlock; atomic_t 
busy;
/* several hundred consecutive anonymous "long: 64;" cacheline-padding
 * bitfields elided here; the named members keep their original order */
long unsigned int consumer_pos;
/* "long: 64;" cacheline padding elided */
long unsigned int producer_pos; long unsigned int pending_pos;
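/*
 * Illustrative note, not part of the generated dump: the heavy "long: 64;"
 * runs around consumer_pos and producer_pos keep the two positions on
 * separate cachelines/pages so producer and consumer never false-share
 * (the run just below is the tail padding before data[]). Records in
 * data[] start with the 8-byte bpf_ringbuf_hdr defined further down; a
 * consumer-loop sketch, assuming the usual busy/discard bits in hdr->len
 * (consume() and BPF_RINGBUF_FLAG_MASK are placeholders here):
 *
 *   while (cons_pos < smp_load_acquire(&rb->producer_pos)) {
 *       struct bpf_ringbuf_hdr *hdr =
 *           (void *)(rb->data + (cons_pos & rb->mask));
 *       if (hdr->len & BPF_RINGBUF_BUSY_BIT)
 *           break;                        reserved but not yet committed
 *       if (!(hdr->len & BPF_RINGBUF_DISCARD_BIT))
 *           consume(hdr + 1, hdr->len & ~BPF_RINGBUF_FLAG_MASK);
 *       cons_pos += round_up(sizeof(*hdr) +
 *                            (hdr->len & ~BPF_RINGBUF_FLAG_MASK), 8);
 *   }
 *   smp_store_release(&rb->consumer_pos, cons_pos);
 */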
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; char data[0]; }; struct bpf_ringbuf_hdr { u32 len; u32 pg_off; }; struct bpf_ringbuf_map { struct bpf_map map; struct bpf_ringbuf *rb; }; struct bpf_sanitize_info { struct bpf_insn_aux_data aux; bool mask_to_left; }; struct bpf_scratchpad { union { __be32 diff[128]; u8 buff[512]; }; local_lock_t bh_lock; }; struct bpf_security_struct { u32 sid; }; struct bpf_tramp_link { struct bpf_link link; struct hlist_node tramp_hlist; u64 cookie; }; struct bpf_shim_tramp_link { struct bpf_tramp_link link; struct bpf_trampoline *trampoline; }; struct sk_psock_progs { struct bpf_prog *msg_parser; struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; struct bpf_link *msg_parser_link; struct bpf_link *stream_parser_link; struct bpf_link *stream_verdict_link; struct bpf_link *skb_verdict_link; }; struct bpf_shtab_bucket; struct bpf_shtab { struct bpf_map map; struct bpf_shtab_bucket *buckets; u32 buckets_num; u32 elem_size; struct sk_psock_progs progs; atomic_t count; }; struct bpf_shtab_bucket { struct hlist_head head; spinlock_t lock; }; struct bpf_shtab_elem { struct callback_head rcu; u32 hash; struct sock *sk; struct hlist_node node; u8 key[0]; }; struct bpf_sk_storage_diag { u32 nr_maps; struct bpf_map *maps[0]; }; struct qdisc_skb_cb { struct { unsigned int pkt_len; u16 slave_dev_queue_mapping; u16 tc_classid; }; unsigned char data[20]; }; struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; void *data_meta; void *data_end; }; struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; struct bpf_sockopt_buf { u8 data[32]; }; struct bpf_stab { struct bpf_map map; struct sock **sks; struct sk_psock_progs progs; spinlock_t lock; }; struct bpf_stack_build_id { __s32 status; unsigned char build_id[20]; union { __u64 offset; __u64 ip; }; }; struct stack_map_bucket; struct bpf_stack_map { struct bpf_map map; void *elems; struct pcpu_freelist freelist; u32 n_buckets; struct stack_map_bucket *buckets[0]; }; struct bpf_stack_state { struct bpf_reg_state spilled_ptr; u8 slot_type[8]; }; struct bpf_storage_blob { struct bpf_local_storage *storage; }; struct bpf_storage_buffer { struct callback_head rcu; char data[0]; }; struct bpf_verifier_ops; struct btf_member; struct bpf_struct_ops { const struct bpf_verifier_ops *verifier_ops; int (*init)(struct btf *); int (*check_member)(const struct btf_type *, const struct btf_member *, const struct bpf_prog *); int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); int (*reg)(void *, 
struct bpf_link *); void (*unreg)(void *, struct bpf_link *); int (*update)(void *, void *, struct bpf_link *); int (*validate)(void *); void *cfi_stubs; struct module *owner; const char *name; struct btf_func_model func_models[64]; }; struct bpf_struct_ops_arg_info { struct bpf_ctx_arg_aux *info; u32 cnt; }; struct bpf_struct_ops_common_value { refcount_t refcnt; enum bpf_struct_ops_state state; }; struct bpf_struct_ops_bpf_dummy_ops { struct bpf_struct_ops_common_value common; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct bpf_dummy_ops data; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_struct_ops_desc { struct bpf_struct_ops *st_ops; const struct btf_type *type; const struct btf_type *value_type; u32 type_id; u32 value_id; struct bpf_struct_ops_arg_info *arg_info; }; struct bpf_struct_ops_link { struct bpf_link link; struct bpf_map *map; wait_queue_head_t wait_hup; }; struct bpf_struct_ops_value { struct bpf_struct_ops_common_value common; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; char data[0]; }; struct bpf_struct_ops_map { struct bpf_map map; struct callback_head rcu; const struct bpf_struct_ops_desc *st_ops_desc; struct mutex lock; struct bpf_link **links; u32 links_cnt; u32 image_pages_cnt; void *image_pages[8]; struct btf *btf; struct bpf_struct_ops_value *uvalue; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct bpf_struct_ops_value kvalue; }; struct rate_sample; union tcp_cc_info; struct tcp_congestion_ops { u32 (*ssthresh)(struct sock *); void (*cong_avoid)(struct sock *, u32, u32); void (*set_state)(struct sock *, u8); void (*cwnd_event)(struct sock *, enum tcp_ca_event); void (*in_ack_event)(struct sock *, u32); void (*pkts_acked)(struct sock *, const struct ack_sample *); u32 (*min_tso_segs)(struct sock *); void (*cong_control)(struct sock *, u32, int, const struct rate_sample *); u32 (*undo_cwnd)(struct sock *); u32 (*sndbuf_expand)(struct sock *); size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); char name[16]; struct module *owner; struct list_head list; u32 key; u32 flags; void (*init)(struct sock *); void (*release)(struct sock *); long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bpf_struct_ops_tcp_congestion_ops { struct bpf_struct_ops_common_value common; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct tcp_congestion_ops data; }; struct bpf_subprog_arg_info { enum bpf_arg_type arg_type; union { u32 mem_size; u32 btf_id; }; }; struct bpf_subprog_info { u32 start; u32 linfo_idx; u16 stack_depth; u16 stack_extra; s16 fastcall_stack_off; bool has_tail_call: 1; bool tail_call_reachable: 1; bool has_ld_abs: 1; bool is_cb: 1; bool is_async_cb: 1; bool is_exception_cb: 1; bool args_cached: 1; bool keep_fastcall_stack: 1; u8 arg_cnt; struct bpf_subprog_arg_info args[5]; }; struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; int bucket; int offset; int sbucket; int num; loff_t last_pos; }; struct bpf_tcp_iter_state { struct tcp_iter_state state; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct bpf_tcp_req_attrs { u32 rcv_tsval; u32 rcv_tsecr; u16 mss; u8 rcv_wscale; u8 snd_wscale; u8 ecn_ok; u8 wscale_ok; u8 sack_ok; u8 tstamp_ok; u8 usec_ts_ok; u8 reserved[3]; }; struct bpf_tcp_sock { __u32 snd_cwnd; __u32 srtt_us; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; 
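/*
 * Illustrative note, not part of the generated dump: bpf_struct_ops
 * (just above) is the glue that lets a BPF object implement a kernel ops
 * table such as tcp_congestion_ops: the verifier checks each program
 * against func_models[], and reg()/unreg() install or remove the
 * filled-in table. A BPF-side sketch of one slot, assuming libbpf's
 * SEC("struct_ops") convention and a hypothetical "mycc" congestion
 * control:
 *
 *   SEC("struct_ops/mycc_undo_cwnd")
 *   __u32 BPF_PROG(mycc_undo_cwnd, struct sock *sk)
 *   {
 *       return 10;                       dummy cwnd for the sketch
 *   }
 *
 *   SEC(".struct_ops")
 *   struct tcp_congestion_ops mycc = {
 *       .undo_cwnd = (void *)mycc_undo_cwnd,
 *       .name      = "mycc",
 *   };
 */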
__u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u64 bytes_received; __u64 bytes_acked; __u32 dsack_dups; __u32 delivered; __u32 delivered_ce; __u32 icsk_retransmits; }; struct bpf_test_timer { enum { NO_PREEMPT = 0, NO_MIGRATE = 1, } mode; u32 i; u64 time_start; u64 time_spent; }; struct bpf_throw_ctx { struct bpf_prog_aux *aux; u64 sp; u64 bp; int cnt; }; struct bpf_timer { __u64 __opaque[2]; }; struct user_namespace; struct bpf_token { struct work_struct work; atomic64_t refcnt; struct user_namespace *userns; u64 allowed_cmds; u64 allowed_maps; u64 allowed_progs; u64 allowed_attachs; void *security; }; struct bpf_trace_module { struct module *module; struct list_head list; }; struct bpf_trace_run_ctx { struct bpf_run_ctx run_ctx; u64 bpf_cookie; bool is_uprobe; }; union perf_sample_weight { __u64 full; struct { __u32 var1_dw; __u16 var2_w; __u16 var3_w; }; }; union perf_mem_data_src { __u64 val; struct { __u64 mem_op: 5; __u64 mem_lvl: 14; __u64 mem_snoop: 5; __u64 mem_lock: 2; __u64 mem_dtlb: 7; __u64 mem_lvl_num: 4; __u64 mem_remote: 1; __u64 mem_snoopx: 2; __u64 mem_blk: 3; __u64 mem_hops: 3; __u64 mem_rsvd: 18; }; }; struct perf_regs { __u64 abi; struct pt_regs *regs; }; struct perf_callchain_entry; struct perf_raw_record; struct perf_branch_stack; struct perf_sample_data { u64 sample_flags; u64 period; u64 dyn_size; u64 type; struct { u32 pid; u32 tid; } tid_entry; u64 time; u64 id; struct { u32 cpu; u32 reserved; } cpu_entry; u64 ip; struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; struct perf_regs regs_user; struct perf_regs regs_intr; u64 stack_user_size; u64 stream_id; u64 cgroup; u64 addr; u64 phys_addr; u64 data_page_size; u64 code_page_size; u64 aux_size; long: 64; long: 64; long: 64; long: 64; }; struct bpf_trace_sample_data { struct perf_sample_data sds[3]; }; struct bpf_tracing_link { struct bpf_tramp_link link; enum bpf_attach_type attach_type; struct bpf_trampoline *trampoline; struct bpf_prog *tgt_prog; }; struct bpf_tramp_image { void *image; int size; struct bpf_ksym ksym; struct percpu_ref pcref; void *ip_after_call; void *ip_epilogue; union { struct callback_head rcu; struct work_struct work; }; }; struct bpf_tramp_links { struct bpf_tramp_link *links[38]; int nr_links; }; struct bpf_tramp_run_ctx { struct bpf_run_ctx run_ctx; u64 bpf_cookie; struct bpf_run_ctx *saved_run_ctx; }; struct bpf_trampoline { struct hlist_node hlist; struct ftrace_ops *fops; struct mutex mutex; refcount_t refcnt; u32 flags; u64 key; struct { struct btf_func_model model; void *addr; bool ftrace_managed; } func; struct bpf_prog *extension_prog; struct hlist_head progs_hlist[3]; int progs_cnt[3]; struct bpf_tramp_image *cur_image; }; struct bpf_tunnel_key { __u32 tunnel_id; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; __u8 tunnel_tos; __u8 tunnel_ttl; union { __u16 tunnel_ext; __be16 tunnel_flags; }; __u32 tunnel_label; union { __u32 local_ipv4; __u32 local_ipv6[4]; }; }; struct bpf_tuple { struct bpf_prog *prog; struct bpf_link *link; }; struct udp_iter_state { struct seq_net_private p; int bucket; }; struct bpf_udp_iter_state { struct udp_iter_state state; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; int offset; struct sock **batch; bool 
st_bucket_done; }; struct bpf_unix_iter_state { struct seq_net_private p; unsigned int cur_sk; unsigned int end_sk; unsigned int max_sk; struct sock **batch; bool st_bucket_done; }; struct uprobe_consumer { int (*handler)(struct uprobe_consumer *, struct pt_regs *); int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *); bool (*filter)(struct uprobe_consumer *, struct mm_struct *); struct list_head cons_node; }; struct bpf_uprobe_multi_link; struct uprobe; struct bpf_uprobe { struct bpf_uprobe_multi_link *link; loff_t offset; long unsigned int ref_ctr_offset; u64 cookie; struct uprobe *uprobe; struct uprobe_consumer consumer; }; struct bpf_uprobe_multi_link { struct path path; struct bpf_link link; u32 cnt; u32 flags; struct bpf_uprobe *uprobes; struct task_struct *task; }; struct bpf_uprobe_multi_run_ctx { struct bpf_run_ctx run_ctx; long unsigned int entry_ip; struct bpf_uprobe *uprobe; }; struct btf_mod_pair { struct btf *btf; struct module *module; }; struct bpf_verifier_log { u64 start_pos; u64 end_pos; char *ubuf; u32 level; u32 len_total; u32 len_max; char kbuf[1024]; }; struct bpf_verifier_stack_elem; struct bpf_verifier_state; struct bpf_verifier_state_list; struct bpf_verifier_env { u32 insn_idx; u32 prev_insn_idx; struct bpf_prog *prog; const struct bpf_verifier_ops *ops; struct module *attach_btf_mod; struct bpf_verifier_stack_elem *head; int stack_size; bool strict_alignment; bool test_state_freq; bool test_reg_invariants; struct bpf_verifier_state *cur_state; struct bpf_verifier_state_list **explored_states; struct bpf_verifier_state_list *free_list; struct bpf_map *used_maps[64]; struct btf_mod_pair used_btfs[64]; u32 used_map_cnt; u32 used_btf_cnt; u32 id_gen; u32 hidden_subprog_cnt; int exception_callback_subprog; bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; bool seen_exception; struct bpf_insn_aux_data *insn_aux_data; const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[258]; union { struct bpf_idmap idmap_scratch; struct bpf_idset idset_scratch; }; struct { int *insn_state; int *insn_stack; int cur_stack; } cfg; struct backtrack_state bt; struct bpf_jmp_history_entry *cur_hist_ent; u32 pass_cnt; u32 subprog_cnt; u32 prev_insn_processed; u32 insn_processed; u32 prev_jmps_processed; u32 jmps_processed; u64 verification_time; u32 max_states_per_insn; u32 total_states; u32 peak_states; u32 longest_mark_read_walk; bpfptr_t fd_array; u32 scratched_regs; u64 scratched_stack_slots; u64 prev_log_pos; u64 prev_insn_print_pos; struct bpf_reg_state fake_reg[2]; char tmp_str_buf[320]; struct bpf_insn insn_buf[32]; struct bpf_insn epilogue_buf[32]; }; struct bpf_verifier_ops { const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); int (*gen_epilogue)(struct bpf_insn *, const struct bpf_prog *, s16); int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); int (*btf_struct_access)(struct bpf_verifier_log *, const struct bpf_reg_state *, int, int); }; struct bpf_verifier_state { struct bpf_func_state *frame[8]; struct bpf_verifier_state *parent; u32 branches; 
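/*
 * Illustrative note, not part of the generated dump: bpf_verifier_env and
 * bpf_verifier_ops (above) are the per-verification state and the
 * program-type hooks. is_valid_access() gates every context access, and
 * convert_ctx_access() later rewrites the ctx load into a real field
 * offset. A shape-only sketch of such a gate, assuming an xdp_md-style
 * context:
 *
 *   static bool my_is_valid_access(int off, int size,
 *                                  enum bpf_access_type type, ...)
 *   {
 *       if (off < 0 || off + size > sizeof(struct xdp_md))
 *           return false;                outside the ctx struct
 *       if (off % size != 0)
 *           return false;                require naturally aligned loads
 *       return type == BPF_READ;         this sketch allows reads only
 *   }
 */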
u32 insn_idx; u32 curframe; struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; u32 active_preempt_lock; bool used_as_loop_entry; bool in_sleepable; u32 first_insn_idx; u32 last_insn_idx; struct bpf_verifier_state *loop_entry; struct bpf_jmp_history_entry *jmp_history; u32 jmp_history_cnt; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; }; struct bpf_verifier_stack_elem { struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; u32 log_pos; }; struct bpf_verifier_state_list { struct bpf_verifier_state state; struct bpf_verifier_state_list *next; int miss_cnt; int hit_cnt; }; struct bpf_work { struct bpf_async_cb cb; struct work_struct work; struct work_struct delete_work; }; struct bpf_wq { __u64 __opaque[2]; }; struct bpf_xdp_link; struct bpf_xdp_entity { struct bpf_prog *prog; struct bpf_xdp_link *link; }; struct bpf_xdp_link { struct bpf_link link; struct net_device *dev; int flags; }; struct bpf_xdp_sock { __u32 queue_id; }; struct bpf_xfrm_info { u32 if_id; int link; }; struct bpf_xfrm_state { __u32 reqid; __u32 spi; __u16 family; __u16 ext; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; }; struct bpf_xfrm_state_opts { s32 error; s32 netns_id; u32 mark; xfrm_address_t daddr; __be32 spi; u8 proto; u16 family; }; struct bpffs_btf_enums { const struct btf *btf; const struct btf_type *cmd_t; const struct btf_type *map_t; const struct btf_type *prog_t; const struct btf_type *attach_t; }; struct trace_entry { short unsigned int type; unsigned char flags; unsigned char preempt_count; int pid; }; struct bprint_entry { struct trace_entry ent; long unsigned int ip; const char *fmt; u32 buf[0]; }; struct bputs_entry { struct trace_entry ent; long unsigned int ip; const char *str; }; struct br_input_skb_cb { struct net_device *brdev; u16 frag_max_size; u8 proxyarp_replied: 1; u8 src_port_isolated: 1; u8 promisc: 1; u32 backup_nhid; }; struct br_mdb_entry { __u32 ifindex; __u8 state; __u8 flags; __u16 vid; struct { union { __be32 ip4; struct in6_addr ip6; unsigned char mac_addr[6]; } u; __be16 proto; } addr; }; struct br_port_msg { __u8 family; __u32 ifindex; }; struct branch_entry { union { struct { u64 ip: 58; u64 ip_sign_ext: 5; u64 mispredict: 1; } split; u64 full; } from; union { struct { u64 ip: 58; u64 ip_sign_ext: 3; u64 reserved: 1; u64 spec: 1; u64 valid: 1; } split; u64 full; } to; }; struct brd_device { int brd_number; struct gendisk *brd_disk; struct list_head brd_list; struct xarray brd_pages; u64 brd_nr_pages; }; struct broadcast_sk { struct sock *sk; struct work_struct work; }; struct broken_edid { u8 manufacturer[4]; u32 model; u32 fix; }; struct fs_pin { wait_queue_head_t wait; int done; struct hlist_node s_list; struct hlist_node m_list; void (*kill)(struct fs_pin *); }; struct bsd_acct_struct { struct fs_pin pin; atomic_long_t count; struct callback_head rcu; struct mutex lock; int active; long unsigned int needcheck; struct file *file; struct pid_namespace *ns; struct work_struct work; struct completion done; }; struct bsd_partition { __le32 p_size; __le32 p_offset; __le32 p_fsize; __u8 p_fstype; __u8 p_frag; __le16 p_cpg; }; struct bsd_disklabel { __le32 d_magic; __s16 d_type; __s16 d_subtype; char d_typename[16]; char d_packname[16]; __u32 d_secsize; __u32 d_nsectors; __u32 d_ntracks; __u32 d_ncylinders; __u32 d_secpercyl; __u32 d_secperunit; __u16 d_sparespertrack; __u16 d_sparespercyl; __u32 d_acylinders; __u16 d_rpm; __u16 d_interleave; __u16 d_trackskew; __u16 d_cylskew; __u32 
d_headswitch; __u32 d_trkseek; __u32 d_flags; __u32 d_drivedata[5]; __u32 d_spare[5]; __le32 d_magic2; __le16 d_checksum; __le16 d_npartitions; __le32 d_bbsize; __le32 d_sbsize; struct bsd_partition d_partitions[16]; }; struct bsg_buffer { unsigned int payload_len; int sg_cnt; struct scatterlist *sg_list; }; struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; }; struct sg_io_v4; typedef int bsg_sg_io_fn(struct request_queue *, struct sg_io_v4 *, bool, unsigned int); struct bsg_device { struct request_queue *queue; struct device device; struct cdev cdev; int max_queue; unsigned int timeout; unsigned int reserved_size; bsg_sg_io_fn *sg_io_fn; }; struct bsg_job { struct device *dev; struct kref kref; unsigned int timeout; void *request; void *reply; unsigned int request_len; unsigned int reply_len; struct bsg_buffer request_payload; struct bsg_buffer reply_payload; int result; unsigned int reply_payload_rcv_len; struct request *bidi_rq; struct bio *bidi_bio; void *dd_data; }; typedef int bsg_job_fn(struct bsg_job *); typedef enum blk_eh_timer_return bsg_timeout_fn(struct request *); struct bsg_set { struct blk_mq_tag_set tag_set; struct bsg_device *bd; bsg_job_fn *job_fn; bsg_timeout_fn *timeout_fn; }; typedef bool busy_tag_iter_fn(struct request *, void *); struct bt_iter_data { struct blk_mq_hw_ctx *hctx; struct request_queue *q; busy_tag_iter_fn *fn; void *data; bool reserved; }; struct bt_tags_iter_data { struct blk_mq_tags *tags; busy_tag_iter_fn *fn; void *data; unsigned int flags; }; struct btf_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; __u32 type_off; __u32 type_len; __u32 str_off; __u32 str_len; }; struct btf_kfunc_set_tab; struct btf_id_dtor_kfunc_tab; struct btf_struct_metas; struct btf_struct_ops_tab; struct btf { void *data; struct btf_type **types; u32 *resolved_ids; u32 *resolved_sizes; const char *strings; void *nohdr_data; struct btf_header hdr; u32 nr_types; u32 types_size; u32 data_size; refcount_t refcnt; u32 id; struct callback_head rcu; struct btf_kfunc_set_tab *kfunc_set_tab; struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; struct btf_struct_metas *struct_meta_tab; struct btf_struct_ops_tab *struct_ops_tab; struct btf *base_btf; u32 start_id; u32 start_str_off; char name[56]; bool kernel_btf; __u32 *base_id_map; }; struct btf_anon_stack { u32 tid; u32 offset; }; struct btf_array { __u32 type; __u32 index_type; __u32 nelems; }; struct btf_decl_tag { __s32 component_idx; }; struct btf_enum { __u32 name_off; __s32 val; }; struct btf_enum64 { __u32 name_off; __u32 val_lo32; __u32 val_hi32; }; typedef void (*btf_dtor_kfunc_t)(void *); struct btf_field_kptr { struct btf *btf; struct module *module; btf_dtor_kfunc_t dtor; u32 btf_id; }; struct btf_field_graph_root { struct btf *btf; u32 value_btf_id; u32 node_offset; struct btf_record *value_rec; }; struct btf_field { u32 offset; u32 size; enum btf_field_type type; union { struct btf_field_kptr kptr; struct btf_field_graph_root graph_root; }; }; struct btf_field_desc { int t_off_cnt; int t_offs[2]; int m_sz; int m_off_cnt; int m_offs[1]; }; struct btf_field_info { enum btf_field_type type; u32 off; union { struct { u32 type_id; } kptr; struct { const char *node_name; u32 value_btf_id; } graph_root; }; }; struct btf_field_iter { struct btf_field_desc desc; void *p; int m_idx; int off_idx; int vlen; }; struct btf_id_dtor_kfunc { u32 btf_id; u32 kfunc_btf_id; }; struct btf_id_dtor_kfunc_tab { u32 cnt; struct 
btf_id_dtor_kfunc dtors[0]; }; struct btf_id_set { u32 cnt; u32 ids[0]; }; struct btf_id_set8 { u32 cnt; u32 flags; struct { u32 id; u32 flags; } pairs[0]; }; typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *, u32); struct btf_kfunc_hook_filter { btf_kfunc_filter_t filters[16]; u32 nr_filters; }; struct btf_kfunc_id_set { struct module *owner; struct btf_id_set8 *set; btf_kfunc_filter_t filter; }; struct btf_kfunc_set_tab { struct btf_id_set8 *sets[14]; struct btf_kfunc_hook_filter hook_filters[14]; }; struct btf_verifier_env; struct resolve_vertex; struct btf_show; struct btf_kind_operations { s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); void (*log_details)(struct btf_verifier_env *, const struct btf_type *); void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); }; struct btf_member { __u32 name_off; __u32 type; __u32 offset; }; struct btf_module { struct list_head list; struct module *module; struct btf *btf; struct bin_attribute *sysfs_attr; int flags; }; struct btf_name_info { const char *name; bool needs_size: 1; unsigned int size: 31; __u32 id; }; struct btf_param { __u32 name_off; __u32 type; }; struct btf_ptr { void *ptr; __u32 type_id; __u32 flags; }; struct btf_record { u32 cnt; u32 field_mask; int spin_lock_off; int timer_off; int wq_off; int refcount_off; struct btf_field fields[0]; }; struct btf_relocate { struct btf *btf; const struct btf *base_btf; const struct btf *dist_base_btf; unsigned int nr_base_types; unsigned int nr_split_types; unsigned int nr_dist_base_types; int dist_str_len; int base_str_len; __u32 *id_map; __u32 *str_map; }; struct btf_sec_info { u32 off; u32 len; }; struct btf_show { u64 flags; void *target; void (*showfn)(struct btf_show *, const char *, struct __va_list_tag *); const struct btf *btf; struct { u8 depth; u8 depth_to_show; u8 depth_check; u8 array_member: 1; u8 array_terminated: 1; u16 array_encoding; u32 type_id; int status; const struct btf_type *type; const struct btf_member *member; char name[80]; } state; struct { u32 size; void *head; void *data; u8 safe[32]; } obj; }; struct btf_show_snprintf { struct btf_show show; int len_left; int len; }; struct btf_struct_meta { u32 btf_id; struct btf_record *record; }; struct btf_struct_metas { u32 cnt; struct btf_struct_meta types[0]; }; struct btf_struct_ops_tab { u32 cnt; u32 capacity; struct bpf_struct_ops_desc ops[0]; }; struct btf_type { __u32 name_off; __u32 info; union { __u32 size; __u32 type; }; }; struct btf_var { __u32 linkage; }; struct btf_var_secinfo { __u32 type; __u32 offset; __u32 size; }; struct resolve_vertex { const struct btf_type *t; u32 type_id; u16 next_member; }; struct btf_verifier_env { struct btf *btf; u8 *visit_states; struct resolve_vertex stack[32]; struct bpf_verifier_log log; u32 log_type_id; u32 top_stack; enum verifier_phase phase; enum resolve_mode resolve_mode; }; struct bts_phys { struct page *page; long unsigned int size; long unsigned int offset; long unsigned int displacement; }; struct bts_buffer { size_t real_size; unsigned int nr_pages; unsigned int nr_bufs; unsigned int cur_buf; bool snapshot; local_t data_size; local_t head; long unsigned int end; void 
**data_pages; struct bts_phys buf[0]; };
struct perf_buffer;
struct perf_output_handle { struct perf_event *event; struct perf_buffer *rb; long unsigned int wakeup; long unsigned int size; u64 aux_flags; union { void *addr; long unsigned int head; }; int page; };
struct debug_store { u64 bts_buffer_base; u64 bts_index; u64 bts_absolute_maximum; u64 bts_interrupt_threshold; u64 pebs_buffer_base; u64 pebs_index; u64 pebs_absolute_maximum; u64 pebs_interrupt_threshold; u64 pebs_event_reset[48];
/* "long: 64;" cacheline padding elided */
};
struct bts_ctx { struct perf_output_handle handle;
/* "long: 64;" cacheline padding elided */
struct debug_store ds_back; int state;
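/*
 * Illustrative note, not part of the generated dump: debug_store (above)
 * is the per-CPU DS area shared by BTS and PEBS; bts_ctx snapshots it in
 * ds_back while branch tracing owns the buffer (the "long: 64;" run just
 * below is bts_ctx's trailing cacheline padding). The base/index/maximum/
 * threshold quadruple describes a ring, set up roughly like:
 *
 *   ds->bts_buffer_base         = (u64)buf;
 *   ds->bts_index               = ds->bts_buffer_base;
 *   ds->bts_absolute_maximum    = ds->bts_buffer_base + size;
 *   ds->bts_interrupt_threshold = ds->bts_absolute_maximum
 *                               - 16 * sizeof(struct bts_record);
 *
 * (the slack of 16 records before the PMI fires is illustrative; the
 * exact value is hardware/driver specific.)
 */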
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct bts_record { u64 from; u64 to; u64 flags; }; struct hlist_nulls_head { struct hlist_nulls_node *first; }; struct bucket { struct hlist_nulls_head head; raw_spinlock_t raw_lock; }; struct rhash_lock_head; struct bucket_table { unsigned int size; unsigned int nest; u32 hash_rnd; struct list_head walkers; struct callback_head rcu; struct bucket_table *future_tbl; struct lockdep_map dep_map; long: 64; long: 64; long: 64; long: 64; struct rhash_lock_head *buckets[0]; }; struct buf_sel_arg { struct iovec *iovs; size_t out_len; size_t max_len; short unsigned int nr_iovs; short unsigned int mode; }; struct buffer_data_page { u64 time_stamp; local_t commit; unsigned char data[0]; }; struct buffer_data_read_page { unsigned int order; struct buffer_data_page *data; }; typedef void bh_end_io_t(struct buffer_head *, int); struct buffer_head { long unsigned int b_state; struct buffer_head *b_this_page; union { struct page *b_page; struct folio *b_folio; }; sector_t b_blocknr; size_t b_size; char *b_data; struct block_device *b_bdev; bh_end_io_t *b_end_io; void *b_private; struct list_head b_assoc_buffers; struct address_space *b_assoc_map; atomic_t b_count; spinlock_t b_uptodate_lock; }; struct buffer_page { struct list_head list; local_t write; unsigned int read; local_t entries; long unsigned int real_end; unsigned int order; u32 id: 30; u32 range: 1; struct buffer_data_page *page; }; struct buffer_ref { struct trace_buffer *buffer; void *page; int cpu; refcount_t refcount; }; struct bug_entry { int bug_addr_disp; int file_disp; short unsigned int line; short unsigned int flags; }; struct builtin_fw { char *name; void *data; long unsigned int size; }; struct group_data { int limit[21]; int base[20]; int permute[258]; int minLen; int maxLen; }; struct bunzip_data { int writeCopies; int writePos; int writeRunCountdown; int writeCount; int writeCurrent; long int (*fill)(void *, long unsigned int); long int inbufCount; long int inbufPos; unsigned char *inbuf; unsigned int inbufBitCount; unsigned int inbufBits; unsigned int crc32Table[256]; unsigned int headerCRC; unsigned int totalCRC; unsigned int writeCRC; unsigned int *dbuf; unsigned int dbufSize; unsigned char selectors[32768]; struct group_data groups[6]; int io_error; int byteCount[256]; unsigned char symToByte[256]; unsigned char mtfSymbol[256]; }; struct bus_attribute { struct attribute attr; ssize_t (*show)(const struct bus_type *, char *); ssize_t 
(*store)(const struct bus_type *, const char *, size_t); }; struct bus_dma_region { phys_addr_t cpu_start; dma_addr_t dma_start; u64 size; }; struct bus_type { const char *name; const char *dev_name; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, const struct device_driver *); int (*uevent)(const struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); void (*sync_state)(struct device *); void (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t); int (*resume)(struct device *); int (*num_vf)(struct device *); int (*dma_configure)(struct device *); void (*dma_cleanup)(struct device *); const struct dev_pm_ops *pm; bool need_parent_lock; }; struct bvec_iter_all { struct bio_vec bv; int idx; unsigned int done; }; struct byd_data { struct timer_list timer; struct psmouse *psmouse; s32 abs_x; s32 abs_y; volatile long unsigned int last_touch_time; bool btn_left; bool btn_right; bool touch; }; struct cache_map { u64 start; u64 end; u64 flags; u64 type: 8; u64 fixed: 1; }; struct cacheinfo { unsigned int id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; unsigned int number_of_sets; unsigned int ways_of_associativity; unsigned int physical_line_partition; unsigned int size; cpumask_t shared_cpu_map; unsigned int attributes; void *fw_token; bool disable_sysfs; void *priv; }; struct cacheline_padding { char x[0]; }; struct cachestat { __u64 nr_cache; __u64 nr_dirty; __u64 nr_writeback; __u64 nr_evicted; __u64 nr_recently_evicted; }; struct cachestat_range { __u64 off; __u64 len; }; struct calipso_doi { u32 doi; u32 type; refcount_t refcount; struct list_head list; struct callback_head rcu; }; struct calipso_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct netlbl_lsm_cache; struct calipso_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; struct call_function_data { call_single_data_t *csd; cpumask_var_t cpumask; cpumask_var_t cpumask_ipi; }; struct callchain_cpus_entries { struct callback_head callback_head; struct perf_callchain_entry *cpu_entries[0]; }; struct callthunk_sites { s32 *call_start; s32 *call_end; struct alt_instr *alt_start; struct alt_instr *alt_end; }; struct compact_control; struct capture_control { struct compact_control *cc; struct page *page; }; struct cat_datum { u32 value; unsigned char isalias; }; struct clock_event_device; struct ce_unbind { struct clock_event_device *ce; int res; }; struct cea_exception_stacks { char DF_stack_guard[4096]; char DF_stack[8192]; char NMI_stack_guard[4096]; char NMI_stack[8192]; char DB_stack_guard[4096]; char DB_stack[8192]; char MCE_stack_guard[4096]; char MCE_stack[8192]; char VC_stack_guard[4096]; char VC_stack[8192]; char VC2_stack_guard[4096]; char VC2_stack[8192]; char IST_top_guard[4096]; }; struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; }; struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8]; __u8 prio_pg[8]; }; struct cfs_bandwidth { raw_spinlock_t lock; ktime_t period; u64 quota; u64 runtime; u64 burst; u64 runtime_snap; s64 hierarchical_quota; u8 idle; u8 period_active; u8 slack_started; struct hrtimer period_timer; struct hrtimer slack_timer; struct list_head throttled_cfs_rq; int 
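/*
 * struct cfs_bandwidth implements CFS bandwidth control: quota is the
 * runtime budget granted per period, period_timer refills it, and
 * slack_timer returns unused runtime from throttled cfs_rq instances
 * queued on throttled_cfs_rq.
 */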
nr_periods; int nr_throttled; int nr_burst; u64 throttled_time; u64 burst_time; }; struct load_weight { long unsigned int weight; u32 inv_weight; }; struct sched_avg { u64 last_update_time; u64 load_sum; u64 runnable_sum; u32 util_sum; u32 period_contrib; long unsigned int load_avg; long unsigned int runnable_avg; long unsigned int util_avg; unsigned int util_est; }; struct sched_entity; struct task_group; struct cfs_rq { struct load_weight load; unsigned int nr_running; unsigned int h_nr_running; unsigned int idle_nr_running; unsigned int idle_h_nr_running; s64 avg_vruntime; u64 avg_load; u64 min_vruntime; struct rb_root_cached tasks_timeline; struct sched_entity *curr; struct sched_entity *next; long: 64; long: 64; long: 64; long: 64; long: 64; struct sched_avg avg; struct { raw_spinlock_t lock; int nr; long unsigned int load_avg; long unsigned int util_avg; long unsigned int runnable_avg; long: 64; long: 64; long: 64; long: 64; } removed; u64 last_update_tg_load_avg; long unsigned int tg_load_avg_contrib; long int propagate; long int prop_runnable_sum; long unsigned int h_load; u64 last_h_load_update; struct sched_entity *h_load_next; struct rq *rq; int on_list; struct list_head leaf_cfs_rq_list; struct task_group *tg; int idle; int runtime_enabled; s64 runtime_remaining; u64 throttled_pelt_idle; u64 throttled_clock; u64 throttled_clock_pelt; u64 throttled_clock_pelt_time; u64 throttled_clock_self; u64 throttled_clock_self_time; int throttled; int throttle_count; struct list_head throttled_list; struct list_head throttled_csd_list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct cfs_schedulable_data { struct task_group *tg; u64 period; u64 quota; }; struct kernfs_ops; struct kernfs_open_file; struct cftype { char name[64]; long unsigned int private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); struct lock_class_key lockdep_key; }; struct cgroup_file { struct kernfs_node *kn; long unsigned int notified_at; struct timer_list notify_timer; }; struct task_cputime { u64 stime; u64 utime; long long unsigned int sum_exec_runtime; }; struct cgroup_base_stat { struct task_cputime cputime; }; struct prev_cputime { u64 utime; u64 stime; raw_spinlock_t lock; }; struct cgroup_bpf { struct bpf_prog_array *effective[38]; struct hlist_head progs[38]; u8 flags[38]; struct list_head storages; struct bpf_prog_array *inactive; struct percpu_ref refcnt; struct work_struct release_work; }; struct cgroup_freezer_state { bool freeze; int e_freeze; int nr_frozen_descendants; int nr_frozen_tasks; }; struct cgroup_root; struct cgroup_rstat_cpu; struct psi_group; struct cgroup { struct cgroup_subsys_state self; long unsigned int flags; int level; int max_depth; int nr_descendants; int nr_dying_descendants; 
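/*
 * struct cgroup (continuing below) is the core node of a cgroup
 * hierarchy; its bpf member, struct cgroup_bpf above, keeps the
 * per-attach-type program lists (progs[38]) and the RCU-published,
 * composed arrays (effective[38]) that the kernel actually runs.
 */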
int max_descendants; int nr_populated_csets; int nr_populated_domain_children; int nr_populated_threaded_children; int nr_threaded_children; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; struct cgroup_file psi_files[0]; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[10]; int nr_dying_subsys[10]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[10]; struct cgroup *dom_cgrp; struct cgroup *old_dom_cgrp; struct cgroup_rstat_cpu *rstat_cpu; struct list_head rstat_css_list; long: 64; struct cacheline_padding _pad_; struct cgroup *rstat_flush_next; struct cgroup_base_stat last_bstat; struct cgroup_base_stat bstat; struct prev_cputime prev_cputime; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct psi_group *psi; struct cgroup_bpf bpf; struct cgroup_freezer_state freezer; struct bpf_local_storage *bpf_cgrp_storage; struct cgroup *ancestors[0]; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct cgroup__safe_rcu { struct kernfs_node *kn; }; struct cgroup_cls_state { struct cgroup_subsys_state css; u32 classid; }; struct css_set; struct css_task_iter { struct cgroup_subsys *ss; unsigned int flags; struct list_head *cset_pos; struct list_head *cset_head; struct list_head *tcset_pos; struct list_head *tcset_head; struct list_head *task_pos; struct list_head *cur_tasks_head; struct css_set *cur_cset; struct css_set *cur_dcset; struct task_struct *cur_task; struct list_head iters_node; }; struct cgroup_of_peak { long unsigned int value; struct list_head list; }; struct cgroup_namespace; struct cgroup_pidlist; struct cgroup_file_ctx { struct cgroup_namespace *ns; struct { void *trigger; } psi; struct { bool started; struct css_task_iter iter; } procs; struct { struct cgroup_pidlist *pidlist; } procs1; struct cgroup_of_peak peak; }; struct kernfs_root; struct kernfs_fs_context { struct kernfs_root *root; void *ns_tag; long unsigned int magic; bool new_sb_created; }; struct cgroup_fs_context { struct kernfs_fs_context kfc; struct cgroup_root *root; struct cgroup_namespace *ns; unsigned int flags; bool cpuset_clone_children; bool none; bool all_ss; u16 subsys_mask; char *name; char *release_agent; }; struct cgroup_iter_priv { struct cgroup_subsys_state *start_css; bool visited_all; bool terminate; int order; }; struct cgroup_lsm_atype { u32 attach_btf_id; int refcnt; }; struct cgroup_taskset { struct list_head src_csets; struct list_head dst_csets; int nr_tasks; int ssid; struct list_head *csets; struct css_set *cur_cset; struct task_struct *cur_task; }; struct cgroup_mgctx { struct list_head preloaded_src_csets; struct list_head preloaded_dst_csets; struct cgroup_taskset tset; u16 ss_mask; }; struct proc_ns_operations; struct ns_common { struct dentry *stashed; const struct proc_ns_operations *ops; unsigned int inum; refcount_t count; }; struct ucounts; struct cgroup_namespace { struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; struct css_set *root_cset; }; struct cgroup_pidlist { struct { enum cgroup_filetype type; struct pid_namespace *ns; } key; pid_t *list; int length; struct list_head links; struct cgroup *owner; struct delayed_work destroy_dwork; }; struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct list_head root_list; struct callback_head rcu; long: 64; long: 64; struct cgroup 
cgrp; struct cgroup *cgrp_ancestor_storage; atomic_t nr_cgrps; unsigned int flags; char release_agent_path[4096]; char name[64]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct cgroup_rstat_cpu { struct u64_stats_sync bsync; struct cgroup_base_stat bstat; struct cgroup_base_stat last_bstat; struct cgroup_base_stat subtree_bstat; struct cgroup_base_stat last_subtree_bstat; struct cgroup *updated_children; struct cgroup *updated_next; }; struct idr { struct xarray idr_rt; unsigned int idr_base; unsigned int idr_next; }; struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_rstat_flush)(struct cgroup_subsys_state *, int); int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); int (*css_local_stat_show)(struct seq_file *, struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(void); int (*can_fork)(struct task_struct *, struct css_set *); void (*cancel_fork)(struct task_struct *, struct css_set *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*release)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init: 1; bool implicit_on_dfl: 1; bool threaded: 1; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; }; struct cgroupstats { __u64 nr_sleeping; __u64 nr_running; __u64 nr_stopped; __u64 nr_uninterruptible; __u64 nr_io_wait; }; struct cgrp_cset_link { struct cgroup *cgrp; struct css_set *cset; struct list_head cset_link; struct list_head cgrp_link; }; struct e820_entry; struct change_member { struct e820_entry *entry; long long unsigned int addr; }; struct ethnl_reply_data { struct net_device *dev; }; struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; }; struct channels_reply_data { struct ethnl_reply_data base; struct ethtool_channels channels; }; struct char_device_struct { struct char_device_struct *next; unsigned int major; unsigned int baseminor; int minorct; char name[64]; struct cdev *cdev; }; struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); }; struct check_loop_arg { struct qdisc_walker w; struct Qdisc *p; int depth; }; struct check_mount { struct vfsmount *mnt; unsigned int mounted; }; struct iolatency_grp; struct child_latency_info { spinlock_t lock; u64 last_scale_event; u64 scale_lat; u64 nr_samples; struct iolatency_grp *scale_grp; atomic_t scale_cookie; }; struct chipset { u32 vendor; u32 device; u32 class; u32 class_mask; u32 flags; void (*f)(int, int, int); }; struct chksum_ctx { u32 key; }; struct chksum_desc_ctx { u32 crc; }; struct chksum_desc_ctx___2 { __u16 crc; }; struct cipher_context { char iv[20]; char rec_seq[8]; }; struct cipso_v4_std_map_tbl; struct cipso_v4_doi { u32 doi; u32 type; union { struct cipso_v4_std_map_tbl *std; } map; u8 tags[5]; refcount_t refcount; struct list_head list; struct 
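/*
 * struct cgroup_subsys is a controller's vtable: css_alloc, css_online,
 * css_offline and css_free manage the per-cgroup state objects, while
 * the can_attach/attach and fork/exit hooks track task migration and
 * lifetime events.
 */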
callback_head rcu; }; struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; struct cipso_v4_std_map_tbl { struct { u32 *cipso; u32 *local; u32 cipso_size; u32 local_size; } lvl; struct { u32 *cipso; u32 *local; u32 cipso_size; u32 local_size; } cat; }; struct lock_list; struct circular_queue { struct lock_list *element[4096]; unsigned int front; unsigned int rear; }; struct class_attribute_string { struct class_attribute attr; char *str; }; struct class_compat { struct kobject *kobj; }; struct hashtab_node; struct hashtab { struct hashtab_node **htable; u32 size; u32 nel; }; struct symtab { struct hashtab table; u32 nprim; }; struct common_datum; struct constraint_node; struct class_datum { u32 value; char *comkey; struct common_datum *comdatum; struct symtab permissions; struct constraint_node *constraints; struct constraint_node *validatetrans; char default_user; char default_role; char default_type; char default_range; }; struct klist_iter { struct klist *i_klist; struct klist_node *i_cur; }; struct subsys_private; struct class_dev_iter { struct klist_iter ki; const struct device_type *type; struct subsys_private *sp; }; struct class_dir { struct kobject kobj; const struct class *class; }; struct class_interface { struct list_head node; const struct class *class; int (*add_dev)(struct device *); void (*remove_dev)(struct device *); }; struct clear_refs_private { enum clear_refs_types type; }; struct clock_event_device { void (*event_handler)(struct clock_event_device *); int (*set_next_event)(long unsigned int, struct clock_event_device *); int (*set_next_ktime)(ktime_t, struct clock_event_device *); ktime_t next_event; u64 max_delta_ns; u64 min_delta_ns; u32 mult; u32 shift; enum clock_event_state state_use_accessors; unsigned int features; long unsigned int retries; int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *); void (*suspend)(struct clock_event_device *); void (*resume)(struct clock_event_device *); long unsigned int min_delta_ticks; long unsigned int max_delta_ticks; const char *name; int rating; int irq; int bound_on; const struct cpumask *cpumask; struct list_head list; struct module *owner; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct clock_identity { u8 id[8]; }; struct clocksource_base; struct clocksource { u64 (*read)(struct clocksource *); u64 mask; u32 mult; u32 shift; u64 max_idle_ns; u32 maxadj; u32 uncertainty_margin; u64 max_cycles; const char *name; struct list_head list; u32 freq_khz; int rating; enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; long unsigned int flags; struct clocksource_base *base; int (*enable)(struct clocksource *); void (*disable)(struct clocksource *); void (*suspend)(struct clocksource *); void (*resume)(struct clocksource *); void (*mark_unstable)(struct clocksource *); void (*tick_stable)(struct clocksource *); struct list_head wd_list; u64 cs_last; u64 wd_last; struct module *owner; }; struct clocksource_base { enum clocksource_ids id; u32 freq_khz; u64 offset; u32 numerator; u32 denominator; }; struct clone_args { __u64 flags; __u64 pidfd; __u64 
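/*
 * struct clocksource above converts raw counter cycles to nanoseconds
 * as ns = (cycles * mult) >> shift, with maxadj and max_cycles bounding
 * the conversion error. struct clone_args (continuing below) is the
 * uapi argument block consumed by clone3(2).
 */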
child_tid; __u64 parent_tid; __u64 exit_signal; __u64 stack; __u64 stack_size; __u64 tls; __u64 set_tid; __u64 set_tid_size; __u64 cgroup; }; struct cls_bpf_head { struct list_head plist; struct idr handle_idr; struct callback_head rcu; }; struct tcf_proto; struct tcf_result { union { struct { long unsigned int class; u32 classid; }; const struct tcf_proto *goto_tp; }; }; struct tc_action; struct tcf_exts_miss_cookie_node; struct tcf_exts { __u32 type; int nr_actions; struct tc_action **actions; struct net *net; netns_tracker ns_tracker; struct tcf_exts_miss_cookie_node *miss_cookie_node; int action; int police; }; struct cls_bpf_prog { struct bpf_prog *filter; struct list_head link; struct tcf_result res; bool exts_integrated; u32 gen_flags; unsigned int in_hw_count; struct tcf_exts exts; u32 handle; u16 bpf_num_ops; struct sock_filter *bpf_ops; const char *bpf_name; struct tcf_proto *tp; struct rcu_work rwork; }; struct tcf_ematch_tree_hdr { __u16 nmatches; __u16 progid; }; struct tcf_ematch; struct tcf_ematch_tree { struct tcf_ematch_tree_hdr hdr; struct tcf_ematch *matches; }; struct cls_cgroup_head { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_proto *tp; struct rcu_work rwork; }; struct flow_dissector_key_meta { int ingress_ifindex; u16 ingress_iftype; u8 l2_miss; }; struct flow_dissector_key_control { u16 thoff; u16 addr_type; u32 flags; }; struct flow_dissector_key_basic { __be16 n_proto; u8 ip_proto; u8 padding; }; struct flow_dissector_key_eth_addrs { unsigned char dst[6]; unsigned char src[6]; }; struct flow_dissector_key_vlan { union { struct { u16 vlan_id: 12; u16 vlan_dei: 1; u16 vlan_priority: 3; }; __be16 vlan_tci; }; __be16 vlan_tpid; __be16 vlan_eth_type; u16 padding; }; struct flow_dissector_key_ipv4_addrs { __be32 src; __be32 dst; }; struct flow_dissector_key_ipv6_addrs { struct in6_addr src; struct in6_addr dst; }; struct flow_dissector_key_ports { union { __be32 ports; struct { __be16 src; __be16 dst; }; }; }; struct flow_dissector_key_icmp { struct { u8 type; u8 code; }; u16 id; }; struct flow_dissector_key_arp { __u32 sip; __u32 tip; __u8 op; unsigned char sha[6]; unsigned char tha[6]; }; struct flow_dissector_key_keyid { __be32 keyid; }; struct flow_dissector_mpls_lse { u32 mpls_ttl: 8; u32 mpls_bos: 1; u32 mpls_tc: 3; u32 mpls_label: 20; }; struct flow_dissector_key_mpls { struct flow_dissector_mpls_lse ls[7]; u8 used_lses; }; struct flow_dissector_key_tcp { __be16 flags; }; struct flow_dissector_key_ip { __u8 tos; __u8 ttl; }; struct flow_dissector_key_enc_opts { u8 data[255]; u8 len; u32 dst_opt_type; }; struct flow_dissector_key_ports_range { union { struct flow_dissector_key_ports tp; struct { struct flow_dissector_key_ports tp_min; struct flow_dissector_key_ports tp_max; }; }; }; struct flow_dissector_key_ct { u16 ct_state; u16 ct_zone; u32 ct_mark; u32 ct_labels[4]; }; struct flow_dissector_key_hash { u32 hash; }; struct flow_dissector_key_num_of_vlans { u8 num_of_vlans; }; struct flow_dissector_key_pppoe { __be16 session_id; __be16 ppp_proto; __be16 type; }; struct flow_dissector_key_l2tpv3 { __be32 session_id; }; struct flow_dissector_key_ipsec { __be32 spi; }; struct flow_dissector_key_cfm { u8 mdl_ver; u8 opcode; }; struct fl_flow_key { struct flow_dissector_key_meta meta; struct flow_dissector_key_control control; struct flow_dissector_key_control enc_control; struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; union { 
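/*
 * struct fl_flow_key aggregates every flow_dissector_key_* section the
 * flower classifier can match on; cls_fl_filter below stores both the
 * raw key and mkey, the key masked by its fl_flow_mask, which is what
 * the rhashtable lookup hashes.
 */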
struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; struct flow_dissector_key_icmp icmp; struct flow_dissector_key_arp arp; struct flow_dissector_key_keyid enc_key_id; union { struct flow_dissector_key_ipv4_addrs enc_ipv4; struct flow_dissector_key_ipv6_addrs enc_ipv6; }; struct flow_dissector_key_ports enc_tp; struct flow_dissector_key_mpls mpls; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ip ip; struct flow_dissector_key_ip enc_ip; struct flow_dissector_key_enc_opts enc_opts; struct flow_dissector_key_ports_range tp_range; struct flow_dissector_key_ct ct; struct flow_dissector_key_hash hash; struct flow_dissector_key_num_of_vlans num_of_vlans; struct flow_dissector_key_pppoe pppoe; struct flow_dissector_key_l2tpv3 l2tpv3; struct flow_dissector_key_ipsec ipsec; struct flow_dissector_key_cfm cfm; long: 0; }; struct fl_flow_mask; struct cls_fl_filter { struct fl_flow_mask *mask; struct rhash_head ht_node; struct fl_flow_key mkey; struct tcf_exts exts; struct tcf_result res; struct fl_flow_key key; struct list_head list; struct list_head hw_list; u32 handle; u32 flags; u32 in_hw_count; u8 needs_tc_skb_ext: 1; struct rcu_work rwork; struct net_device *hw_dev; refcount_t refcnt; bool deleted; }; typedef u32 (*rht_hashfn_t)(const void *, u32, u32); typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); struct rhashtable_compare_arg; typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); struct rhashtable_params { u16 nelem_hint; u16 key_len; u16 key_offset; u16 head_offset; unsigned int max_size; u16 min_size; bool automatic_shrinking; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; rht_obj_cmpfn_t obj_cmpfn; }; struct rhashtable { struct bucket_table *tbl; unsigned int key_len; unsigned int max_elems; struct rhashtable_params p; bool rhlist; struct work_struct run_work; struct mutex mutex; spinlock_t lock; atomic_t nelems; }; struct cls_fl_head { struct rhashtable ht; spinlock_t masks_lock; struct list_head masks; struct list_head hw_filters; struct rcu_work rwork; struct idr handle_idr; }; struct tc_matchall_pcnt; struct cls_mall_head { struct tcf_exts exts; struct tcf_result res; u32 handle; u32 flags; unsigned int in_hw_count; struct tc_matchall_pcnt *pf; struct rcu_work rwork; bool deleting; }; typedef void tcf_chain_head_change_t(struct tcf_proto *, void *); struct tcf_block_ext_info { enum flow_block_binder_type binder_type; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; u32 block_index; }; struct mini_Qdisc { struct tcf_proto *filter_list; struct tcf_block *block; struct gnet_stats_basic_sync *cpu_bstats; struct gnet_stats_queue *cpu_qstats; long unsigned int rcu_state; }; struct mini_Qdisc_pair { struct mini_Qdisc miniq1; struct mini_Qdisc miniq2; struct mini_Qdisc **p_miniq; }; struct clsact_sched_data { struct tcf_block *ingress_block; struct tcf_block *egress_block; struct tcf_block_ext_info ingress_block_info; struct tcf_block_ext_info egress_block_info; struct mini_Qdisc_pair miniqp_ingress; struct mini_Qdisc_pair miniqp_egress; }; struct cma { long unsigned int base_pfn; long unsigned int count; long unsigned int *bitmap; unsigned int order_per_bit; spinlock_t lock; char name[64]; bool reserve_pages_on_error; }; struct cmis_cdb_advert_rpl { u8 inst_supported; u8 read_write_len_ext; u8 resv1; u8 resv2; }; struct cmis_cdb_fw_mng_features_rpl { u8 resv1; u8 resv2; u8 start_cmd_payload_size; u8 resv3; u8 read_write_len_ext; u8 
write_mechanism; u8 resv4; u8 resv5; __be16 max_duration_start; __be16 resv6; __be16 max_duration_write; __be16 max_duration_complete; __be16 resv7; }; struct cmis_cdb_module_features_rpl { u8 resv1[34]; __be16 max_completion_time; }; struct cmis_cdb_query_status_pl { u16 response_delay; }; struct cmis_cdb_query_status_rpl { u8 length; u8 status; }; struct cmis_cdb_run_fw_image_pl { u8 resv1; u8 image_to_run; u16 delay_to_reset; }; struct cmis_cdb_start_fw_download_pl_h { __be32 image_size; __be32 resv1; }; struct cmis_cdb_start_fw_download_pl { union { struct { __be32 image_size; __be32 resv1; }; struct cmis_cdb_start_fw_download_pl_h head; }; u8 vendor_data[112]; }; struct cmis_cdb_write_fw_block_lpl_pl { __be32 block_address; u8 fw_block[116]; }; struct cmis_fw_update_fw_mng_features { u8 start_cmd_payload_size; u16 max_duration_start; u16 max_duration_write; u16 max_duration_complete; }; struct cmis_password_entry_pl { __be32 password; }; struct cmis_rev_rpl { u8 rev; }; struct cmis_wait_for_cond_rpl { u8 state; }; struct cmsghdr { __kernel_size_t cmsg_len; int cmsg_level; int cmsg_type; }; struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; }; struct kernel_ethtool_coalesce { u8 use_cqe_mode_tx; u8 use_cqe_mode_rx; u32 tx_aggr_max_bytes; u32 tx_aggr_max_frames; u32 tx_aggr_time_usecs; }; struct coalesce_reply_data { struct ethnl_reply_data base; struct ethtool_coalesce coalesce; struct kernel_ethtool_coalesce kernel_coalesce; u32 supported_params; }; struct codel_params { codel_time_t target; codel_time_t ce_threshold; codel_time_t interval; u32 mtu; bool ecn; u8 ce_threshold_selector; u8 ce_threshold_mask; }; struct codel_skb_cb { codel_time_t enqueue_time; unsigned int mem_usage; }; struct codel_stats { u32 maxpacket; u32 drop_count; u32 drop_len; u32 ecn_mark; u32 ce_mark; }; struct codel_vars { u32 count; u32 lastcount; bool dropping; u16 rec_inv_sqrt; codel_time_t first_above_time; codel_time_t drop_next; codel_time_t ldelay; }; struct collapse_control { bool is_khugepaged; u32 node_load[64]; nodemask_t alloc_nmask; }; struct commit_header { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; unsigned char h_chksum_type; unsigned char h_chksum_size; unsigned char h_padding[2]; __be32 h_chksum[8]; __be64 h_commit_sec; __be32 h_commit_nsec; }; struct lsm_network_audit; struct lsm_ioctlop_audit; struct lsm_ibpkey_audit; struct lsm_ibendport_audit; struct selinux_audit_data; struct common_audit_data { char type; union { struct path path; struct dentry *dentry; struct inode *inode; struct lsm_network_audit *net; int cap; int ipc_id; struct task_struct *tsk; struct { key_serial_t key; char *key_desc; } key_struct; char *kmod_name; struct lsm_ioctlop_audit *op; struct file *file; struct lsm_ibpkey_audit *ibpkey; struct lsm_ibendport_audit *ibendport; int reason; const char *anonclass; } u; union { struct selinux_audit_data *selinux_audit_data; 
}; }; struct common_datum { u32 value; struct symtab permissions; }; struct zone; struct compact_control { struct list_head freepages[11]; struct list_head migratepages; unsigned int nr_freepages; unsigned int nr_migratepages; long unsigned int free_pfn; long unsigned int migrate_pfn; long unsigned int fast_start_pfn; struct zone *zone; long unsigned int total_migrate_scanned; long unsigned int total_free_scanned; short unsigned int fast_search_fail; short int search_order; const gfp_t gfp_mask; int order; int migratetype; const unsigned int alloc_flags; const int highest_zoneidx; enum migrate_mode mode; bool ignore_skip_hint; bool no_set_skip_hint; bool ignore_block_suitable; bool direct_compaction; bool proactive_compaction; bool whole_zone; bool contended; bool finish_pageblock; bool alloc_contig; }; struct compat_group_filter { union { struct { __u32 gf_interface_aux; struct __kernel_sockaddr_storage gf_group_aux; __u32 gf_fmode_aux; __u32 gf_numsrc_aux; struct __kernel_sockaddr_storage gf_slist[1]; } __attribute__((packed)); struct { __u32 gf_interface; struct __kernel_sockaddr_storage gf_group; __u32 gf_fmode; __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist_flex[0]; } __attribute__((packed)); }; }; struct compat_group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group; } __attribute__((packed)); struct compat_group_source_req { __u32 gsr_interface; struct __kernel_sockaddr_storage gsr_group; struct __kernel_sockaddr_storage gsr_source; } __attribute__((packed)); struct compat_if_settings { unsigned int type; unsigned int size; compat_uptr_t ifs_ifsu; }; struct compat_ifconf { compat_int_t ifc_len; compat_caddr_t ifcbuf; }; struct compat_ifmap { compat_ulong_t mem_start; compat_ulong_t mem_end; short unsigned int base_addr; unsigned char irq; unsigned char dma; unsigned char port; }; struct compat_ifreq { union { char ifrn_name[16]; } ifr_ifrn; union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short int ifru_flags; compat_int_t ifru_ivalue; compat_int_t ifru_mtu; struct compat_ifmap ifru_map; char ifru_slave[16]; char ifru_newname[16]; compat_caddr_t ifru_data; struct compat_if_settings ifru_settings; } ifr_ifru; }; struct compat_iovec { compat_uptr_t iov_base; compat_size_t iov_len; }; struct compat_msghdr { compat_uptr_t msg_name; compat_int_t msg_namelen; compat_uptr_t msg_iov; compat_size_t msg_iovlen; compat_uptr_t msg_control; compat_size_t msg_controllen; compat_uint_t msg_flags; }; struct compat_mmsghdr { struct compat_msghdr msg_hdr; compat_uint_t msg_len; }; struct compat_sock_fprog { u16 len; compat_uptr_t filter; }; struct component_ops; struct component { struct list_head node; struct aggregate_device *adev; bool bound; const struct component_ops *ops; int subcomponent; struct device *dev; }; struct component_master_ops { int (*bind)(struct device *); void (*unbind)(struct device *); }; struct component_match_array; struct component_match { size_t alloc; size_t num; struct component_match_array *compare; }; struct component_match_array { void *data; int (*compare)(struct device *, void *); int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; }; struct component_ops { int (*bind)(struct device *, struct device *, void *); void (*unbind)(struct device *, struct device *, void *); }; typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, 
long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); struct compress_format { unsigned char magic[2]; const char *name; decompress_fn decompressor; }; struct consw; struct con_driver { const struct consw *con; const char *desc; struct device *dev; int node; int first; int last; int flag; }; struct cond_av_list { struct avtab_node **nodes; u32 len; }; struct cond_bool_datum { __u32 value; int state; }; struct cond_expr_node; struct cond_expr { struct cond_expr_node *nodes; u32 len; }; struct cond_expr_node { u32 expr_type; u32 boolean; }; struct policydb; struct cond_insertf_data { struct policydb *p; struct avtab_node **dst; struct cond_av_list *other; }; struct cond_node { int cur_state; struct cond_expr expr; struct cond_av_list true_list; struct cond_av_list false_list; }; struct conntrack_gc_work { struct delayed_work dwork; u32 next_bucket; u32 avg_timeout; u32 count; u32 start_time; bool exiting; bool early_drop; }; struct console; struct printk_buffers; struct nbcon_context { struct console *console; unsigned int spinwait_max_us; enum nbcon_prio prio; unsigned int allow_unsafe_takeover: 1; unsigned int backlog: 1; struct printk_buffers *pbufs; u64 seq; }; struct tty_driver; struct nbcon_write_context; struct console { char name[16]; void (*write)(struct console *, const char *, unsigned int); int (*read)(struct console *, char *, unsigned int); struct tty_driver * (*device)(struct console *, int *); void (*unblank)(void); int (*setup)(struct console *, char *); int (*exit)(struct console *); int (*match)(struct console *, char *, int, char *); short int flags; short int index; int cflag; uint ispeed; uint ospeed; u64 seq; long unsigned int dropped; void *data; struct hlist_node node; void (*write_atomic)(struct console *, struct nbcon_write_context *); void (*write_thread)(struct console *, struct nbcon_write_context *); void (*device_lock)(struct console *, long unsigned int *); void (*device_unlock)(struct console *, long unsigned int); atomic_t nbcon_state; atomic_long_t nbcon_seq; struct nbcon_context nbcon_device_ctxt; atomic_long_t nbcon_prev_seq; struct printk_buffers *pbufs; struct task_struct *kthread; struct rcuwait rcuwait; struct irq_work irq_work; }; struct console_cmdline { char name[16]; int index; char devname[32]; bool user_specified; char *options; }; struct console_flush_type { bool nbcon_atomic; bool nbcon_offload; bool legacy_direct; bool legacy_offload; }; struct console_font { unsigned int width; unsigned int height; unsigned int charcount; unsigned char *data; }; struct console_font_op { unsigned int op; unsigned int flags; unsigned int width; unsigned int height; unsigned int charcount; unsigned char *data; }; struct constant_table { const char *name; int value; }; struct ebitmap_node; struct ebitmap { struct ebitmap_node *node; u32 highbit; }; struct type_set; struct constraint_expr { u32 expr_type; u32 attr; u32 op; struct ebitmap names; struct type_set *type_names; struct constraint_expr *next; }; struct constraint_node { u32 permissions; struct constraint_expr *expr; struct constraint_node *next; }; struct vc_data; struct consw { struct module *owner; const char * (*con_startup)(void); void (*con_init)(struct vc_data *, bool); void (*con_deinit)(struct vc_data *); void (*con_clear)(struct vc_data *, unsigned int, unsigned int, unsigned int); void (*con_putc)(struct vc_data *, u16, unsigned int, unsigned int); void (*con_putcs)(struct vc_data *, const u16 *, unsigned int, unsigned int, unsigned int); 
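/*
 * struct console above carries this kernel's nbcon (atomic console)
 * state: the write_atomic/write_thread callbacks plus nbcon_state and
 * nbcon_seq. struct consw, continuing below, is the lower-level VT
 * console driver vtable operating on vc_data.
 */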
void (*con_cursor)(struct vc_data *, bool); bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); bool (*con_switch)(struct vc_data *); bool (*con_blank)(struct vc_data *, enum vesa_blank_mode, bool); int (*con_font_set)(struct vc_data *, const struct console_font *, unsigned int, unsigned int); int (*con_font_get)(struct vc_data *, struct console_font *, unsigned int); int (*con_font_default)(struct vc_data *, struct console_font *, const char *); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, bool); void (*con_set_palette)(struct vc_data *, const unsigned char *); void (*con_scrolldelta)(struct vc_data *, int); bool (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); void (*con_invert_region)(struct vc_data *, u16 *, int); void (*con_debug_enter)(struct vc_data *); void (*con_debug_leave)(struct vc_data *); }; struct microcode_amd; struct cont_desc { struct microcode_amd *mc; u32 psize; u8 *data; size_t size; }; struct container_dev { struct device dev; int (*offline)(struct container_dev *); }; struct mls_level { u32 sens; struct ebitmap cat; }; struct mls_range { struct mls_level level[2]; }; struct context___2 { u32 user; u32 role; u32 type; u32 len; struct mls_range range; char *str; }; struct context_tracking { atomic_t state; long int nesting; long int nmi_nesting; }; struct contig_page_info { long unsigned int free_pages; long unsigned int free_blocks_total; long unsigned int free_blocks_suitable; }; struct convert_context_args { struct policydb *oldp; struct policydb *newp; }; struct cooling_spec { long unsigned int upper; long unsigned int lower; unsigned int weight; }; struct copy_subpage_arg { struct folio *dst; struct folio *src; struct vm_area_struct *vma; }; struct core_name { char *corename; int used; int size; }; struct core_thread { struct task_struct *task; struct core_thread *next; }; struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; }; struct core_text { long unsigned int base; long unsigned int end; const char *name; }; struct core_vma_metadata { long unsigned int start; long unsigned int end; long unsigned int flags; long unsigned int dump_size; long unsigned int pgoff; struct file *file; }; struct kernel_siginfo; typedef struct kernel_siginfo kernel_siginfo_t; struct coredump_params { const kernel_siginfo_t *siginfo; struct file *file; long unsigned int limit; long unsigned int mm_flags; int cpu; loff_t written; loff_t pos; loff_t to_skip; int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; }; struct pgprot { pgprotval_t pgprot; }; typedef struct pgprot pgprot_t; struct cpa_data { long unsigned int *vaddr; pgd_t *pgd; pgprot_t mask_set; pgprot_t mask_clr; long unsigned int numpages; long unsigned int curpage; long unsigned int pfn; unsigned int flags; unsigned int force_split: 1; unsigned int force_static_prot: 1; unsigned int force_flush_all: 1; struct page **pages; }; struct cpc_reg { u8 descriptor; u16 length; u8 space_id; u8 bit_width; u8 bit_offset; u8 access_width; u64 address; } __attribute__((packed)); struct cpc_register_resource { acpi_object_type type; u64 *sys_mem_vaddr; union { struct cpc_reg reg; u64 int_value; } cpc_entry; }; struct cpc_desc { int num_entries; int version; int cpu_id; int write_cmd_status; int write_cmd_id; spinlock_t rmw_lock; struct cpc_register_resource cpc_regs[21]; struct acpi_psd_package 
domain_info; struct kobject kobj; }; struct cper_sec_proc_arm { u32 validation_bits; u16 err_info_num; u16 context_info_num; u32 section_length; u8 affinity_level; u8 reserved[3]; u64 mpidr; u64 midr; u32 running_state; u32 psci_state; }; struct cpio_data { void *data; size_t size; char name[18]; }; struct cppc_perf_caps { u32 guaranteed_perf; u32 highest_perf; u32 nominal_perf; u32 lowest_perf; u32 lowest_nonlinear_perf; u32 lowest_freq; u32 nominal_freq; u32 energy_perf; bool auto_sel; }; struct cppc_perf_ctrls { u32 max_perf; u32 min_perf; u32 desired_perf; u32 energy_perf; }; struct cppc_perf_fb_ctrs { u64 reference; u64 delivered; u64 reference_perf; u64 wraparound_time; }; struct cppc_cpudata { struct list_head node; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; struct cppc_perf_fb_ctrs perf_fb_ctrs; unsigned int shared_type; cpumask_var_t shared_cpu_map; }; struct pcc_mbox_chan; struct cppc_pcc_data { struct pcc_mbox_chan *pcc_channel; void *pcc_comm_addr; bool pcc_channel_acquired; unsigned int deadline_us; unsigned int pcc_mpar; unsigned int pcc_mrtt; unsigned int pcc_nominal; bool pending_pcc_write_cmd; bool platform_owns_pcc; unsigned int pcc_write_cnt; struct rw_semaphore pcc_lock; wait_queue_head_t pcc_write_wait_q; ktime_t last_cmd_cmpl_time; ktime_t last_mpar_reset; int mpar_count; int refcount; }; struct cpu { int node_id; int hotpluggable; struct device dev; }; struct cpu_attr { struct device_attribute attr; const struct cpumask * const map; }; struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int per_cpu_data_slice_size; unsigned int num_levels; unsigned int num_leaves; bool cpu_map_populated; bool early_ci_levels; }; struct update_util_data { void (*func)(struct update_util_data *, u64, unsigned int); }; struct policy_dbs_info; struct cpu_dbs_info { u64 prev_cpu_idle; u64 prev_update_time; u64 prev_cpu_nice; unsigned int prev_load; struct update_util_data update_util; struct policy_dbs_info *policy_dbs; }; struct cpuinfo_x86; struct cpu_dev { const char *c_vendor; const char *c_ident[2]; void (*c_early_init)(struct cpuinfo_x86 *); void (*c_bsp_init)(struct cpuinfo_x86 *); void (*c_init)(struct cpuinfo_x86 *); void (*c_identify)(struct cpuinfo_x86 *); void (*c_detect_tlb)(struct cpuinfo_x86 *); int c_x86_vendor; }; struct cpu_down_work { unsigned int cpu; enum cpuhp_state target; }; struct entry_stack { char stack[4096]; }; struct entry_stack_page { struct entry_stack stack; }; struct x86_hw_tss { u32 reserved1; u64 sp0; u64 sp1; u64 sp2; u64 reserved2; u64 ist[7]; u32 reserved3; u32 reserved4; u16 reserved5; u16 io_bitmap_base; } __attribute__((packed)); struct x86_io_bitmap { u64 prev_sequence; unsigned int prev_max; long unsigned int bitmap[1025]; long unsigned int mapall[1025]; }; struct tss_struct { struct x86_hw_tss x86_tss; struct x86_io_bitmap io_bitmap; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; 
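/*
 * The padding that follows rounds struct tss_struct up to a whole
 * number of pages: the TSS is mapped at a fixed per-CPU address inside
 * cpu_entry_area (defined below), so its size and alignment are part
 * of the layout contract.
 */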
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; 
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct debug_store_buffers { char bts_buffer[65536]; char pebs_buffer[65536]; }; struct cpu_entry_area { char gdt[4096]; struct entry_stack_page entry_stack_page; struct tss_struct tss; struct cea_exception_stacks estacks; struct debug_store cpu_debug_store; struct debug_store_buffers cpu_debug_buffers; }; struct folio_batch { unsigned char nr; unsigned char i; bool percpu_pvec_drained; struct folio *folios[31]; }; struct cpu_fbatches { local_lock_t lock; struct folio_batch lru_add; struct folio_batch lru_deactivate_file; struct folio_batch lru_deactivate; struct folio_batch lru_lazyfree; struct folio_batch lru_activate; local_lock_t lock_irq; struct folio_batch lru_move_tail; }; struct perf_branch_entry { __u64 from; __u64 to; __u64 mispred: 1; __u64 predicted: 1; __u64 in_tx: 1; __u64 abort: 1; __u64 cycles: 16; __u64 type: 4; __u64 spec: 2; __u64 new_type: 4; __u64 priv: 3; __u64 reserved: 31; }; struct perf_branch_stack { __u64 nr; __u64 hw_idx; struct perf_branch_entry entries[0]; }; struct perf_guest_switch_msr { unsigned int msr; u64 host; u64 guest; }; struct er_account; struct intel_shared_regs; struct intel_excl_cntrs; struct cpu_hw_events { struct perf_event *events[64]; long unsigned int active_mask[1]; long unsigned int dirty[1]; int enabled; int n_events; int n_added; int n_txn; int n_txn_pair; int n_txn_metric; int assign[64]; u64 tags[64]; struct perf_event *event_list[64]; struct event_constraint *event_constraint[64]; int n_excl; unsigned int txn_flags; int is_fake; struct debug_store *ds; void *ds_pebs_vaddr; void *ds_bts_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; int n_pebs_via_pt; int pebs_output; u64 pebs_data_cfg; u64 active_pebs_data_cfg; int pebs_record_size; u64 fixed_ctrl_val; u64 active_fixed_ctrl_val; int lbr_users; int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[32]; u64 lbr_counters[32]; union { struct er_account *lbr_sel; struct er_account *lbr_ctl; }; u64 br_sel; void *last_task_ctx; int last_log_id; int lbr_select; void *lbr_xsave; u64 intel_ctrl_guest_mask; u64 intel_ctrl_host_mask; struct perf_guest_switch_msr guest_switch_msrs[64]; u64 intel_cp_status; struct intel_shared_regs *shared_regs; struct event_constraint *constraint_list; struct intel_excl_cntrs *excl_cntrs; int excl_thread_id; u64 tfa_shadow; int n_metric; struct amd_nb *amd_nb; int brs_active; u64 perf_ctr_virt_mask; int n_pair; void *kfree_on_online[2]; struct pmu *pmu; }; struct cpu_itimer { u64 expires; u64 incr; }; struct cpu_perf_ibs { struct perf_event *event; long unsigned int state[1]; }; struct cpu_rmap { struct kref refcount; u16 size; void **obj; struct { u16 index; u16 dist; } near[0]; }; struct cpu_signature { unsigned int sig; unsigned int pf; unsigned int rev; }; struct cpu_stop_done { atomic_t nr_todo; int ret; struct 
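/*
 * struct cpu_entry_area above is the fixed per-CPU mapping holding the
 * GDT, entry stack, TSS, exception stacks and debug-store buffers; it
 * remains mapped even with page-table isolation, which is why these
 * structures are grouped and padded so carefully.
 */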
completion completion; }; typedef int (*cpu_stop_fn_t)(void *); struct cpu_stop_work { struct list_head list; cpu_stop_fn_t fn; long unsigned int caller; void *arg; struct cpu_stop_done *done; }; struct cpu_stopper { struct task_struct *thread; raw_spinlock_t lock; bool enabled; struct list_head works; struct cpu_stop_work stop_work; long unsigned int caller; cpu_stop_fn_t fn; }; struct cpu_timer { struct timerqueue_node node; struct timerqueue_head *head; struct pid *pid; struct list_head elist; int firing; struct task_struct *handling; }; struct cpu_vfs_cap_data { __u32 magic_etc; kuid_t rootid; kernel_cap_t permitted; kernel_cap_t inheritable; }; struct kernel_cpustat; struct cpuacct { struct cgroup_subsys_state css; u64 *cpuusage; struct kernel_cpustat *cpustat; }; struct pstate_data { int current_pstate; int min_pstate; int max_pstate; int max_pstate_physical; int perf_ctl_scaling; int scaling; int turbo_pstate; unsigned int min_freq; unsigned int max_freq; unsigned int turbo_freq; }; struct vid_data { int min; int max; int turbo; int32_t ratio; }; struct sample { int32_t core_avg_perf; int32_t busy_scaled; u64 aperf; u64 mperf; u64 tsc; u64 time; }; struct cpudata { int cpu; unsigned int policy; struct update_util_data update_util; bool update_util_set; struct pstate_data pstate; struct vid_data vid; u64 last_update; u64 last_sample_time; u64 aperf_mperf_shift; u64 prev_aperf; u64 prev_mperf; u64 prev_tsc; struct sample sample; int32_t min_perf_ratio; int32_t max_perf_ratio; struct acpi_processor_performance acpi_perf_data; bool valid_pss_table; unsigned int iowait_boost; s16 epp_powersave; s16 epp_policy; s16 epp_default; s16 epp_cached; u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; unsigned int capacity_perf; unsigned int sched_flags; u32 hwp_boost_min; bool suspended; struct delayed_work hwp_notify_work; }; struct cpudl_item; struct cpudl { raw_spinlock_t lock; int size; cpumask_var_t free_cpus; struct cpudl_item *elements; }; struct cpudl_item { u64 dl; int cpu; int idx; }; struct cpufreq_cpuinfo { unsigned int max_freq; unsigned int min_freq; unsigned int transition_latency; }; struct cpufreq_policy; struct cpufreq_policy_data; struct freq_attr; struct cpufreq_driver { char name[16]; u16 flags; void *driver_data; int (*init)(struct cpufreq_policy *); int (*verify)(struct cpufreq_policy_data *); int (*setpolicy)(struct cpufreq_policy *); int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); int (*target_index)(struct cpufreq_policy *, unsigned int); unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); void (*adjust_perf)(unsigned int, long unsigned int, long unsigned int, long unsigned int); unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); int (*target_intermediate)(struct cpufreq_policy *, unsigned int); unsigned int (*get)(unsigned int); void (*update_limits)(unsigned int); int (*bios_limit)(int, unsigned int *); int (*online)(struct cpufreq_policy *); int (*offline)(struct cpufreq_policy *); void (*exit)(struct cpufreq_policy *); int (*suspend)(struct cpufreq_policy *); int (*resume)(struct cpufreq_policy *); void (*ready)(struct cpufreq_policy *); struct freq_attr **attr; bool boost_enabled; int (*set_boost)(struct cpufreq_policy *, int); void (*register_em)(struct cpufreq_policy *); }; struct cpufreq_freqs { struct cpufreq_policy *policy; unsigned int old; unsigned int new; u8 flags; }; struct cpufreq_frequency_table { unsigned int flags; unsigned int driver_data; unsigned int frequency; }; struct 
cpufreq_governor { char name[16]; int (*init)(struct cpufreq_policy *); void (*exit)(struct cpufreq_policy *); int (*start)(struct cpufreq_policy *); void (*stop)(struct cpufreq_policy *); void (*limits)(struct cpufreq_policy *); ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); int (*store_setspeed)(struct cpufreq_policy *, unsigned int); struct list_head governor_list; struct module *owner; u8 flags; }; struct plist_head { struct list_head node_list; }; struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; }; struct freq_constraints { struct pm_qos_constraints min_freq; struct blocking_notifier_head min_freq_notifiers; struct pm_qos_constraints max_freq; struct blocking_notifier_head max_freq_notifiers; }; struct cpufreq_stats; struct cpufreq_policy { cpumask_var_t cpus; cpumask_var_t related_cpus; cpumask_var_t real_cpus; unsigned int shared_type; unsigned int cpu; struct clk *clk; struct cpufreq_cpuinfo cpuinfo; unsigned int min; unsigned int max; unsigned int cur; unsigned int suspend_freq; unsigned int policy; unsigned int last_policy; struct cpufreq_governor *governor; void *governor_data; char last_governor[16]; struct work_struct update; struct freq_constraints constraints; struct freq_qos_request *min_freq_req; struct freq_qos_request *max_freq_req; struct cpufreq_frequency_table *freq_table; enum cpufreq_table_sorting freq_table_sorted; struct list_head policy_list; struct kobject kobj; struct completion kobj_unregister; struct rw_semaphore rwsem; bool fast_switch_possible; bool fast_switch_enabled; bool strict_target; bool efficiencies_available; unsigned int transition_delay_us; bool dvfs_possible_from_any_cpu; bool boost_enabled; unsigned int cached_target_freq; unsigned int cached_resolved_idx; bool transition_ongoing; spinlock_t transition_lock; wait_queue_head_t transition_wait; struct task_struct *transition_task; struct cpufreq_stats *stats; void *driver_data; struct thermal_cooling_device *cdev; struct notifier_block nb_min; struct notifier_block nb_max; }; struct cpufreq_policy_data { struct cpufreq_cpuinfo cpuinfo; struct cpufreq_frequency_table *freq_table; unsigned int cpu; unsigned int min; unsigned int max; }; struct cpufreq_stats { unsigned int total_trans; long long unsigned int last_time; unsigned int max_state; unsigned int state_num; unsigned int last_index; u64 *time_in_state; unsigned int *freq_table; unsigned int *trans_table; unsigned int reset_pending; long long unsigned int reset_time; }; struct cpuhp_cpu_state { enum cpuhp_state state; enum cpuhp_state target; enum cpuhp_state fail; struct task_struct *thread; bool should_run; bool rollback; bool single; bool bringup; struct hlist_node *node; struct hlist_node *last; enum cpuhp_state cb_state; int result; atomic_t ap_sync_state; struct completion done_up; struct completion done_down; }; struct cpuhp_step { const char *name; union { int (*single)(unsigned int); int (*multi)(unsigned int, struct hlist_node *); } startup; union { int (*single)(unsigned int); int (*multi)(unsigned int, struct hlist_node *); } teardown; struct hlist_head list; bool cant_stop; bool multi_instance; }; union cpuid10_eax { struct { unsigned int version_id: 8; unsigned int num_counters: 8; unsigned int bit_width: 8; unsigned int mask_length: 8; } split; unsigned int full; }; union cpuid10_ebx { struct { unsigned int no_unhalted_core_cycles: 1; unsigned int no_instructions_retired: 1; unsigned int 
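/*
 * The cpuid10_* unions decode CPUID leaf 0xA (architectural performance
 * monitoring): eax reports the counter count and width, ebx flags
 * architecturally defined events that are not available, and edx
 * describes the fixed-function counters.
 */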
no_unhalted_reference_cycles: 1; unsigned int no_llc_reference: 1; unsigned int no_llc_misses: 1; unsigned int no_branch_instruction_retired: 1; unsigned int no_branch_misses_retired: 1; } split; unsigned int full; }; union cpuid10_edx { struct { unsigned int num_counters_fixed: 5; unsigned int bit_width_fixed: 8; unsigned int reserved1: 2; unsigned int anythread_deprecated: 1; unsigned int reserved2: 16; } split; unsigned int full; }; union cpuid28_eax { struct { unsigned int lbr_depth_mask: 8; unsigned int reserved: 22; unsigned int lbr_deep_c_reset: 1; unsigned int lbr_lip: 1; } split; unsigned int full; }; union cpuid28_ebx { struct { unsigned int lbr_cpl: 1; unsigned int lbr_filter: 1; unsigned int lbr_call_stack: 1; } split; unsigned int full; }; union cpuid28_ecx { struct { unsigned int lbr_mispred: 1; unsigned int lbr_timed_lbr: 1; unsigned int lbr_br_type: 1; unsigned int reserved: 13; unsigned int lbr_counters: 4; } split; unsigned int full; }; union cpuid_0x80000022_ebx { struct { unsigned int num_core_pmc: 4; unsigned int lbr_v2_stack_sz: 6; unsigned int num_df_pmc: 6; unsigned int num_umc_pmc: 6; } split; unsigned int full; }; union cpuid_1_eax { struct { __u32 stepping: 4; __u32 model: 4; __u32 family: 4; __u32 __reserved0: 4; __u32 ext_model: 4; __u32 ext_fam: 8; __u32 __reserved1: 4; }; __u32 full; }; struct cpuid_bit { u16 feature; u8 reg; u8 bit; u32 level; u32 sub_leaf; }; struct cpuid_dep { unsigned int feature; unsigned int depends; }; struct cpuid_dependent_feature { u32 feature; u32 level; }; struct cpuid_regs { u32 eax; u32 ebx; u32 ecx; u32 edx; }; struct cpuid_regs_done { struct cpuid_regs regs; struct completion done; }; struct cpuidle_device; struct cpuidle_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_device *, char *); ssize_t (*store)(struct cpuidle_device *, const char *, size_t); }; struct cpuidle_state_usage { long long unsigned int disable; long long unsigned int usage; u64 time_ns; long long unsigned int above; long long unsigned int below; long long unsigned int rejected; long long unsigned int s2idle_usage; long long unsigned int s2idle_time; }; struct cpuidle_driver_kobj; struct cpuidle_state_kobj; struct cpuidle_device_kobj; struct cpuidle_device { unsigned int registered: 1; unsigned int enabled: 1; unsigned int poll_time_limit: 1; unsigned int cpu; ktime_t next_hrtimer; int last_state_idx; u64 last_residency_ns; u64 poll_limit_ns; u64 forced_idle_latency_limit_ns; struct cpuidle_state_usage states_usage[10]; struct cpuidle_state_kobj *kobjs[10]; struct cpuidle_driver_kobj *kobj_driver; struct cpuidle_device_kobj *kobj_dev; struct list_head device_list; }; struct cpuidle_device_kobj { struct cpuidle_device *dev; struct completion kobj_unregister; struct kobject kobj; }; struct cpuidle_driver; struct cpuidle_state { char name[16]; char desc[32]; s64 exit_latency_ns; s64 target_residency_ns; unsigned int flags; unsigned int exit_latency; int power_usage; unsigned int target_residency; int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); int (*enter_dead)(struct cpuidle_device *, int); int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); }; struct cpuidle_driver { const char *name; struct module *owner; unsigned int bctimer: 1; struct cpuidle_state states[10]; int state_count; int safe_state_index; struct cpumask *cpumask; const char *governor; }; struct cpuidle_governor { char name[16]; struct list_head governor_list; unsigned int rating; int (*enable)(struct cpuidle_driver *, struct cpuidle_device 
*); void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); void (*reflect)(struct cpuidle_device *, int); }; struct cpuidle_state_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); }; struct cpuidle_state_kobj { struct cpuidle_state *state; struct cpuidle_state_usage *state_usage; struct completion kobj_unregister; struct kobject kobj; struct cpuidle_device *device; }; struct cpuinfo_topology { u32 apicid; u32 initial_apicid; u32 pkg_id; u32 die_id; u32 cu_id; u32 core_id; u32 logical_pkg_id; u32 logical_die_id; u32 amd_node_id; u32 llc_id; u32 l2c_id; }; struct cpuinfo_x86 { union { struct { __u8 x86_model; __u8 x86; __u8 x86_vendor; __u8 x86_reserved; }; __u32 x86_vfm; }; __u8 x86_stepping; int x86_tlbsize; __u32 vmx_capability[5]; __u8 x86_virt_bits; __u8 x86_phys_bits; __u32 extended_cpuid_level; int cpuid_level; union { __u32 x86_capability[24]; long unsigned int x86_capability_alignment; }; char x86_vendor_id[16]; char x86_model_id[64]; struct cpuinfo_topology topo; unsigned int x86_cache_size; int x86_cache_alignment; int x86_cache_max_rmid; int x86_cache_occ_scale; int x86_cache_mbm_width_offset; int x86_power; long unsigned int loops_per_jiffy; u64 ppin; u16 x86_clflush_size; u16 booted_cores; u16 cpu_index; bool smt_active; u32 microcode; u8 x86_cache_bits; unsigned int initialized: 1; }; struct cpumap { unsigned int available; unsigned int allocated; unsigned int managed; unsigned int managed_allocated; bool initialized; bool online; long unsigned int *managed_map; long unsigned int alloc_map[0]; }; union cpumask_rcuhead { cpumask_t cpumask; struct callback_head rcu; }; struct cpupri_vec { atomic_t count; cpumask_var_t mask; }; struct cpupri { struct cpupri_vec pri_to_cpu[101]; int *cpu_to_pri; }; struct fmeter { int cnt; int val; time64_t time; spinlock_t lock; }; struct uf_node { struct uf_node *parent; unsigned int rank; }; struct cpuset { struct cgroup_subsys_state css; long unsigned int flags; cpumask_var_t cpus_allowed; nodemask_t mems_allowed; cpumask_var_t effective_cpus; nodemask_t effective_mems; cpumask_var_t effective_xcpus; cpumask_var_t exclusive_cpus; nodemask_t old_mems_allowed; struct fmeter fmeter; int attach_in_progress; int relax_domain_level; int nr_subparts; int partition_root_state; int nr_deadline_tasks; int nr_migrate_dl_tasks; u64 sum_migrate_dl_bw; enum prs_errcode prs_err; struct cgroup_file partition_file; struct list_head remote_sibling; struct uf_node node; }; struct cpuset_migrate_mm_work { struct work_struct work; struct mm_struct *mm; nodemask_t from; nodemask_t to; }; struct cramfs_info { __u32 crc; __u32 edition; __u32 blocks; __u32 files; }; struct cramfs_inode { __u32 mode: 16; __u32 uid: 16; __u32 size: 24; __u32 gid: 8; __u32 namelen: 6; __u32 offset: 26; }; struct cramfs_super { __u32 magic; __u32 size; __u32 flags; __u32 future; __u8 signature[16]; struct cramfs_info fsid; __u8 name[16]; struct cramfs_inode root; }; struct range { u64 start; u64 end; }; struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; struct range ranges[0]; }; struct crb_regs_head; struct crb_regs_tail; struct crb_priv { u32 sm; const char *hid; struct crb_regs_head *regs_h; struct crb_regs_tail *regs_t; u8 *cmd; u8 *rsp; u32 cmd_size; u32 smc_func_id; u32 *pluton_start_addr; u32 *pluton_reply_addr; }; struct 
crb_regs_head { u32 loc_state; u32 reserved1; u32 loc_ctrl; u32 loc_sts; u8 reserved2[32]; u64 intf_id; u64 ctrl_ext; }; struct crb_regs_tail { u32 ctrl_req; u32 ctrl_sts; u32 ctrl_cancel; u32 ctrl_start; u32 ctrl_int_enable; u32 ctrl_int_sts; u32 ctrl_cmd_size; u32 ctrl_cmd_pa_low; u32 ctrl_cmd_pa_high; u32 ctrl_rsp_size; u64 ctrl_rsp_pa; }; struct group_info; struct cred { atomic_long_t usage; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct ucounts *ucounts; struct group_info *group_info; union { int non_rcu; struct callback_head rcu; }; }; struct crng { u8 key[32]; long unsigned int generation; local_lock_t lock; }; struct crs_csi2 { struct list_head entry; acpi_handle handle; struct acpi_device_software_nodes *swnodes; struct list_head connections; u32 port_count; }; struct crs_csi2_connection { struct list_head entry; struct acpi_resource_csi2_serialbus csi2_data; acpi_handle remote_handle; char remote_name[0]; }; struct crypto_tfm { refcount_t refcnt; u32 crt_flags; int node; void (*exit)(struct crypto_tfm *); struct crypto_alg *__crt_alg; void *__crt_ctx[0]; }; struct crypto_acomp { int (*compress)(struct acomp_req *); int (*decompress)(struct acomp_req *); void (*dst_free)(struct scatterlist *); unsigned int reqsize; struct crypto_tfm base; }; struct crypto_aead { unsigned int authsize; unsigned int reqsize; struct crypto_tfm base; }; struct crypto_aead_spawn { struct crypto_spawn base; }; struct crypto_aes_ctx { u32 key_enc[60]; u32 key_dec[60]; u32 key_length; }; struct crypto_ahash { bool using_shash; unsigned int statesize; unsigned int reqsize; struct crypto_tfm base; }; struct crypto_akcipher { unsigned int reqsize; struct crypto_tfm base; }; struct crypto_akcipher_spawn { struct crypto_spawn base; }; struct crypto_akcipher_sync_data { struct crypto_akcipher *tfm; const void *src; void *dst; unsigned int slen; unsigned int dlen; struct akcipher_request *req; struct crypto_wait cwait; struct scatterlist sg; u8 *buf; }; struct crypto_attr_alg { char name[128]; }; struct crypto_attr_type { u32 type; u32 mask; }; struct crypto_skcipher; struct crypto_authenc_ctx { struct crypto_ahash *auth; struct crypto_skcipher *enc; struct crypto_sync_skcipher *null; }; struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_skcipher *enc; struct crypto_sync_skcipher *null; }; struct crypto_authenc_key_param { __be32 enckeylen; }; struct crypto_authenc_keys { const u8 *authkey; const u8 *enckey; unsigned int authkeylen; unsigned int enckeylen; }; struct crypto_cipher { struct crypto_tfm base; }; struct crypto_cipher_spawn { struct crypto_spawn base; }; struct crypto_comp { struct crypto_tfm base; }; struct crypto_gcm_ctx { struct crypto_skcipher *ctr; struct crypto_ahash *ghash; }; struct crypto_gcm_ghash_ctx { unsigned int cryptlen; struct scatterlist *src; int (*complete)(struct aead_request *, u32); }; struct crypto_gcm_req_priv_ctx { u8 iv[16]; u8 auth_tag[16]; u8 iauth_tag[16]; struct scatterlist src[3]; struct scatterlist dst[3]; struct scatterlist sg; struct crypto_gcm_ghash_ctx ghash_ctx; union { struct ahash_request 
ahreq; struct skcipher_request skreq; } u; }; struct crypto_hash_walk { char *data; unsigned int offset; unsigned int flags; struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; }; struct crypto_kpp { unsigned int reqsize; struct crypto_tfm base; }; struct crypto_kpp_spawn { struct crypto_spawn base; }; struct crypto_larval { struct crypto_alg alg; struct crypto_alg *adult; struct completion completion; u32 mask; bool test_started; }; struct crypto_lskcipher { struct crypto_tfm base; }; struct crypto_lskcipher_spawn { struct crypto_spawn base; }; struct crypto_queue { struct list_head list; struct list_head *backlog; unsigned int qlen; unsigned int max_qlen; }; struct crypto_rfc3686_ctx { struct crypto_skcipher *child; u8 nonce[4]; }; struct crypto_rfc3686_req_ctx { u8 iv[16]; struct skcipher_request subreq; }; struct crypto_rfc4106_ctx { struct crypto_aead *child; u8 nonce[4]; }; struct crypto_rfc4106_req_ctx { struct scatterlist src[3]; struct scatterlist dst[3]; struct aead_request subreq; }; struct crypto_rfc4543_ctx { struct crypto_aead *child; struct crypto_sync_skcipher *null; u8 nonce[4]; }; struct crypto_rfc4543_instance_ctx { struct crypto_aead_spawn aead; }; struct crypto_rfc4543_req_ctx { struct aead_request subreq; }; struct crypto_rng { struct crypto_tfm base; }; struct crypto_scomp { struct crypto_tfm base; }; struct crypto_shash { unsigned int descsize; struct crypto_tfm base; }; struct crypto_shash_spawn { struct crypto_spawn base; }; struct crypto_sig { struct crypto_tfm base; }; struct crypto_skcipher { unsigned int reqsize; struct crypto_tfm base; }; struct crypto_sync_skcipher { struct crypto_skcipher base; }; struct rtattr; struct crypto_template { struct list_head list; struct hlist_head instances; struct module *module; int (*create)(struct crypto_template *, struct rtattr **); char name[128]; }; struct crypto_test_param { char driver[128]; char alg[128]; u32 type; }; struct crypto_type { unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); unsigned int (*extsize)(struct crypto_alg *); int (*init_tfm)(struct crypto_tfm *); void (*show)(struct seq_file *, struct crypto_alg *); int (*report)(struct sk_buff *, struct crypto_alg *); void (*free)(struct crypto_instance *); unsigned int type; unsigned int maskclear; unsigned int maskset; unsigned int tfmsize; }; struct rtattr { short unsigned int rta_len; short unsigned int rta_type; }; struct cryptomgr_param { struct rtattr *tb[34]; struct { struct rtattr attr; struct crypto_attr_type data; } type; struct { struct rtattr attr; struct crypto_attr_alg data; } attrs[32]; char template[128]; struct crypto_larval *larval; u32 otype; u32 omask; }; struct cs_dbs_tuners { unsigned int down_threshold; unsigned int freq_step; }; struct dbs_data; struct policy_dbs_info { struct cpufreq_policy *policy; struct mutex update_mutex; u64 last_sample_time; s64 sample_delay_ns; atomic_t work_count; struct irq_work irq_work; struct work_struct work; struct dbs_data *dbs_data; struct list_head list; unsigned int rate_mult; unsigned int idle_periods; bool is_shared; bool work_in_progress; }; struct cs_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int down_skip; unsigned int requested_freq; }; struct csi2_resources_walk_data { acpi_handle handle; struct list_head connections; }; struct css_set { struct cgroup_subsys_state *subsys[10]; refcount_t refcount; struct css_set *dom_cset; struct cgroup *dfl_cgrp; int nr_tasks; struct list_head tasks; struct list_head mg_tasks; struct list_head 
dying_tasks; struct list_head task_iters; struct list_head e_cset_node[10]; struct list_head threaded_csets; struct list_head threaded_csets_node; struct hlist_node hlist; struct list_head cgrp_links; struct list_head mg_src_preload_node; struct list_head mg_dst_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; bool dead; struct callback_head callback_head; }; struct css_set__safe_rcu { struct cgroup *dfl_cgrp; }; struct cstate { int state; uint32_t rep0; uint32_t rep1; uint32_t rep2; uint32_t rep3; }; struct cstate_entry { struct { unsigned int eax; unsigned int ecx; } states[8]; }; struct cstate_model { long unsigned int core_events; long unsigned int pkg_events; long unsigned int module_events; long unsigned int quirks; }; struct csum_pseudo_header { __be64 data_seq; __be32 subflow_seq; __be16 data_len; __sum16 csum; }; struct csum_state { __wsum csum; size_t off; }; struct ctl_table_root; struct ctl_table_set; struct ctl_dir; struct ctl_node; struct ctl_table_header { union { struct { struct ctl_table *ctl_table; int ctl_table_size; int used; int count; int nreg; }; struct callback_head rcu; }; struct completion *unregistering; const struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; enum { SYSCTL_TABLE_TYPE_DEFAULT = 0, SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY = 1, } type; }; struct ctl_dir { struct ctl_table_header header; struct rb_root root; }; struct ctl_node { struct rb_node node; struct ctl_table_header *header; }; typedef int proc_handler(const struct ctl_table *, int, void *, size_t *, loff_t *); struct ctl_table_poll; struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; }; struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; }; struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *); void (*set_ownership)(struct ctl_table_header *, kuid_t *, kgid_t *); int (*permissions)(struct ctl_table_header *, const struct ctl_table *); }; struct netlink_policy_dump_state; struct genl_family; struct genl_op_iter; struct ctrl_dump_policy_ctx { struct netlink_policy_dump_state *state; const struct genl_family *rt; struct genl_op_iter *op_iter; u32 op; u16 fam_id; u8 dump_map: 1; u8 single_op: 1; }; struct ctx_rq_wait { struct completion comp; atomic_t count; }; struct ctx_switch_entry { struct trace_entry ent; unsigned int prev_pid; unsigned int next_pid; unsigned int next_cpu; unsigned char prev_prio; unsigned char prev_state; unsigned char next_prio; unsigned char next_state; }; struct cyc2ns_data { u32 cyc2ns_mul; u32 cyc2ns_shift; u64 cyc2ns_offset; }; struct cyc2ns { struct cyc2ns_data data[2]; seqcount_latch_t seq; }; struct cyclecounter { u64 (*read)(const struct cyclecounter *); u64 mask; u32 mult; u32 shift; }; struct cytp_contact { int x; int y; int z; }; struct cytp_data { int fw_version; int pkt_size; int mode; int tp_min_pressure; int tp_max_pressure; int tp_width; int tp_high; int tp_max_abs_x; int tp_max_abs_y; int tp_res_x; int tp_res_y; int tp_metrics_supported; }; struct cytp_report_data { int contact_cnt; struct cytp_contact contacts[2]; unsigned int left: 1; unsigned int right: 1; unsigned int middle: 1; unsigned int tap: 1; }; 
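/* Illustrative sketch (not part of the generated dump): how a minimal sysctl
 * entry could be built from the ctl_table / proc_handler layout declared just
 * above. example_value, example_handler, and example_table are hypothetical
 * names for illustration only; a real handler would read or write the value
 * through the supplied buffer rather than returning immediately. */
static int example_value;
static int example_handler(const struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        return 0; /* stub: copy example_value to/from buffer in real use */
}
static struct ctl_table example_table[] = {
        {
                .procname = "example_value",  /* name shown under /proc/sys */
                .data = &example_value,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = example_handler,
        },
};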
struct d_partition { __le32 p_size; __le32 p_offset; __le32 p_fsize; u8 p_fstype; u8 p_frag; __le16 p_cpg; }; struct d_partition___2 { __le32 p_res; u8 p_fstype; u8 p_res2[3]; __le32 p_offset; __le32 p_size; }; struct data_chunk { size_t size; size_t icg; size_t dst_icg; size_t src_icg; }; struct dax_device; struct dax_holder_operations { int (*notify_failure)(struct dax_device *, u64, u64, int); }; struct gov_attr_set { struct kobject kobj; struct list_head policy_list; struct mutex update_lock; int usage_count; }; struct dbs_governor; struct dbs_data { struct gov_attr_set attr_set; struct dbs_governor *gov; void *tuners; unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; unsigned int io_is_busy; }; struct sysfs_ops; struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; const struct attribute_group **default_groups; const struct kobj_ns_type_operations * (*child_ns_type)(const struct kobject *); const void * (*namespace)(const struct kobject *); void (*get_ownership)(const struct kobject *, kuid_t *, kgid_t *); }; struct dbs_governor { struct cpufreq_governor gov; struct kobj_type kobj_type; struct dbs_data *gdbs_data; unsigned int (*gov_dbs_update)(struct cpufreq_policy *); struct policy_dbs_info * (*alloc)(void); void (*free)(struct policy_dbs_info *); int (*init)(struct dbs_data *); void (*exit)(struct dbs_data *); void (*start)(struct cpufreq_policy *); }; struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; }; struct dcb_app_type { int ifindex; struct dcb_app app; struct list_head list; u8 dcbx; }; struct dcb_ieee_app_dscp_map { u8 map[64]; }; struct dcb_ieee_app_prio_map { u64 map[8]; }; struct dcb_peer_app_info { __u8 willing; __u8 error; }; struct dcb_rewr_prio_pcp_map { u16 map[8]; }; struct dcbmsg { __u8 dcb_family; __u8 cmd; __u16 dcb_pad; }; struct dcbnl_buffer { __u8 prio2buffer[8]; __u32 buffer_size[8]; __u32 total_size; }; struct ieee_ets; struct ieee_maxrate; struct ieee_qcn; struct ieee_qcn_stats; struct ieee_pfc; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8); void (*setpgbwgcfgtx)(struct net_device *, int, u8); void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); void (*setpgbwgcfgrx)(struct net_device *, int, u8); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void 
(*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8); int (*setapp)(struct net_device *, u8, u16, u8); int (*getapp)(struct net_device *, u8, u16); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); int (*dcbnl_setapptrust)(struct net_device *, u8 *, int); int (*dcbnl_getapptrust)(struct net_device *, u8 *, int *); int (*dcbnl_setrewr)(struct net_device *, struct dcb_app *); int (*dcbnl_delrewr)(struct net_device *, struct dcb_app *); }; struct dccp_hdr { __be16 dccph_sport; __be16 dccph_dport; __u8 dccph_doff; __u8 dccph_cscov: 4; __u8 dccph_ccval: 4; __sum16 dccph_checksum; __u8 dccph_x: 1; __u8 dccph_type: 4; __u8 dccph_reserved: 3; __u8 dccph_seq2; __be16 dccph_seq; }; struct dccp_hdr_ack_bits { __be16 dccph_reserved1; __be16 dccph_ack_nr_high; __be32 dccph_ack_nr_low; }; struct dccp_hdr_ext { __be32 dccph_seq_low; }; struct dccp_hdr_request { __be32 dccph_req_service; }; struct dccp_hdr_reset { struct dccp_hdr_ack_bits dccph_reset_ack; __u8 dccph_reset_code; __u8 dccph_reset_data[3]; }; struct dccp_hdr_response { struct dccp_hdr_ack_bits dccph_resp_ack; __be32 dccph_resp_service; }; struct tcp_plb_state { u8 consec_cong_rounds: 5; u8 unused: 3; u32 pause_until; }; struct dctcp { u32 old_delivered; u32 old_delivered_ce; u32 prior_rcv_nxt; u32 dctcp_alpha; u32 next_seq; u32 ce_state; u32 loss_cwnd; struct tcp_plb_state plb; }; struct io_stats_per_prio { uint32_t inserted; uint32_t merged; uint32_t dispatched; atomic_t completed; }; struct dd_per_prio { struct list_head dispatch; struct rb_root sort_list[2]; struct list_head fifo_list[2]; sector_t latest_pos[2]; struct io_stats_per_prio stats; }; struct deadline_data { struct dd_per_prio per_prio[3]; enum dd_data_dir last_dir; unsigned int batching; unsigned int starved; int fifo_expire[2]; int fifo_batch; int writes_starved; int front_merges; u32 async_depth; int prio_aging_expire; spinlock_t lock; }; struct debug_reply_data { struct ethnl_reply_data base; u32 msg_mask; }; struct debugfs_blob_wrapper { void *data; long unsigned int size; }; struct debugfs_cancellation { struct list_head list; void (*cancel)(struct dentry *, void *); void *cancel_data; }; struct debugfs_devm_entry { int (*read)(struct seq_file *, void *); struct device *dev; }; struct debugfs_fs_info { kuid_t uid; kgid_t gid; umode_t mode; unsigned int opts; }; typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); struct debugfs_fsdata { const struct file_operations *real_fops; union 
{ debugfs_automount_t automount; struct { refcount_t active_users; struct completion active_users_drained; struct mutex cancellations_mtx; struct list_head cancellations; }; }; }; struct debugfs_reg32 { char *name; long unsigned int offset; }; struct debugfs_regset32 { const struct debugfs_reg32 *regs; int nregs; void *base; struct device *dev; }; struct debugfs_u32_array { u32 *array; u32 n_elements; }; struct dma_fence; struct dma_fence_cb; typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); struct dma_fence_cb { struct list_head node; dma_fence_func_t func; }; struct default_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; struct deferred_split { spinlock_t split_queue_lock; struct list_head split_queue; long unsigned int split_queue_len; }; struct delayed_call { void (*fn)(void *); void *arg; }; struct pending_free { struct list_head zapped; long unsigned int lock_chains_being_freed[1024]; }; struct delayed_free { struct callback_head callback_head; int index; int scheduled; struct pending_free pf[2]; }; struct delayed_uprobe { struct list_head list; struct uprobe *uprobe; struct mm_struct *mm; }; struct demotion_nodes { nodemask_t preferred; }; struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; }; struct qstr { union { struct { u32 hash; u32 len; }; u64 hash_len; }; const unsigned char *name; }; struct lockref { union { struct { spinlock_t lock; int count; }; }; }; struct dentry_operations; struct dentry { unsigned int d_flags; seqcount_spinlock_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[40]; const struct dentry_operations *d_op; struct super_block *d_sb; long unsigned int d_time; void *d_fsdata; struct lockref d_lockref; union { struct list_head d_lru; wait_queue_head_t *d_wait; }; struct hlist_node d_sib; struct hlist_head d_children; union { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } d_u; }; struct dentry__safe_trusted { struct inode *d_inode; }; struct dentry_info_args { int parent_ino; int dname_len; int ino; int inode_len; char *dname; }; struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry * (*d_real)(struct dentry *, enum d_real_type); long: 64; long: 64; long: 64; }; struct dentry_stat_t { long int nr_dentry; long int nr_unused; long int age_limit; long int want_pages; long int nr_negative; long int dummy; }; struct desc_ptr { short unsigned int size; long unsigned int address; } __attribute__((packed)); struct desc_struct { u16 limit0; u16 base0; u16 base1: 8; u16 type: 4; u16 s: 1; u16 dpl: 2; u16 p: 1; u16 limit1: 4; u16 avl: 1; u16 l: 1; u16 d: 1; u16 g: 1; u16 base2: 8; }; struct slab; struct detached_freelist { struct slab *slab; void *tail; void *freelist; int cnt; struct kmem_cache *s; }; struct dev_cgroup { struct cgroup_subsys_state css; struct list_head exceptions; enum devcg_behavior behavior; }; 
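/* Illustrative sketch (not part of the generated dump): the usual kernel
 * pattern for recovering a dev_cgroup from its embedded cgroup_subsys_state,
 * spelled out with __builtin_offsetof in case the container_of macro is not
 * in scope in this header. example_css_to_devcgroup is a hypothetical name;
 * here css is the first member, so the offset happens to be zero, but the
 * arithmetic is written in the general form. */
static struct dev_cgroup *example_css_to_devcgroup(struct cgroup_subsys_state *css)
{
        return (struct dev_cgroup *)((char *)css -
                        __builtin_offsetof(struct dev_cgroup, css));
}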
struct dev_exception_item { u32 major; u32 minor; short int type; short int access; struct list_head list; struct callback_head rcu; }; struct dev_ext_attribute { struct device_attribute attr; void *var; }; struct dev_ifalias { struct callback_head rcuhead; char ifalias[0]; }; struct iommu_fault_param; struct iommu_fwspec; struct iommu_device; struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; u32 max_pasids; u32 attach_deferred: 1; u32 pci_32bit_workaround: 1; u32 require_direct: 1; u32 shadow_on_flush: 1; }; struct dev_kfree_skb_cb { enum skb_drop_reason reason; }; struct vmem_altmap { long unsigned int base_pfn; const long unsigned int end_pfn; const long unsigned int reserve; long unsigned int free; long unsigned int align; long unsigned int alloc; bool inaccessible; }; struct dev_pagemap_ops; struct dev_pagemap { struct vmem_altmap altmap; struct percpu_ref ref; struct completion done; enum memory_type type; unsigned int flags; long unsigned int vmemmap_shift; const struct dev_pagemap_ops *ops; void *owner; int nr_range; union { struct range range; struct { struct {} __empty_ranges; struct range ranges[0]; }; }; }; struct vm_fault; struct dev_pagemap_ops { void (*page_free)(struct page *); vm_fault_t (*migrate_to_ram)(struct vm_fault *); int (*memory_failure)(struct dev_pagemap *, long unsigned int, long unsigned int, int); }; struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); }; struct dev_pm_domain { struct dev_pm_ops ops; int (*start)(struct device *); void (*detach)(struct device *, bool); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); int (*set_performance_state)(struct device *, unsigned int); }; struct dev_pm_domain_attach_data { const char * const *pd_names; const u32 num_pd_names; const u32 pd_flags; }; struct device_link; struct dev_pm_domain_list { struct device **pd_devs; struct device_link **pd_links; u32 num_pds; }; struct pm_qos_flags { struct list_head list; s32 effective_flags; }; struct dev_pm_qos_request; struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct freq_constraints freq; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; }; struct pm_qos_flags_request { struct list_head node; s32 flags; }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union { struct plist_node pnode; struct pm_qos_flags_request flr; struct freq_qos_request freq; } data; struct device *dev; }; struct dev_printk_info { char subsystem[16]; char device[48]; }; struct 
device_attach_data { struct device *dev; bool check_async; bool want_async; bool have_async; }; union device_attr_group_devres { const struct attribute_group *group; const struct attribute_group **groups; }; struct device_dma_parameters { unsigned int max_segment_size; unsigned int min_align_mask; long unsigned int segment_boundary_mask; }; struct device_link { struct device *supplier; struct list_head s_node; struct device *consumer; struct list_head c_node; struct device link_dev; enum device_link_state status; u32 flags; refcount_t rpm_active; struct kref kref; struct work_struct rm_work; bool supplier_preactivated; }; struct property; struct device_node { const char *name; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; long unsigned int _flags; void *data; }; struct device_physical_location { enum device_physical_location_panel panel; enum device_physical_location_vertical_position vertical_position; enum device_physical_location_horizontal_position horizontal_position; bool dock; bool lid; }; struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; }; struct device_private { struct klist klist_children; struct klist_node knode_parent; struct klist_node knode_driver; struct klist_node knode_bus; struct klist_node knode_class; struct list_head deferred_probe; const struct device_driver *async_driver; char *deferred_probe_reason; struct device *device; u8 dead: 1; }; struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(const struct device *, struct kobj_uevent_env *); char * (*devnode)(const struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; }; struct devinet_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table devinet_vars[33]; }; struct ratelimit_state { raw_spinlock_t lock; int interval; int burst; int printed; int missed; unsigned int flags; long unsigned int begin; }; struct printk_buffers { char outbuf[2048]; char scratchbuf[1024]; }; struct devkmsg_user { atomic64_t seq; struct ratelimit_state rs; struct mutex lock; struct printk_buffers pbufs; }; struct devlink_dev_stats { u32 reload_stats[6]; u32 remote_reload_stats[6]; }; struct devlink_dpipe_headers; struct devlink_ops; struct devlink_rel; struct devlink { u32 index; struct xarray ports; struct list_head rate_list; struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; struct xarray params; struct list_head region_list; struct list_head reporter_list; struct devlink_dpipe_headers *dpipe_headers; struct list_head trap_list; struct list_head trap_group_list; struct list_head trap_policer_list; struct list_head linecard_list; const struct devlink_ops *ops; struct xarray snapshot_ids; struct devlink_dev_stats stats; struct device *dev; possible_net_t _net; struct mutex lock; struct lock_class_key lock_key; u8 reload_failed: 1; refcount_t refcount; struct rcu_work rwork; struct devlink_rel *rel; struct xarray nested_rels; long: 64; long: 64; long: 64; char priv[0]; }; struct devlink_dpipe_header; struct devlink_dpipe_action { enum devlink_dpipe_action_type type; unsigned int header_index; struct devlink_dpipe_header *header; unsigned int field_id; }; struct genl_info; struct devlink_dpipe_dump_ctx { struct genl_info *info; enum devlink_command cmd; struct sk_buff *skb; struct nlattr *nest; void 
*hdr; }; struct devlink_dpipe_value; struct devlink_dpipe_entry { u64 index; struct devlink_dpipe_value *match_values; unsigned int match_values_count; struct devlink_dpipe_value *action_values; unsigned int action_values_count; u64 counter; bool counter_valid; }; struct devlink_dpipe_field { const char *name; unsigned int id; unsigned int bitwidth; enum devlink_dpipe_field_mapping_type mapping_type; }; struct devlink_dpipe_header { const char *name; unsigned int id; struct devlink_dpipe_field *fields; unsigned int fields_count; bool global; }; struct devlink_dpipe_headers { struct devlink_dpipe_header **headers; unsigned int headers_count; }; struct devlink_dpipe_match { enum devlink_dpipe_match_type type; unsigned int header_index; struct devlink_dpipe_header *header; unsigned int field_id; }; struct devlink_dpipe_table_ops; struct devlink_dpipe_table { void *priv; struct list_head list; const char *name; bool counters_enabled; bool counter_control_extern; bool resource_valid; u64 resource_id; u64 resource_units; const struct devlink_dpipe_table_ops *table_ops; struct callback_head rcu; }; struct devlink_dpipe_table_ops { int (*actions_dump)(void *, struct sk_buff *); int (*matches_dump)(void *, struct sk_buff *); int (*entries_dump)(void *, bool, struct devlink_dpipe_dump_ctx *); int (*counters_set_update)(void *, bool); u64 (*size_get)(void *); }; struct devlink_dpipe_value { union { struct devlink_dpipe_action *action; struct devlink_dpipe_match *match; }; unsigned int mapping_value; bool mapping_valid; unsigned int value_size; void *value; void *mask; }; struct devlink_flash_component_lookup_ctx { const char *lookup_name; bool lookup_name_found; }; struct devlink_flash_notify { const char *status_msg; const char *component; long unsigned int done; long unsigned int total; long unsigned int timeout; }; struct firmware; struct devlink_flash_update_params { const struct firmware *fw; const char *component; u32 overwrite_mask; }; struct devlink_fmsg { struct list_head item_list; int err; bool putting_binary; }; struct devlink_fmsg_item { struct list_head list; int attrtype; u8 nla_type; u16 len; int value[0]; }; struct devlink_health_reporter_ops; struct devlink_port; struct devlink_health_reporter { struct list_head list; void *priv; const struct devlink_health_reporter_ops *ops; struct devlink *devlink; struct devlink_port *devlink_port; struct devlink_fmsg *dump_fmsg; u64 graceful_period; bool auto_recover; bool auto_dump; u8 health_state; u64 dump_ts; u64 dump_real_ts; u64 error_count; u64 recovery_count; u64 last_recovery_ts; }; struct devlink_health_reporter_ops { char *name; int (*recover)(struct devlink_health_reporter *, void *, struct netlink_ext_ack *); int (*dump)(struct devlink_health_reporter *, struct devlink_fmsg *, void *, struct netlink_ext_ack *); int (*diagnose)(struct devlink_health_reporter *, struct devlink_fmsg *, struct netlink_ext_ack *); int (*test)(struct devlink_health_reporter *, struct netlink_ext_ack *); }; struct devlink_info_req { struct sk_buff *msg; void (*version_cb)(const char *, enum devlink_info_version_type, void *); void *version_cb_priv; }; struct devlink_linecard_ops; struct devlink_linecard_type; struct devlink_linecard { struct list_head list; struct devlink *devlink; unsigned int index; const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; struct mutex state_lock; const char *type; struct devlink_linecard_type *types; unsigned int types_count; u32 rel_index; }; struct devlink_linecard_ops { int 
(*provision)(struct devlink_linecard *, void *, const char *, const void *, struct netlink_ext_ack *); int (*unprovision)(struct devlink_linecard *, void *, struct netlink_ext_ack *); bool (*same_provision)(struct devlink_linecard *, void *, const char *, const void *); unsigned int (*types_count)(struct devlink_linecard *, void *); void (*types_get)(struct devlink_linecard *, void *, unsigned int, const char **, const void **); }; struct devlink_linecard_type { const char *type; const void *priv; }; struct devlink_nl_dump_state { long unsigned int instance; int idx; union { struct { u64 start_offset; }; struct { u64 dump_ts; }; }; }; struct devlink_obj_desc; struct devlink_nl_sock_priv { struct devlink_obj_desc *flt; spinlock_t flt_lock; }; struct devlink_obj_desc { struct callback_head rcu; const char *bus_name; const char *dev_name; unsigned int port_index; bool port_index_valid; long int data[0]; }; struct devlink_sb_pool_info; struct devlink_trap; struct devlink_trap_group; struct devlink_trap_policer; struct devlink_port_new_attrs; struct devlink_rate; struct devlink_ops { u32 supported_flash_update_params; long unsigned int reload_actions; long unsigned int reload_limits; int (*reload_down)(struct devlink *, bool, enum devlink_reload_action, enum devlink_reload_limit, struct netlink_ext_ack *); int (*reload_up)(struct devlink *, enum devlink_reload_action, enum devlink_reload_limit, u32 *, struct netlink_ext_ack *); int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *); int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *); int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *); int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32, struct netlink_ext_ack *); int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *); int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *); int (*sb_occ_snapshot)(struct devlink *, unsigned int); int (*sb_occ_max_clear)(struct devlink *, unsigned int); int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *); int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *); int (*eswitch_mode_get)(struct devlink *, u16 *); int (*eswitch_mode_set)(struct devlink *, u16, struct netlink_ext_ack *); int (*eswitch_inline_mode_get)(struct devlink *, u8 *); int (*eswitch_inline_mode_set)(struct devlink *, u8, struct netlink_ext_ack *); int (*eswitch_encap_mode_get)(struct devlink *, enum devlink_eswitch_encap_mode *); int (*eswitch_encap_mode_set)(struct devlink *, enum devlink_eswitch_encap_mode, struct netlink_ext_ack *); int (*info_get)(struct devlink *, struct devlink_info_req *, struct netlink_ext_ack *); int (*flash_update)(struct devlink *, struct devlink_flash_update_params *, struct netlink_ext_ack *); int (*trap_init)(struct devlink *, const struct devlink_trap *, void *); void (*trap_fini)(struct devlink *, const struct devlink_trap *, void *); int (*trap_action_set)(struct devlink *, const struct devlink_trap *, enum devlink_trap_action, struct netlink_ext_ack *); int (*trap_group_init)(struct devlink *, const struct devlink_trap_group *); int (*trap_group_set)(struct devlink *, const struct devlink_trap_group *, const struct devlink_trap_policer *, struct netlink_ext_ack *); int (*trap_group_action_set)(struct 
devlink *, const struct devlink_trap_group *, enum devlink_trap_action, struct netlink_ext_ack *); int (*trap_drop_counter_get)(struct devlink *, const struct devlink_trap *, u64 *); int (*trap_policer_init)(struct devlink *, const struct devlink_trap_policer *); void (*trap_policer_fini)(struct devlink *, const struct devlink_trap_policer *); int (*trap_policer_set)(struct devlink *, const struct devlink_trap_policer *, u64, u64, struct netlink_ext_ack *); int (*trap_policer_counter_get)(struct devlink *, const struct devlink_trap_policer *, u64 *); int (*port_new)(struct devlink *, const struct devlink_port_new_attrs *, struct netlink_ext_ack *, struct devlink_port **); int (*rate_leaf_tx_share_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_leaf_tx_max_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_leaf_tx_priority_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_leaf_tx_weight_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_tx_share_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_node_tx_max_set)(struct devlink_rate *, void *, u64, struct netlink_ext_ack *); int (*rate_node_tx_priority_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_tx_weight_set)(struct devlink_rate *, void *, u32, struct netlink_ext_ack *); int (*rate_node_new)(struct devlink_rate *, void **, struct netlink_ext_ack *); int (*rate_node_del)(struct devlink_rate *, void *, struct netlink_ext_ack *); int (*rate_leaf_parent_set)(struct devlink_rate *, struct devlink_rate *, void *, void *, struct netlink_ext_ack *); int (*rate_node_parent_set)(struct devlink_rate *, struct devlink_rate *, void *, void *, struct netlink_ext_ack *); bool (*selftest_check)(struct devlink *, unsigned int, struct netlink_ext_ack *); enum devlink_selftest_status (*selftest_run)(struct devlink *, unsigned int, struct netlink_ext_ack *); }; struct devlink_param_gset_ctx; union devlink_param_value; struct devlink_param { u32 id; const char *name; bool generic; enum devlink_param_type type; long unsigned int supported_cmodes; int (*get)(struct devlink *, u32, struct devlink_param_gset_ctx *); int (*set)(struct devlink *, u32, struct devlink_param_gset_ctx *, struct netlink_ext_ack *); int (*validate)(struct devlink *, u32, union devlink_param_value, struct netlink_ext_ack *); }; union devlink_param_value { u8 vu8; u16 vu16; u32 vu32; char vstr[32]; bool vbool; }; struct devlink_param_gset_ctx { union devlink_param_value val; enum devlink_param_cmode cmode; }; struct devlink_param_item { struct list_head list; const struct devlink_param *param; union devlink_param_value driverinit_value; bool driverinit_value_valid; union devlink_param_value driverinit_value_new; bool driverinit_value_new_valid; }; struct netdev_phys_item_id { unsigned char id[32]; unsigned char id_len; }; struct devlink_port_phys_attrs { u32 port_number; u32 split_subport_number; }; struct devlink_port_pci_pf_attrs { u32 controller; u16 pf; u8 external: 1; }; struct devlink_port_pci_vf_attrs { u32 controller; u16 pf; u16 vf; u8 external: 1; }; struct devlink_port_pci_sf_attrs { u32 controller; u32 sf; u16 pf; u8 external: 1; }; struct devlink_port_attrs { u8 split: 1; u8 splittable: 1; u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { struct devlink_port_phys_attrs phys; struct devlink_port_pci_pf_attrs pci_pf; struct devlink_port_pci_vf_attrs 
pci_vf; struct devlink_port_pci_sf_attrs pci_sf; }; }; struct devlink_port_ops; struct ib_device; struct devlink_port { struct list_head list; struct list_head region_list; struct devlink *devlink; const struct devlink_port_ops *ops; unsigned int index; spinlock_t type_lock; enum devlink_port_type type; enum devlink_port_type desired_type; union { struct { struct net_device *netdev; int ifindex; char ifname[16]; } type_eth; struct { struct ib_device *ibdev; } type_ib; }; struct devlink_port_attrs attrs; u8 attrs_set: 1; u8 switch_port: 1; u8 registered: 1; u8 initialized: 1; struct delayed_work type_warn_dw; struct list_head reporter_list; struct devlink_rate *devlink_rate; struct devlink_linecard *linecard; u32 rel_index; }; struct devlink_port_new_attrs { enum devlink_port_flavour flavour; unsigned int port_index; u32 controller; u32 sfnum; u16 pfnum; u8 port_index_valid: 1; u8 controller_valid: 1; u8 sfnum_valid: 1; }; struct devlink_port_ops { int (*port_split)(struct devlink *, struct devlink_port *, unsigned int, struct netlink_ext_ack *); int (*port_unsplit)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); int (*port_type_set)(struct devlink_port *, enum devlink_port_type); int (*port_del)(struct devlink *, struct devlink_port *, struct netlink_ext_ack *); int (*port_fn_hw_addr_get)(struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); int (*port_fn_hw_addr_set)(struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); int (*port_fn_roce_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_roce_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_migratable_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_migratable_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_state_get)(struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); int (*port_fn_state_set)(struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); int (*port_fn_ipsec_crypto_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_ipsec_crypto_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_ipsec_packet_get)(struct devlink_port *, bool *, struct netlink_ext_ack *); int (*port_fn_ipsec_packet_set)(struct devlink_port *, bool, struct netlink_ext_ack *); int (*port_fn_max_io_eqs_get)(struct devlink_port *, u32 *, struct netlink_ext_ack *); int (*port_fn_max_io_eqs_set)(struct devlink_port *, u32, struct netlink_ext_ack *); }; struct devlink_port_region_ops { const char *name; void (*destructor)(const void *); int (*snapshot)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u8 **); int (*read)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u64, u32, u8 *); void *priv; }; struct devlink_rate { struct list_head list; enum devlink_rate_type type; struct devlink *devlink; void *priv; u64 tx_share; u64 tx_max; struct devlink_rate *parent; union { struct devlink_port *devlink_port; struct { char *name; refcount_t refcnt; }; }; u32 tx_priority; u32 tx_weight; }; struct devlink_region_ops; struct devlink_region { struct devlink *devlink; struct devlink_port *port; struct list_head list; union { const struct devlink_region_ops *ops; const struct devlink_port_region_ops *port_ops; }; struct mutex snapshot_lock; struct list_head snapshot_list; u32 max_snapshots; u32 cur_snapshots; u64 size; }; 
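/* Illustrative sketch (not part of the generated dump): a minimal
 * devlink_port_ops table of the kind a driver might register, wiring up only
 * the port_split callback from the ops struct defined above. The example_*
 * names are hypothetical; the callback signature matches the port_split
 * member declared in struct devlink_port_ops. */
static int example_port_split(struct devlink *dl, struct devlink_port *port,
                              unsigned int count, struct netlink_ext_ack *ack)
{
        return 0; /* stub: a real driver would reconfigure the port here */
}
static const struct devlink_port_ops example_port_ops = {
        .port_split = example_port_split,
        /* remaining callbacks left NULL: devlink treats them as unsupported */
};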
struct devlink_region_ops { const char *name; void (*destructor)(const void *); int (*snapshot)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u8 **); int (*read)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u64, u32, u8 *); void *priv; }; typedef void devlink_rel_notify_cb_t(struct devlink *, u32); typedef void devlink_rel_cleanup_cb_t(struct devlink *, u32, u32); struct devlink_rel { u32 index; refcount_t refcount; u32 devlink_index; struct { u32 devlink_index; u32 obj_index; devlink_rel_notify_cb_t *notify_cb; devlink_rel_cleanup_cb_t *cleanup_cb; struct delayed_work notify_work; } nested_in; }; struct devlink_reload_combination { enum devlink_reload_action action; enum devlink_reload_limit limit; }; struct devlink_resource_size_params { u64 size_min; u64 size_max; u64 size_granularity; enum devlink_resource_unit unit; }; typedef u64 devlink_resource_occ_get_t(void *); struct devlink_resource { const char *name; u64 id; u64 size; u64 size_new; bool size_valid; struct devlink_resource *parent; struct devlink_resource_size_params size_params; struct list_head list; struct list_head resource_list; devlink_resource_occ_get_t *occ_get; void *occ_get_priv; }; struct devlink_sb { struct list_head list; unsigned int index; u32 size; u16 ingress_pools_count; u16 egress_pools_count; u16 ingress_tc_count; u16 egress_tc_count; }; struct devlink_sb_pool_info { enum devlink_sb_pool_type pool_type; u32 size; enum devlink_sb_threshold_type threshold_type; u32 cell_size; }; struct devlink_snapshot { struct list_head list; struct devlink_region *region; u8 *data; u32 id; }; struct devlink_stats { u64_stats_t rx_bytes; u64_stats_t rx_packets; struct u64_stats_sync syncp; }; struct devlink_trap { enum devlink_trap_type type; enum devlink_trap_action init_action; bool generic; u16 id; const char *name; u16 init_group_id; u32 metadata_cap; }; struct devlink_trap_group { const char *name; u16 id; bool generic; u32 init_policer_id; }; struct devlink_trap_policer_item; struct devlink_trap_group_item { const struct devlink_trap_group *group; struct devlink_trap_policer_item *policer_item; struct list_head list; struct devlink_stats *stats; }; struct devlink_trap_item { const struct devlink_trap *trap; struct devlink_trap_group_item *group_item; struct list_head list; enum devlink_trap_action action; struct devlink_stats *stats; void *priv; }; struct flow_action_cookie; struct devlink_trap_metadata { const char *trap_name; const char *trap_group_name; struct net_device *input_dev; netdevice_tracker dev_tracker; const struct flow_action_cookie *fa_cookie; enum devlink_trap_type trap_type; }; struct devlink_trap_policer { u32 id; u64 init_rate; u64 init_burst; u64 max_rate; u64 min_rate; u64 max_burst; u64 min_burst; }; struct devlink_trap_policer_item { const struct devlink_trap_policer *policer; u64 rate; u64 burst; struct list_head list; }; typedef void (*dr_release_t)(struct device *, void *); struct devres_node { struct list_head entry; dr_release_t release; const char *name; size_t size; }; struct devres { struct devres_node node; u8 data[0]; }; struct devres_group { struct devres_node node[2]; void *id; int color; }; struct dictionary { uint8_t *buf; size_t start; size_t pos; size_t full; size_t limit; size_t end; uint32_t size; uint32_t size_max; uint32_t allocated; enum xz_mode mode; }; struct die_args { struct pt_regs *regs; const char *str; long int err; int trapnr; int signr; }; struct dim_stats { int ppms; int bpms; int epms; int cpms; 
int cpe_ratio; }; struct dim_sample { ktime_t time; u32 pkt_ctr; u32 byte_ctr; u16 event_ctr; u32 comp_ctr; }; struct dim { u8 state; struct dim_stats prev_stats; struct dim_sample start_sample; struct dim_sample measuring_sample; struct work_struct work; void *priv; u8 profile_ix; u8 mode; u8 tune_state; u8 steps_right; u8 steps_left; u8 tired; }; struct dim_cq_moder { u16 usec; u16 pkts; u16 comps; u8 cq_period_mode; struct callback_head rcu; }; struct dim_irq_moder { u8 profile_flags; u8 coal_flags; u8 dim_rx_mode; u8 dim_tx_mode; struct dim_cq_moder *rx_profile; struct dim_cq_moder *tx_profile; void (*rx_dim_work)(struct work_struct *); void (*tx_dim_work)(struct work_struct *); }; struct dir_context; typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); struct dir_context { filldir_t actor; loff_t pos; }; struct dir_entry { struct list_head list; time64_t mtime; char name[0]; }; struct fname; struct dir_private_info { struct rb_root root; struct rb_node *curr_node; struct fname *extra_fname; loff_t last_pos; __u32 curr_hash; __u32 curr_minor_hash; __u32 next_hash; u64 cookie; bool initialized; }; struct wb_domain; struct dirty_throttle_control { struct wb_domain *dom; struct dirty_throttle_control *gdtc; struct bdi_writeback *wb; struct fprop_local_percpu *wb_completions; long unsigned int avail; long unsigned int dirty; long unsigned int thresh; long unsigned int bg_thresh; long unsigned int wb_dirty; long unsigned int wb_thresh; long unsigned int wb_bg_thresh; long unsigned int pos_ratio; bool freerun; bool dirty_exceeded; }; struct disk_events { struct list_head node; struct gendisk *disk; spinlock_t lock; struct mutex block_mutex; int block; unsigned int pending; unsigned int clearing; long int poll_msecs; struct delayed_work dwork; }; struct disk_stats { u64 nsecs[4]; long unsigned int sectors[4]; long unsigned int ios[4]; long unsigned int merges[4]; long unsigned int io_ticks; local_t in_flight[2]; }; struct disklabel { __le32 d_magic; __le16 d_type; __le16 d_subtype; u8 d_typename[16]; u8 d_packname[16]; __le32 d_secsize; __le32 d_nsectors; __le32 d_ntracks; __le32 d_ncylinders; __le32 d_secpercyl; __le32 d_secprtunit; __le16 d_sparespertrack; __le16 d_sparespercyl; __le32 d_acylinders; __le16 d_rpm; __le16 d_interleave; __le16 d_trackskew; __le16 d_cylskew; __le32 d_headswitch; __le32 d_trkseek; __le32 d_flags; __le32 d_drivedata[5]; __le32 d_spare[5]; __le32 d_magic2; __le16 d_checksum; __le16 d_npartitions; __le32 d_bbsize; __le32 d_sbsize; struct d_partition d_partitions[18]; }; struct disklabel___2 { u8 d_reserved[270]; struct d_partition___2 d_partitions[2]; u8 d_blank[208]; __le16 d_magic; } __attribute__((packed)); struct dispatch_rq_data { struct blk_mq_hw_ctx *hctx; struct request *rq; }; struct dl_bw { raw_spinlock_t lock; u64 bw; u64 total_bw; }; struct dl_rq { struct rb_root_cached root; unsigned int dl_nr_running; struct { u64 curr; u64 next; } earliest_dl; bool overloaded; struct rb_root_cached pushable_dl_tasks_root; u64 running_bw; u64 this_bw; u64 extra_bw; u64 max_bw; u64 bw_ratio; }; typedef void (*dma_async_tx_callback)(void *); struct dmaengine_result; typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); struct dma_chan; struct dmaengine_unmap_data; struct dma_descriptor_metadata_ops; struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; dma_addr_t phys; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); int 
(*desc_free)(struct dma_async_tx_descriptor *); dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; enum dma_desc_metadata_mode desc_metadata_mode; struct dma_descriptor_metadata_ops *metadata_ops; }; struct dma_block { struct dma_block *next_block; dma_addr_t dma; }; struct iosys_map { union { void *vaddr_iomem; void *vaddr; }; bool is_iomem; }; struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; }; struct dma_buf_ops; struct dma_resv; struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; unsigned int vmapping_counter; struct iosys_map vmap_ptr; const char *exp_name; const char *name; spinlock_t name_lock; struct module *owner; struct list_head list_node; void *priv; struct dma_resv *resv; wait_queue_head_t poll; struct dma_buf_poll_cb_t cb_in; struct dma_buf_poll_cb_t cb_out; }; struct dma_buf_attachment; struct dma_buf_attach_ops { bool allow_peer2peer; void (*move_notify)(struct dma_buf_attachment *); }; struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; bool peer2peer; const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; }; struct dma_buf_export_info { const char *exp_name; struct module *owner; const struct dma_buf_ops *ops; size_t size; int flags; struct dma_resv *resv; void *priv; }; struct dma_buf_export_sync_file { __u32 flags; __s32 fd; }; struct dma_buf_import_sync_file { __u32 flags; __s32 fd; }; struct dma_buf_ops { bool cache_sgt_mapping; int (*attach)(struct dma_buf *, struct dma_buf_attachment *); void (*detach)(struct dma_buf *, struct dma_buf_attachment *); int (*pin)(struct dma_buf_attachment *); void (*unpin)(struct dma_buf_attachment *); struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); int (*mmap)(struct dma_buf *, struct vm_area_struct *); int (*vmap)(struct dma_buf *, struct iosys_map *); void (*vunmap)(struct dma_buf *, struct iosys_map *); }; struct dma_buf_sync { __u64 flags; }; struct dma_device; struct dma_chan_dev; struct dma_chan_percpu; struct dma_router; struct dma_chan { struct dma_device *device; struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; int chan_id; struct dma_chan_dev *dev; const char *name; char *dbg_client_name; struct list_head device_node; struct dma_chan_percpu *local; int client_count; int table_count; struct dma_router *router; void *route_data; void *private; }; struct dma_chan___2 { int lock; const char *device_id; }; struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; bool chan_dma_dev; }; struct dma_chan_percpu { long unsigned int memcpy_count; long unsigned int bytes_transferred; }; struct dma_descriptor_metadata_ops { int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); int (*set_len)(struct dma_async_tx_descriptor *, size_t); }; typedef bool (*dma_filter_fn)(struct dma_chan *, void *); struct dma_slave_map; struct dma_filter { dma_filter_fn fn; int mapcnt; const struct dma_slave_map *map; }; struct 
dma_vec; struct dma_interleaved_template; struct dma_slave_caps; struct dma_slave_config; struct dma_tx_state; struct dma_device { struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; enum dma_desc_metadata_mode desc_metadata_modes; short unsigned int max_xor; short unsigned int max_pq; enum dmaengine_alignment copy_align; enum dmaengine_alignment xor_align; enum dmaengine_alignment pq_align; enum dmaengine_alignment fill_align; int dev_id; struct device *dev; struct module *owner; struct ida chan_ida; u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 max_burst; u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *); int (*device_router_config)(struct dma_chan *); void (*device_free_chan_resources)(struct dma_chan *); struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_peripheral_dma_vec)(struct dma_chan *, const struct dma_vec *, size_t, enum dma_transfer_direction, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); int (*device_config)(struct dma_chan *, struct dma_slave_config *); int (*device_pause)(struct dma_chan *); int (*device_resume)(struct dma_chan *); int (*device_terminate_all)(struct dma_chan *); void (*device_synchronize)(struct dma_chan *); enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); void (*device_issue_pending)(struct dma_chan *); void (*device_release)(struct dma_device *); void (*dbg_summary_show)(struct seq_file *, struct dma_device *); struct dentry *dbg_dev_root; }; struct dma_devres { size_t size; void *vaddr; dma_addr_t dma_handle; long unsigned int 
attrs; }; struct dma_fence_ops; struct dma_fence { spinlock_t *lock; const struct dma_fence_ops *ops; union { struct list_head cb_list; ktime_t timestamp; struct callback_head rcu; }; u64 context; u64 seqno; long unsigned int flags; struct kref refcount; int error; }; struct dma_fence_array; struct dma_fence_array_cb { struct dma_fence_cb cb; struct dma_fence_array *array; }; struct dma_fence_array { struct dma_fence base; spinlock_t lock; unsigned int num_fences; atomic_t num_pending; struct dma_fence **fences; struct irq_work work; struct dma_fence_array_cb callbacks[0]; }; struct dma_fence_chain { struct dma_fence base; struct dma_fence *prev; u64 prev_seqno; struct dma_fence *fence; union { struct dma_fence_cb cb; struct irq_work work; }; spinlock_t lock; }; struct dma_fence_ops { bool use_64bit_seqno; const char * (*get_driver_name)(struct dma_fence *); const char * (*get_timeline_name)(struct dma_fence *); bool (*enable_signaling)(struct dma_fence *); bool (*signaled)(struct dma_fence *); long int (*wait)(struct dma_fence *, bool, long int); void (*release)(struct dma_fence *); void (*fence_value_str)(struct dma_fence *, char *, int); void (*timeline_value_str)(struct dma_fence *, char *, int); void (*set_deadline)(struct dma_fence *, ktime_t); }; struct dma_fence_unwrap { struct dma_fence *chain; struct dma_fence *array; unsigned int index; }; struct dma_interleaved_template { dma_addr_t src_start; dma_addr_t dst_start; enum dma_transfer_direction dir; bool src_inc; bool dst_inc; bool src_sgl; bool dst_sgl; size_t numf; size_t frame_size; struct data_chunk sgl[0]; }; struct dma_map_ops { void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); struct page * (*alloc_pages_op)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); int (*dma_supported)(struct device *, u64); u64 (*get_required_mask)(struct device *); size_t (*max_mapping_size)(struct device *); size_t (*opt_mapping_size)(void); long unsigned int (*get_merge_boundary)(struct device *); }; struct dma_page { struct 
list_head page_list; void *vaddr; dma_addr_t dma; }; struct dma_pool { struct list_head page_list; spinlock_t lock; struct dma_block *next_block; size_t nr_blocks; size_t nr_active; size_t nr_pages; struct device *dev; unsigned int size; unsigned int allocation; unsigned int boundary; char name[32]; struct list_head pools; }; struct ww_acquire_ctx; struct ww_class; struct ww_mutex { struct mutex base; struct ww_acquire_ctx *ctx; struct ww_class *ww_class; }; struct dma_resv_list; struct dma_resv { struct ww_mutex lock; struct dma_resv_list *fences; }; struct dma_resv_iter { struct dma_resv *obj; enum dma_resv_usage usage; struct dma_fence *fence; enum dma_resv_usage fence_usage; unsigned int index; struct dma_resv_list *fences; unsigned int num_fences; bool is_restarted; }; struct dma_resv_list { struct callback_head rcu; u32 num_fences; u32 max_fences; struct dma_fence *table[0]; }; struct dma_router { struct device *dev; void (*route_free)(struct device *, void *); }; struct dma_sgt_handle { struct sg_table sgt; struct page **pages; }; struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 min_burst; u32 max_burst; u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; enum dma_residue_granularity residue_granularity; bool descriptor_reuse; }; struct dma_slave_config { enum dma_transfer_direction direction; phys_addr_t src_addr; phys_addr_t dst_addr; enum dma_slave_buswidth src_addr_width; enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; u32 src_port_window_size; u32 dst_port_window_size; bool device_fc; void *peripheral_config; size_t peripheral_size; }; struct dma_slave_map { const char *devname; const char *slave; void *param; }; struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; u32 in_flight_bytes; }; struct dma_vec { dma_addr_t addr; size_t len; }; struct dmabuf_cmsg { __u64 frag_offset; __u32 frag_size; __u32 frag_token; __u32 dmabuf_id; __u32 flags; }; struct net_iov; struct net_devmem_dmabuf_binding; struct dmabuf_genpool_chunk_owner { long unsigned int base_virtual; dma_addr_t base_dma_addr; struct net_iov *niovs; size_t num_niovs; struct net_devmem_dmabuf_binding *binding; }; struct dmabuf_token { __u32 token_start; __u32 token_count; }; struct dmaengine_result { enum dmaengine_tx_result result; u32 residue; }; struct dmaengine_unmap_data { u8 map_cnt; u8 to_cnt; u8 from_cnt; u8 bidi_cnt; struct device *dev; struct kref kref; size_t len; dma_addr_t addr[0]; }; struct dmi_device { struct list_head list; int type; const char *name; void *device_data; }; struct dmi_dev_onboard { struct dmi_device dev; int instance; int segment; int bus; int devfn; }; struct dmi_device_attribute { struct device_attribute dev_attr; int field; }; struct dmi_header { u8 type; u8 length; u16 handle; }; struct dmi_memdev_info { const char *device; const char *bank; u64 size; u16 handle; u8 type; }; struct dmi_strmatch { unsigned char slot: 7; unsigned char exact_match: 1; char substr[79]; }; struct dmi_system_id { int (*callback)(const struct dmi_system_id *); const char *ident; struct dmi_strmatch matches[4]; void *driver_data; }; struct fb_videomode; struct dmt_videomode { u32 dmt_id; u32 std_2byte_code; u32 cvt_3byte_code; const struct fb_videomode *mode; }; struct dnotify_struct; struct dnotify_mark { struct fsnotify_mark fsn_mark; struct dnotify_struct *dn; }; typedef void *fl_owner_t; struct dnotify_struct { struct dnotify_struct *dn_next; __u32 dn_mask; int dn_fd; struct file *dn_filp; fl_owner_t 
dn_owner; }; struct dns_payload_header { __u8 zero; __u8 content; __u8 version; }; struct dns_server_list_v1_header { struct dns_payload_header hdr; __u8 source; __u8 status; __u8 nr_servers; }; struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; struct do_proc_douintvec_minmax_conv_param { unsigned int *min; unsigned int *max; }; struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; short unsigned int stall_thrs; long unsigned int history_head; long unsigned int history[4]; long: 64; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; long unsigned int slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; short unsigned int stall_max; long unsigned int last_reap; long unsigned int stall_cnt; }; struct kqid { union { kuid_t uid; kgid_t gid; kprojid_t projid; }; enum quota_type type; }; struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; }; struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; spinlock_t dq_dqb_lock; atomic_t dq_count; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; long unsigned int dq_flags; struct mem_dqblk dq_dqb; }; struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_inode_usage)(struct inode *, qsize_t *); int (*get_next_id)(struct super_block *, struct kqid *); }; struct drbg_core { drbg_flag_t flags; __u8 statelen; __u8 blocklen_bytes; char cra_name[128]; char backend_cra_name[128]; }; struct drbg_string { const unsigned char *buf; size_t len; struct list_head list; }; struct drbg_state_ops; struct drbg_state { struct mutex drbg_mutex; unsigned char *V; unsigned char *Vbuf; unsigned char *C; unsigned char *Cbuf; size_t reseed_ctr; size_t reseed_threshold; unsigned char *scratchpad; unsigned char *scratchpadbuf; void *priv_data; struct crypto_skcipher *ctr_handle; struct skcipher_request *ctr_req; __u8 *outscratchpadbuf; __u8 *outscratchpad; struct crypto_wait ctr_wait; struct scatterlist sg_in; struct scatterlist sg_out; enum drbg_seed_state seeded; long unsigned int last_seed_time; bool pr; bool fips_primed; unsigned char *prev; struct crypto_rng *jent; const struct drbg_state_ops *d_ops; const struct drbg_core *core; struct drbg_string test_data; }; struct drbg_state_ops { int (*update)(struct drbg_state *, struct list_head *, int); int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); int (*crypto_init)(struct drbg_state *); int (*crypto_fini)(struct drbg_state *); }; struct driver_attribute { struct attribute attr; ssize_t (*show)(struct device_driver *, char *); ssize_t (*store)(struct device_driver *, const char *, size_t); }; struct module_kobject; struct driver_private { struct kobject kobj; struct klist klist_devices; struct klist_node knode_bus; struct module_kobject *mkobj; struct 
device_driver *driver; }; struct drop_reason_list { const char * const *reasons; size_t n_reasons; }; struct drv_cmd { struct acpi_pct_register *reg; u32 val; union { void (*write)(struct acpi_pct_register *, u32); u32 (*read)(struct acpi_pct_register *); } func; }; struct pci_driver; struct pci_device_id; struct drv_dev_and_id { struct pci_driver *drv; struct pci_dev *dev; const struct pci_device_id *id; }; struct dst_cache_pcpu; struct dst_cache { struct dst_cache_pcpu *cache; long unsigned int reset_ts; }; struct in_addr { __be32 s_addr; }; struct dst_cache_pcpu { long unsigned int refresh_ts; struct dst_entry *dst; u32 cookie; union { struct in_addr in_saddr; struct in6_addr in6_saddr; }; }; struct dst_ops; struct xfrm_state; struct uncached_list; struct lwtunnel_state; struct dst_entry { struct net_device *dev; struct dst_ops *ops; long unsigned int _metrics; long unsigned int expires; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); short unsigned int flags; short int obsolete; short unsigned int header_len; short unsigned int trailer_len; rcuref_t __rcuref; int __use; long unsigned int lastuse; struct callback_head callback_head; short int error; short int __pad; __u32 tclassid; netdevice_tracker dev_tracker; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; struct lwtunnel_state *lwtstate; }; struct dst_metrics { u32 metrics[17]; refcount_t refcnt; }; struct neighbour; struct dst_ops { short unsigned int family; unsigned int gc_thresh; void (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *); void (*negative_advice)(struct sock *, struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); void (*confirm_neigh)(const struct dst_entry *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; long: 64; long: 64; long: 64; long: 64; }; struct uart_8250_port; struct uart_8250_dma { int (*tx_dma)(struct uart_8250_port *); int (*rx_dma)(struct uart_8250_port *); void (*prepare_tx_dma)(struct uart_8250_port *); void (*prepare_rx_dma)(struct uart_8250_port *); dma_filter_fn fn; void *rx_param; void *tx_param; struct dma_slave_config rxconf; struct dma_slave_config txconf; struct dma_chan *rxchan; struct dma_chan *txchan; phys_addr_t rx_dma_addr; phys_addr_t tx_dma_addr; dma_addr_t rx_addr; dma_addr_t tx_addr; dma_cookie_t rx_cookie; dma_cookie_t tx_cookie; void *rx_buf; size_t rx_size; size_t tx_size; unsigned char tx_running; unsigned char tx_err; unsigned char rx_running; }; struct dw8250_port_data { int line; struct uart_8250_dma dma; u32 cpr_value; u8 dlf_size; bool hw_rs485_support; }; struct dw_dma; struct dw_dma_platform_data; struct dw_dma_chip { struct device *dev; int id; int irq; void *regs; struct clk *clk; struct dw_dma *dw; const struct dw_dma_platform_data *pdata; }; struct dw_dma_platform_data { u32 nr_masters; u32 nr_channels; u32 chan_allocation_order; u32 
chan_priority; u32 block_size; u32 data_width[4]; u32 multi_block[8]; u32 max_burst[8]; u32 protctl; u32 quirks; }; struct dw_dma_slave { struct device *dma_dev; u8 src_id; u8 dst_id; u8 m_master; u8 p_master; u8 channels; bool hs_polarity; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct dx_entry { __le32 hash; __le32 block; }; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_hash_info { u32 hash; u32 minor_hash; int hash_version; u32 *seed; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[0]; }; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; u8 indirect_levels; u8 unused_flags; }; struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info info; struct dx_entry entries[0]; }; struct dx_tail { u32 dt_reserved; __le32 dt_checksum; }; struct dyn_arch_ftrace {}; struct dyn_event_operations; struct dyn_event { struct list_head list; struct dyn_event_operations *ops; }; struct dyn_event_operations { struct list_head list; int (*create)(const char *); int (*show)(struct seq_file *, struct dyn_event *); bool (*is_busy)(struct dyn_event *); int (*free)(struct dyn_event *); bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); }; struct dyn_ftrace { long unsigned int ip; long unsigned int flags; struct dyn_arch_ftrace arch; }; struct dynevent_arg { const char *str; char separator; }; struct dynevent_arg_pair { const char *lhs; const char *rhs; char operator; char separator; }; struct seq_buf { char *buffer; size_t size; size_t len; }; struct dynevent_cmd; typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); struct dynevent_cmd { struct seq_buf seq; const char *event_name; unsigned int n_fields; enum dynevent_type type; dynevent_create_fn_t run_command; void *private_data; }; struct e820_entry { u64 addr; u64 size; enum e820_type type; } __attribute__((packed)); struct e820_table { __u32 nr_entries; struct e820_entry entries[320]; }; struct early_boot_kfree_rcu { struct callback_head rh; }; struct early_load_data { u32 old_rev; u32 new_rev; }; struct uart_icount { __u32 cts; __u32 dsr; __u32 rng; __u32 dcd; __u32 rx; __u32 tx; __u32 frame; __u32 overrun; __u32 parity; __u32 brk; __u32 buf_overrun; }; struct serial_rs485 { __u32 flags; __u32 delay_rts_before_send; __u32 delay_rts_after_send; union { __u32 padding[5]; struct { __u8 addr_recv; __u8 addr_dest; __u8 padding0[2]; __u32 padding1[4]; }; }; }; struct gpio_desc; struct serial_iso7816 { __u32 flags; __u32 tg; __u32 sc_fi; __u32 sc_di; __u32 clk; __u32 reserved[5]; }; struct ktermios; struct uart_state; struct uart_ops; struct serial_port_device; struct uart_port { spinlock_t lock; long unsigned int iobase; unsigned char *membase; unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); int (*startup)(struct uart_port *); void (*shutdown)(struct 
uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int, unsigned int); void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); unsigned int ctrl_id; unsigned int port_id; unsigned int irq; long unsigned int irqflags; unsigned int uartclk; unsigned int fifosize; unsigned char x_char; unsigned char regshift; unsigned char iotype; unsigned char quirks; unsigned int read_status_mask; unsigned int ignore_status_mask; struct uart_state *state; struct uart_icount icount; struct console *cons; upf_t flags; upstat_t status; bool hw_stopped; unsigned int mctrl; unsigned int frame_time; unsigned int type; const struct uart_ops *ops; unsigned int custom_divisor; unsigned int line; unsigned int minor; resource_size_t mapbase; resource_size_t mapsize; struct device *dev; struct serial_port_device *port_dev; long unsigned int sysrq; u8 sysrq_ch; unsigned char has_sysrq; unsigned char sysrq_seq; unsigned char hub6; unsigned char suspended; unsigned char console_reinit; const char *name; struct attribute_group *attr_group; const struct attribute_group **tty_groups; struct serial_rs485 rs485; struct serial_rs485 rs485_supported; struct gpio_desc *rs485_term_gpio; struct gpio_desc *rs485_rx_during_tx_gpio; struct serial_iso7816 iso7816; void *private_data; }; struct earlycon_device { struct console *con; struct uart_port port; char options[32]; unsigned int baud; }; struct earlycon_id { char name[15]; char name_term; char compatible[128]; int (*setup)(struct earlycon_device *, const char *); }; struct ebitmap_node { struct ebitmap_node *next; long unsigned int maps[6]; u32 startbit; }; struct ecc_point { u64 *x; u64 *y; u8 ndigits; }; struct ecc_curve { char *name; u32 nbits; struct ecc_point g; u64 *p; u64 *n; u64 *a; u64 *b; }; struct ecdh { char *key; short unsigned int key_size; }; struct ecdh_ctx { unsigned int curve_id; unsigned int ndigits; u64 private_key[9]; }; struct eee_config { u32 tx_lpi_timer; bool tx_lpi_enabled; bool eee_enabled; }; struct ethtool_keee { long unsigned int supported[2]; long unsigned int advertised[2]; long unsigned int lp_advertised[2]; u32 tx_lpi_timer; bool tx_lpi_enabled; bool eee_active; bool eee_enabled; }; struct eee_reply_data { struct ethnl_reply_data base; struct ethtool_keee eee; }; struct eeprom_reply_data { struct ethnl_reply_data base; u32 length; u8 *data; }; struct ethnl_req_info { struct net_device *dev; netdevice_tracker dev_tracker; u32 flags; u32 phy_index; }; struct eeprom_req_info { struct ethnl_req_info base; u32 offset; u32 length; u8 page; u8 bank; u8 i2c_address; }; typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); typedef efi_status_t efi_set_time_t(efi_time_t *); typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *); typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long 
unsigned int); typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *); typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *); struct efi_memory_map { phys_addr_t phys_map; void *map; void *map_end; int nr_map; long unsigned int desc_version; long unsigned int desc_size; long unsigned int flags; }; struct efi { const efi_runtime_services_t *runtime; unsigned int runtime_version; unsigned int runtime_supported_mask; long unsigned int acpi; long unsigned int acpi20; long unsigned int smbios; long unsigned int smbios3; long unsigned int esrt; long unsigned int tpm_log; long unsigned int tpm_final_log; long unsigned int mokvar_table; long unsigned int coco_secret; long unsigned int unaccepted; efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; efi_set_wakeup_time_t *set_wakeup_time; efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_info_t *query_variable_info; efi_query_variable_info_t *query_variable_info_nonblocking; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; struct efi_memory_map memmap; long unsigned int flags; }; struct efi_mem_range { struct range range; u64 attribute; }; struct efi_memory_map_data { phys_addr_t phys_map; long unsigned int size; long unsigned int desc_version; long unsigned int desc_size; long unsigned int flags; }; union efi_rts_args { struct { efi_time_t *time; efi_time_cap_t *capabilities; } GET_TIME; struct { efi_time_t *time; } SET_TIME; struct { efi_bool_t *enabled; efi_bool_t *pending; efi_time_t *time; } GET_WAKEUP_TIME; struct { efi_bool_t enable; efi_time_t *time; } SET_WAKEUP_TIME; struct { efi_char16_t *name; efi_guid_t *vendor; u32 *attr; long unsigned int *data_size; void *data; } GET_VARIABLE; struct { long unsigned int *name_size; efi_char16_t *name; efi_guid_t *vendor; } GET_NEXT_VARIABLE; struct { efi_char16_t *name; efi_guid_t *vendor; u32 attr; long unsigned int data_size; void *data; } SET_VARIABLE; struct { u32 attr; u64 *storage_space; u64 *remaining_space; u64 *max_variable_size; } QUERY_VARIABLE_INFO; struct { u32 *high_count; } GET_NEXT_HIGH_MONO_COUNT; struct { efi_capsule_header_t **capsules; long unsigned int count; long unsigned int sg_list; } UPDATE_CAPSULE; struct { efi_capsule_header_t **capsules; long unsigned int count; u64 *max_size; int *reset_type; } QUERY_CAPSULE_CAPS; struct { efi_status_t (*acpi_prm_handler)(u64, void *); u64 param_buffer_addr; void *context; } ACPI_PRM_HANDLER; }; struct efi_runtime_map_entry { efi_memory_desc_t md; struct kobject kobj; }; struct efi_runtime_work { union efi_rts_args *args; efi_status_t status; struct work_struct work; enum efi_rts_ids efi_rts_id; struct completion efi_rts_comp; const void *caller; }; struct efi_setup_data { u64 fw_vendor; u64 __unused; u64 tables; u64 smbios; u64 reserved[8]; }; struct efi_system_resource_entry_v1 { efi_guid_t fw_class; u32 fw_type; u32 fw_version; u32 lowest_supported_fw_version; u32 capsule_flags; u32 last_attempt_version; u32 last_attempt_status; }; struct efi_system_resource_table { u32 fw_resource_count; u32 fw_resource_count_max; u64 fw_resource_version; u8 entries[0]; }; struct efi_tcg2_final_events_table { u64 version; 
u64 nr_events; u8 events[0]; }; struct efi_unaccepted_memory { u32 version; u32 unit_size; u64 phys_base; u64 size; long unsigned int bitmap[0]; }; struct efifb_dmi_info { char *optname; long unsigned int base; int stride; int width; int height; int flags; }; typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool); struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_store_t *query_variable_store; efi_query_variable_info_t *query_variable_info; }; struct efivars { struct kset *kset; const struct efivar_operations *ops; }; struct ei_entry { struct list_head list; long unsigned int start_addr; long unsigned int end_addr; int etype; void *priv; }; struct elevator_queue; struct io_cq; struct elevator_mq_ops { int (*init_sched)(struct request_queue *, struct elevator_type *); void (*exit_sched)(struct elevator_queue *); int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); void (*prepare_request)(struct request *); void (*finish_request)(struct request *); void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, blk_insert_t); struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); void (*requeue_request)(struct request *); struct request * (*former_request)(struct request_queue *, struct request *); struct request * (*next_request)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); }; struct elevator_queue { struct elevator_type *type; void *elevator_data; struct kobject kobj; struct mutex sysfs_lock; long unsigned int flags; struct hlist_head hash[64]; }; struct elv_fs_entry; struct elevator_type { struct kmem_cache *icq_cache; struct elevator_mq_ops ops; size_t icq_size; size_t icq_align; struct elv_fs_entry *elevator_attrs; const char *elevator_name; const char *elevator_alias; struct module *elevator_owner; const struct blk_mq_debugfs_attr *queue_debugfs_attrs; const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; char icq_cache_name[22]; struct list_head list; }; struct elf32_hdr { unsigned char e_ident[16]; Elf32_Half e_type; Elf32_Half e_machine; Elf32_Word e_version; Elf32_Addr e_entry; Elf32_Off e_phoff; Elf32_Off e_shoff; Elf32_Word e_flags; Elf32_Half e_ehsize; Elf32_Half e_phentsize; Elf32_Half e_phnum; Elf32_Half e_shentsize; Elf32_Half e_shnum; Elf32_Half e_shstrndx; }; typedef struct elf32_hdr Elf32_Ehdr; struct elf32_note { Elf32_Word n_namesz; Elf32_Word n_descsz; Elf32_Word n_type; }; typedef struct elf32_note Elf32_Nhdr; struct elf32_phdr { Elf32_Word p_type; Elf32_Off p_offset; Elf32_Addr p_vaddr; Elf32_Addr p_paddr; Elf32_Word p_filesz; Elf32_Word p_memsz; Elf32_Word p_flags; Elf32_Word p_align; }; typedef struct elf32_phdr Elf32_Phdr; struct elf64_hdr { unsigned char e_ident[16]; 
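/* e_ident[16] (just above) begins with the ELF magic bytes 0x7f 'E' 'L' 'F',
 * followed by the class (32/64-bit), data encoding (endianness), version and
 * OS/ABI identification bytes. (Descriptive note, not part of the generated dump.) */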
Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; }; typedef struct elf64_hdr Elf64_Ehdr; struct elf64_note { Elf64_Word n_namesz; Elf64_Word n_descsz; Elf64_Word n_type; }; typedef struct elf64_note Elf64_Nhdr; struct elf64_phdr { Elf64_Word p_type; Elf64_Word p_flags; Elf64_Off p_offset; Elf64_Addr p_vaddr; Elf64_Addr p_paddr; Elf64_Xword p_filesz; Elf64_Xword p_memsz; Elf64_Xword p_align; }; typedef struct elf64_phdr Elf64_Phdr; struct elf64_rela { Elf64_Addr r_offset; Elf64_Xword r_info; Elf64_Sxword r_addend; }; typedef struct elf64_rela Elf64_Rela; struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; }; typedef struct elf64_shdr Elf64_Shdr; struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; }; typedef struct elf64_sym Elf64_Sym; struct memelfnote { const char *name; int type; unsigned int datasz; void *data; }; struct siginfo { union { struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; }; int _si_pad[32]; }; }; typedef struct siginfo siginfo_t; struct elf_thread_core_info; struct elf_note_info { struct elf_thread_core_info *thread; struct memelfnote psinfo; struct memelfnote signote; struct memelfnote auxv; struct memelfnote files; siginfo_t csigdata; size_t size; int thread_notes; }; struct elf_prpsinfo { char pr_state; char pr_sname; char pr_zomb; char pr_nice; long unsigned int pr_flag; __kernel_uid_t pr_uid; __kernel_gid_t pr_gid; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; char pr_fname[16]; char pr_psargs[80]; }; struct elf_siginfo { int si_signo; int si_code; int si_errno; }; struct elf_prstatus_common { struct elf_siginfo pr_info; short int pr_cursig; long unsigned int pr_sigpend; long unsigned int pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct __kernel_old_timeval pr_utime; struct __kernel_old_timeval pr_stime; struct __kernel_old_timeval pr_cutime; struct __kernel_old_timeval pr_cstime; }; struct elf_prstatus { struct elf_prstatus_common common; elf_gregset_t pr_reg; int pr_fpvalid; }; struct elf_thread_core_info { struct elf_thread_core_info *next; struct task_struct *task; struct elf_prstatus prstatus; struct memelfnote notes[0]; }; struct elv_fs_entry { struct attribute attr; ssize_t (*show)(struct elevator_queue *, char *); ssize_t (*store)(struct elevator_queue *, const char *, size_t); }; struct em_perf_table; struct em_perf_domain { struct em_perf_table *em_table; int nr_perf_states; long unsigned int flags; long unsigned int cpus[0]; }; struct em_perf_state { long unsigned int performance; long unsigned int frequency; long unsigned int power; long unsigned int cost; long unsigned int flags; }; struct em_perf_table { struct callback_head rcu; struct kref kref; struct em_perf_state state[0]; }; struct trace_event_file; struct enable_trigger_data { struct trace_event_file *file; bool enable; bool hist; }; struct energy_env { long unsigned int task_busy_time; long unsigned int pd_busy_time; long unsigned int cpu_cap; long unsigned int pd_cap; }; struct entropy_timer_state { long unsigned int entropy; struct 
timer_list timer; atomic_t samples; unsigned int samples_per_bit; }; typedef struct poll_table_struct poll_table; struct epitem; struct ep_pqueue { poll_table pt; struct epitem *epi; }; struct epoll_filefd { struct file *file; int fd; } __attribute__((packed)); struct epoll_event { __poll_t events; __u64 data; } __attribute__((packed)); struct eppoll_entry; struct eventpoll; struct epitem { union { struct rb_node rbn; struct callback_head rcu; }; struct list_head rdllink; struct epitem *next; struct epoll_filefd ffd; bool dying; struct eppoll_entry *pwqlist; struct eventpoll *ep; struct hlist_node fllink; struct wakeup_source *ws; struct epoll_event event; }; struct epitems_head { struct hlist_head epitems; struct epitems_head *next; }; struct epoll_params { __u32 busy_poll_usecs; __u16 busy_poll_budget; __u8 prefer_busy_poll; __u8 __pad; }; struct eppoll_entry { struct eppoll_entry *next; struct epitem *base; wait_queue_entry_t wait; wait_queue_head_t *whead; }; struct trace_eprobe; struct eprobe_data { struct trace_event_file *file; struct trace_eprobe *ep; }; struct eprobe_trace_entry_head { struct trace_entry ent; }; struct equiv_cpu_entry { u32 installed_cpu; u32 fixed_errata_mask; u32 fixed_errata_compare; u16 equiv_cpu; u16 res; }; struct equiv_cpu_table { unsigned int num_entries; struct equiv_cpu_entry *entry; }; struct er_account { raw_spinlock_t lock; u64 config; u64 reg; atomic_t ref; }; struct err_info { const char **errs; u8 type; u16 pos; u64 ts; }; struct error_injection_entry { long unsigned int addr; int etype; }; struct erspan_base_hdr { __u8 vlan_upper: 4; __u8 ver: 4; __u8 vlan: 8; __u8 session_id_upper: 2; __u8 t: 1; __u8 en: 2; __u8 cos: 3; __u8 session_id: 8; }; struct erspan_md2 { __be32 timestamp; __be16 sgt; __u8 hwid_upper: 2; __u8 ft: 5; __u8 p: 1; __u8 o: 1; __u8 gra: 2; __u8 dir: 1; __u8 hwid: 4; }; struct erspan_metadata { int version; union { __be32 index; struct erspan_md2 md2; } u; }; struct ip_esp_hdr; struct esp_info { struct ip_esp_hdr *esph; __be64 seqno; int tfclen; int tailen; int plen; int clen; int len; int nfrags; __u8 proto; bool inplace; }; struct esp_output_extra { __be32 seqhi; u32 esphoff; }; struct ip_options { __be32 faddr; __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; unsigned char ts; unsigned char is_strictroute: 1; unsigned char srr_is_hit: 1; unsigned char is_changed: 1; unsigned char rr_needaddr: 1; unsigned char ts_needtime: 1; unsigned char ts_needaddr: 1; unsigned char router_alert; unsigned char cipso; unsigned char __pad2; unsigned char __data[0]; }; struct inet_skb_parm { int iif; struct ip_options opt; u16 flags; u16 frag_max_size; }; struct inet6_skb_parm { int iif; __be16 ra; __u16 dst0; __u16 srcrt; __u16 dst1; __u16 lastopt; __u16 nhoff; __u16 flags; __u16 dsthao; __u16 frag_max_size; __u16 srhoff; }; struct ip_tunnel; struct ip6_tnl; struct xfrm_tunnel_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; union { struct ip_tunnel *ip4; struct ip6_tnl *ip6; } tunnel; }; struct xfrm_skb_cb { struct xfrm_tunnel_skb_cb header; union { struct { __u32 low; __u32 hi; } output; struct { __be32 low; __be32 hi; } input; } seq; }; struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; struct esre_entry; struct esre_attribute { struct attribute attr; ssize_t (*show)(struct esre_entry *, char *); ssize_t (*store)(struct esre_entry *, const char *, size_t); }; struct esre_entry { union { struct efi_system_resource_entry_v1 *esre1; } esre; struct kobject kobj; struct list_head 
list; }; struct estack_pages { u32 offs; u16 size; u16 type; }; struct ethnl_request_ops; struct ethnl_dump_ctx { const struct ethnl_request_ops *ops; struct ethnl_req_info *req_info; struct ethnl_reply_data *reply_data; long unsigned int pos_ifindex; }; struct ethnl_module_fw_flash_ntf_params { u32 portid; u32 seq; bool closed_sock; }; struct phy_req_info; struct ethnl_phy_dump_ctx { struct phy_req_info *phy_req_info; long unsigned int ifindex; long unsigned int phy_index; }; struct ethnl_request_ops { u8 request_cmd; u8 reply_cmd; u16 hdr_attr; unsigned int req_info_size; unsigned int reply_data_size; bool allow_nodev_do; u8 set_ntf_cmd; int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, const struct genl_info *); int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); void (*cleanup_data)(struct ethnl_reply_data *); int (*set_validate)(struct ethnl_req_info *, struct genl_info *); int (*set)(struct ethnl_req_info *, struct genl_info *); }; struct ethnl_sock_priv { struct net_device *dev; u32 portid; enum ethnl_sock_type type; }; struct ethnl_tunnel_info_dump_ctx { struct ethnl_req_info req_info; long unsigned int ifindex; }; struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; }; struct ethtool_ah_espip6_spec { __be32 ip6src[4]; __be32 ip6dst[4]; __be32 spi; __u8 tclass; }; struct ethtool_c33_pse_ext_state_info { enum ethtool_c33_pse_ext_state c33_pse_ext_state; union { enum ethtool_c33_pse_ext_substate_error_condition error_condition; enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; enum ethtool_c33_pse_ext_substate_short_detected short_detected; u32 __c33_pse_ext_substate; }; }; struct ethtool_c33_pse_pw_limit_range { u32 min; u32 max; }; struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2]; }; struct ethtool_cmis_cdb { u8 cmis_rev; u8 read_write_len_ext; u16 max_completion_time; }; struct ethtool_cmis_cdb_request { __be16 id; union { struct { __be16 epl_len; u8 lpl_len; u8 chk_code; u8 resv1; u8 resv2; u8 payload[120]; }; struct { __be16 epl_len; u8 lpl_len; u8 chk_code; u8 resv1; u8 resv2; u8 payload[120]; } body; }; }; struct ethtool_cmis_cdb_cmd_args { struct ethtool_cmis_cdb_request req; u16 max_duration; u8 read_write_len_ext; u8 msleep_pre_rpl; u8 rpl_exp_len; u8 flags; char *err_msg; }; struct ethtool_cmis_cdb_rpl_hdr { u8 rpl_len; u8 rpl_chk_code; }; struct ethtool_cmis_cdb_rpl { struct ethtool_cmis_cdb_rpl_hdr hdr; u8 payload[120]; }; struct ethtool_module_fw_flash_params { __be32 password; u8 password_valid: 1; }; struct ethtool_cmis_fw_update_params { struct net_device *dev; struct ethtool_module_fw_flash_params params; struct ethnl_module_fw_flash_ntf_params ntf_params; const struct firmware *fw; }; struct ethtool_flash { __u32 cmd; __u32 region; char data[128]; }; 
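/*
 * Illustrative sketch, not part of the generated type dump: how userspace
 * typically fills the struct ethtool_drvinfo defined just below, via the
 * SIOCETHTOOL ioctl. The interface name "eth0" in the usage note is an
 * arbitrary example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int print_drvinfo(const char *ifname)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr = { 0 };
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket serves as an ioctl handle */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* kernel copies the result back into info */
	if (ret == 0)
		printf("%s: driver %s, version %s, bus %s\n",
		       ifname, info.driver, info.version, info.bus_info);
	close(fd);
	return ret;
}
/* Usage: print_drvinfo("eth0"); */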
struct ethtool_drvinfo { __u32 cmd; char driver[32]; char version[32]; char fw_version[32]; char bus_info[32]; char erom_version[32]; char reserved2[12]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; }; struct ethtool_devlink_compat { struct devlink *devlink; union { struct ethtool_flash efl; struct ethtool_drvinfo info; }; }; struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0]; }; struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2]; }; struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0]; }; struct ethtool_eth_ctrl_stats { enum ethtool_mac_stats_src src; union { struct { u64 MACControlFramesTransmitted; u64 MACControlFramesReceived; u64 UnsupportedOpcodesReceived; }; struct { u64 MACControlFramesTransmitted; u64 MACControlFramesReceived; u64 UnsupportedOpcodesReceived; } stats; }; }; struct ethtool_eth_mac_stats { enum ethtool_mac_stats_src src; union { struct { u64 FramesTransmittedOK; u64 SingleCollisionFrames; u64 MultipleCollisionFrames; u64 FramesReceivedOK; u64 FrameCheckSequenceErrors; u64 AlignmentErrors; u64 OctetsTransmittedOK; u64 FramesWithDeferredXmissions; u64 LateCollisions; u64 FramesAbortedDueToXSColls; u64 FramesLostDueToIntMACXmitError; u64 CarrierSenseErrors; u64 OctetsReceivedOK; u64 FramesLostDueToIntMACRcvError; u64 MulticastFramesXmittedOK; u64 BroadcastFramesXmittedOK; u64 FramesWithExcessiveDeferral; u64 MulticastFramesReceivedOK; u64 BroadcastFramesReceivedOK; u64 InRangeLengthErrors; u64 OutOfRangeLengthField; u64 FrameTooLongErrors; }; struct { u64 FramesTransmittedOK; u64 SingleCollisionFrames; u64 MultipleCollisionFrames; u64 FramesReceivedOK; u64 FrameCheckSequenceErrors; u64 AlignmentErrors; u64 OctetsTransmittedOK; u64 FramesWithDeferredXmissions; u64 LateCollisions; u64 FramesAbortedDueToXSColls; u64 FramesLostDueToIntMACXmitError; u64 CarrierSenseErrors; u64 OctetsReceivedOK; u64 FramesLostDueToIntMACRcvError; u64 MulticastFramesXmittedOK; u64 BroadcastFramesXmittedOK; u64 FramesWithExcessiveDeferral; u64 MulticastFramesReceivedOK; u64 BroadcastFramesReceivedOK; u64 InRangeLengthErrors; u64 OutOfRangeLengthField; u64 FrameTooLongErrors; } stats; }; }; struct ethtool_eth_phy_stats { enum ethtool_mac_stats_src src; union { struct { u64 SymbolErrorDuringCarrier; }; struct { u64 SymbolErrorDuringCarrier; } stats; }; }; struct ethtool_fec_stat { u64 total; u64 lanes[8]; }; struct ethtool_fec_stats { struct ethtool_fec_stat corrected_blocks; struct ethtool_fec_stat uncorrectable_blocks; struct ethtool_fec_stat corrected_bits; }; struct ethtool_fecparam { __u32 cmd; __u32 active_fec; __u32 fec; __u32 reserved; }; struct ethtool_flow_ext { __u8 padding[2]; unsigned char h_dest[6]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2]; }; struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; }; struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; }; struct ethtool_tcpip6_spec { __be32 ip6src[4]; __be32 ip6dst[4]; __be16 psrc; __be16 pdst; __u8 tclass; }; struct ethtool_usrip6_spec { __be32 ip6src[4]; __be32 ip6dst[4]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct 
ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52]; }; struct ethtool_forced_speed_map { u32 speed; long unsigned int caps[2]; const u32 *cap_arr; u32 arr_size; }; struct ethtool_get_features_block { __u32 available; __u32 requested; __u32 active; __u32 never_changed; }; struct ethtool_gfeatures { __u32 cmd; __u32 size; struct ethtool_get_features_block features[0]; }; struct ethtool_gstrings { __u32 cmd; __u32 string_set; __u32 len; __u8 data[0]; }; struct ethtool_link_ext_state_info { enum ethtool_link_ext_state link_ext_state; union { enum ethtool_link_ext_substate_autoneg autoneg; enum ethtool_link_ext_substate_link_training link_training; enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; enum ethtool_link_ext_substate_cable_issue cable_issue; enum ethtool_link_ext_substate_module module; u32 __link_ext_substate; }; }; struct ethtool_link_ext_stats { u64 link_down_events; }; struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u8 transceiver; __u8 master_slave_cfg; __u8 master_slave_state; __u8 rate_matching; __u32 reserved[7]; __u32 link_mode_masks[0]; }; struct ethtool_link_ksettings { struct ethtool_link_settings base; struct { long unsigned int supported[2]; long unsigned int advertising[2]; long unsigned int lp_advertising[2]; } link_modes; u32 lanes; }; struct ethtool_link_usettings { struct ethtool_link_settings base; struct { __u32 supported[4]; __u32 advertising[4]; __u32 lp_advertising[4]; } link_modes; }; struct ethtool_mm_cfg { u32 verify_time; bool verify_enabled; bool tx_enabled; bool pmac_enabled; u32 tx_min_frag_size; }; struct ethtool_mm_state { u32 verify_time; u32 max_verify_time; enum ethtool_mm_verify_status verify_status; bool tx_enabled; bool tx_active; bool pmac_enabled; bool verify_enabled; u32 tx_min_frag_size; u32 rx_min_frag_size; }; struct ethtool_mm_stats { u64 MACMergeFrameAssErrorCount; u64 MACMergeFrameSmdErrorCount; u64 MACMergeFrameAssOkCount; u64 MACMergeFragCountRx; u64 MACMergeFragCountTx; u64 MACMergeHoldCount; }; struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8]; }; struct ethtool_module_eeprom { u32 offset; u32 length; u8 page; u8 bank; u8 i2c_address; u8 *data; }; struct ethtool_module_fw_flash { struct list_head list; netdevice_tracker dev_tracker; struct work_struct work; struct ethtool_cmis_fw_update_params fw_update; }; struct ethtool_module_power_mode_params { enum ethtool_module_power_mode_policy policy; enum ethtool_module_power_mode mode; }; struct ethtool_netdev_state { struct xarray rss_ctx; struct mutex rss_lock; unsigned int wol_enabled: 1; unsigned int module_fw_flash_in_progress: 1; }; struct ethtool_regs; struct ethtool_wolinfo; struct ethtool_ringparam; struct kernel_ethtool_ringparam; struct ethtool_pause_stats; struct ethtool_pauseparam; struct ethtool_test; struct ethtool_stats; struct ethtool_rxnfc; struct ethtool_rxfh_param; struct ethtool_rxfh_context; struct kernel_ethtool_ts_info; struct 
ethtool_ts_stats; struct ethtool_tunable; struct ethtool_rmon_stats; struct ethtool_rmon_hist_range; struct ethtool_ops { u32 cap_link_lanes_supported: 1; u32 cap_rss_ctx_supported: 1; u32 cap_rss_sym_xor_supported: 1; u32 rxfh_per_ctx_key: 1; u32 rxfh_indir_space; u16 rxfh_key_space; u16 rxfh_priv_size; u32 rxfh_max_num_contexts; u32 supported_coalesce_params; u32 supported_ring_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); void (*get_link_ext_stats)(struct net_device *, struct ethtool_link_ext_stats *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *); void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32, u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *); int (*create_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); int (*modify_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, const struct ethtool_rxfh_param *, struct netlink_ext_ack *); int (*remove_rxfh_context)(struct net_device *, struct ethtool_rxfh_context *, u32, struct netlink_ext_ack *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int 
(*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); void (*get_ts_stats)(struct net_device *, struct ethtool_ts_stats *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_keee *); int (*set_eee)(struct net_device *, struct ethtool_keee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); int (*set_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); int (*get_module_power_mode)(struct net_device *, struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); int (*set_module_power_mode)(struct net_device *, const struct ethtool_module_power_mode_params *, struct netlink_ext_ack *); int (*get_mm)(struct net_device *, struct ethtool_mm_state *); int (*set_mm)(struct net_device *, struct ethtool_mm_cfg *, struct netlink_ext_ack *); void (*get_mm_stats)(struct net_device *, struct ethtool_mm_stats *); }; struct ethtool_pause_stats { enum ethtool_mac_stats_src src; union { struct { u64 tx_pause_frames; u64 rx_pause_frames; }; struct { u64 tx_pause_frames; u64 rx_pause_frames; } stats; }; }; struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; }; struct ethtool_per_queue_op { __u32 cmd; __u32 sub_command; __u32 queue_mask[128]; char data[0]; }; struct ethtool_perm_addr { __u32 cmd; __u32 size; __u8 data[0]; }; struct phy_device; struct phy_plca_cfg; struct phy_plca_status; struct phy_tdr_config; struct ethtool_phy_ops { int (*get_sset_count)(struct phy_device *); int (*get_strings)(struct phy_device *, u8 *); int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *, struct netlink_ext_ack *); int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); int 
(*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); }; struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0]; }; struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; }; struct ethtool_rmon_hist_range { u16 low; u16 high; }; struct ethtool_rmon_stats { enum ethtool_mac_stats_src src; union { struct { u64 undersize_pkts; u64 oversize_pkts; u64 fragments; u64 jabbers; u64 hist[10]; u64 hist_tx[10]; }; struct { u64 undersize_pkts; u64 oversize_pkts; u64 fragments; u64 jabbers; u64 hist[10]; u64 hist_tx[10]; } stats; }; }; struct ethtool_rx_flow_key { struct flow_dissector_key_basic basic; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; struct flow_dissector_key_ip ip; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_eth_addrs eth_addrs; }; struct flow_dissector { long long unsigned int used_keys; short unsigned int offset[33]; }; struct ethtool_rx_flow_match { struct flow_dissector dissector; struct ethtool_rx_flow_key key; struct ethtool_rx_flow_key mask; }; struct flow_rule; struct ethtool_rx_flow_rule { struct flow_rule *rule; long unsigned int priv[0]; }; struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; }; struct ethtool_rx_flow_spec_input { const struct ethtool_rx_flow_spec *fs; u32 rss_ctx; }; struct ethtool_rxfh { __u32 cmd; __u32 rss_context; __u32 indir_size; __u32 key_size; __u8 hfunc; __u8 input_xfrm; __u8 rsvd8[2]; __u32 rsvd32; __u32 rss_config[0]; }; struct ethtool_rxfh_context { u32 indir_size; u32 key_size; u16 priv_size; u8 hfunc; u8 input_xfrm; u8 indir_configured: 1; u8 key_configured: 1; u32 key_off; long: 0; u8 data[0]; }; struct ethtool_rxfh_param { u8 hfunc; u32 indir_size; u32 *indir; u32 key_size; u8 *key; u32 rss_context; u8 rss_delete; u8 input_xfrm; }; struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; union { __u32 rule_cnt; __u32 rss_context; }; __u32 rule_locs[0]; }; struct ethtool_set_features_block { __u32 valid; __u32 requested; }; struct ethtool_sfeatures { __u32 cmd; __u32 size; struct ethtool_set_features_block features[0]; }; struct ethtool_sset_info { __u32 cmd; __u32 reserved; __u64 sset_mask; __u32 data[0]; }; struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0]; }; struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0]; }; struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3]; __u32 rx_filters; __u32 rx_reserved[3]; }; struct ethtool_ts_stats { union { struct { u64 pkts; u64 lost; u64 err; }; struct { u64 pkts; u64 lost; u64 err; } tx_stats; }; }; struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0]; }; struct ethtool_value { __u32 cmd; __u32 data; }; struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6]; }; struct input_handler; struct input_handle { void *private; int open; const char *name; struct input_dev *dev; struct input_handler *handler; struct list_head d_node; struct list_head h_node; }; 
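/*
 * Illustrative sketch, not part of the generated type dump: draining events
 * from an evdev character device into the struct input_event defined a few
 * declarations below. The device path "/dev/input/event0" is an arbitrary
 * example.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/input.h>

static void dump_input_events(const char *path)
{
	struct input_event ev;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return;
	/* evdev delivers whole struct input_event records; a short read means EOF or error */
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("type=%u code=%u value=%d\n", ev.type, ev.code, ev.value);
	close(fd);
}
/* Usage: dump_input_events("/dev/input/event0"); */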
struct evdev_client; struct evdev { int open; struct input_handle handle; struct evdev_client *grab; struct list_head client_list; spinlock_t client_lock; struct mutex mutex; struct device dev; struct cdev cdev; bool exist; }; struct input_event { __kernel_ulong_t __sec; __kernel_ulong_t __usec; __u16 type; __u16 code; __s32 value; }; struct fasync_struct; struct evdev_client { unsigned int head; unsigned int tail; unsigned int packet_head; spinlock_t buffer_lock; wait_queue_head_t wait; struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; enum input_clock_type clk_type; bool revoked; long unsigned int *evmasks[32]; unsigned int bufsize; struct input_event buffer[0]; }; struct event_trigger_data; struct event_trigger_ops; struct event_command { struct list_head list; char *name; enum event_trigger_type trigger_type; int flags; int (*parse)(struct event_command *, struct trace_event_file *, char *, char *, char *); int (*reg)(char *, struct event_trigger_data *, struct trace_event_file *); void (*unreg)(char *, struct event_trigger_data *, struct trace_event_file *); void (*unreg_all)(struct trace_event_file *); int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); struct event_trigger_ops * (*get_trigger_ops)(char *, char *); }; struct event_counter { u32 count; u32 flags; }; struct event_file_link { struct trace_event_file *file; struct list_head list; }; struct prog_entry; struct event_filter { struct prog_entry *prog; char *filter_string; }; struct perf_cpu_context; struct perf_event_context; typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); struct event_function_struct { struct perf_event *event; event_f func; void *data; }; struct event_probe_data { struct trace_event_file *file; long unsigned int count; int ref; bool enable; }; struct event_subsystem { struct list_head list; const char *name; struct event_filter *filter; int ref_count; }; struct event_trigger_data { long unsigned int count; int ref; int flags; struct event_trigger_ops *ops; struct event_command *cmd_ops; struct event_filter *filter; char *filter_str; void *private_data; bool paused; bool paused_tmp; struct list_head list; char *name; struct list_head named_list; struct event_trigger_data *named_data; }; struct ring_buffer_event; struct event_trigger_ops { void (*trigger)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); int (*init)(struct event_trigger_data *); void (*free)(struct event_trigger_data *); int (*print)(struct seq_file *, struct event_trigger_data *); }; struct eventfd_ctx { struct kref kref; wait_queue_head_t wqh; __u64 count; unsigned int flags; int id; }; struct eventfs_attr { int mode; kuid_t uid; kgid_t gid; }; typedef int (*eventfs_callback)(const char *, umode_t *, void **, const struct file_operations **); typedef void (*eventfs_release)(const char *, void *); struct eventfs_entry { const char *name; eventfs_callback callback; eventfs_release release; }; struct eventfs_inode { union { struct list_head list; struct callback_head rcu; }; struct list_head children; const struct eventfs_entry *entries; const char *name; struct eventfs_attr *entry_attrs; void *data; struct eventfs_attr attr; struct kref kref; unsigned int is_freed: 1; unsigned int is_events: 1; unsigned int nr_entries: 30; unsigned int ino; }; struct eventfs_root_inode { struct eventfs_inode ei; struct dentry *events_dir; }; struct eventpoll { struct mutex mtx; wait_queue_head_t wq; 
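/* wq (just above) is the wait queue for tasks blocked in epoll_wait();
 * poll_wait below is used by file->poll() when the epoll fd is itself
 * nested inside another poll/select/epoll. (Descriptive note, not part
 * of the generated dump.) */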
wait_queue_head_t poll_wait; struct list_head rdllist; rwlock_t lock; struct rb_root_cached rbr; struct epitem *ovflist; struct wakeup_source *ws; struct user_struct *user; struct file *file; u64 gen; struct hlist_head refs; refcount_t refcount; unsigned int napi_id; u32 busy_poll_usecs; u16 busy_poll_budget; bool prefer_busy_poll; u8 nests; }; struct evm_ima_xattr_data_hdr { u8 type; }; struct evm_ima_xattr_data { union { struct { u8 type; }; struct evm_ima_xattr_data_hdr hdr; }; u8 data[0]; }; struct exar8250_board; struct exar8250 { unsigned int nr; unsigned int osc_freq; struct exar8250_board *board; void *virt; int line[0]; }; struct exar8250_board { unsigned int num_ports; unsigned int reg_shift; int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int); void (*exit)(struct pci_dev *); }; struct exar8250_platform { int (*rs485_config)(struct uart_port *, struct ktermios *, struct serial_rs485 *); const struct serial_rs485 *rs485_supported; int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); void (*unregister_gpio)(struct uart_8250_port *); }; struct exception_stacks { char DF_stack_guard[0]; char DF_stack[8192]; char NMI_stack_guard[0]; char NMI_stack[8192]; char DB_stack_guard[0]; char DB_stack[8192]; char MCE_stack_guard[0]; char MCE_stack[8192]; char VC_stack_guard[0]; char VC_stack[0]; char VC2_stack_guard[0]; char VC2_stack[0]; char IST_top_guard[0]; }; struct exception_table_entry { int insn; int fixup; int data; }; struct execmem_range { long unsigned int start; long unsigned int end; long unsigned int fallback_start; long unsigned int fallback_end; pgprot_t pgprot; unsigned int alignment; enum execmem_range_flags flags; }; struct execmem_info { struct execmem_range ranges[5]; }; struct execute_work { struct work_struct work; }; struct fid; struct iomap; struct iattr; struct export_operations { int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); int (*get_name)(struct dentry *, char *, struct dentry *); struct dentry * (*get_parent)(struct dentry *); int (*commit_metadata)(struct inode *); int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); long unsigned int flags; }; struct ext4_free_extent { ext4_lblk_t fe_logical; ext4_grpblk_t fe_start; ext4_group_t fe_group; ext4_grpblk_t fe_len; }; struct ext4_prealloc_space; struct ext4_locality_group; struct ext4_allocation_context { struct inode *ac_inode; struct super_block *ac_sb; struct ext4_free_extent ac_o_ex; struct ext4_free_extent ac_g_ex; struct ext4_free_extent ac_b_ex; struct ext4_free_extent ac_f_ex; ext4_grpblk_t ac_orig_goal_len; __u32 ac_flags; __u32 ac_groups_linear_remaining; __u16 ac_groups_scanned; __u16 ac_found; __u16 ac_cX_found[5]; __u16 ac_tail; __u16 ac_buddy; __u8 ac_status; __u8 ac_criteria; __u8 ac_2order; __u8 ac_op; struct folio *ac_bitmap_folio; struct folio *ac_buddy_folio; struct ext4_prealloc_space *ac_pa; struct ext4_locality_group *ac_lg; }; struct ext4_allocation_request { struct inode *inode; unsigned int len; ext4_lblk_t logical; ext4_lblk_t lleft; ext4_lblk_t lright; ext4_fsblk_t goal; ext4_fsblk_t pleft; ext4_fsblk_t pright; unsigned int flags; }; struct ext4_attr { struct attribute attr; short int attr_id; short int attr_ptr; short 
unsigned int attr_size; union { int offset; void *explicit_ptr; } u; }; struct ext4_group_info; struct ext4_buddy { struct folio *bd_buddy_folio; void *bd_buddy; struct folio *bd_bitmap_folio; void *bd_bitmap; struct ext4_group_info *bd_info; struct super_block *bd_sb; __u16 bd_blkbits; ext4_group_t bd_group; }; struct ext4_dir_entry { __le32 inode; __le16 rec_len; __le16 name_len; char name[255]; }; struct ext4_dir_entry_2 { __le32 inode; __le16 rec_len; __u8 name_len; __u8 file_type; char name[255]; }; struct ext4_dir_entry_hash { __le32 hash; __le32 minor_hash; }; struct ext4_dir_entry_tail { __le32 det_reserved_zero1; __le16 det_rec_len; __u8 det_reserved_zero2; __u8 det_reserved_ft; __le32 det_checksum; }; struct ext4_err_translation { int code; int errno; }; struct ext4_es_stats { long unsigned int es_stats_shrunk; struct percpu_counter es_stats_cache_hits; struct percpu_counter es_stats_cache_misses; u64 es_stats_scan_time; u64 es_stats_max_scan_time; struct percpu_counter es_stats_all_cnt; struct percpu_counter es_stats_shk_cnt; }; struct extent_status; struct ext4_es_tree { struct rb_root root; struct extent_status *cache_es; }; struct ext4_extent; struct ext4_extent_idx; struct ext4_extent_header; struct ext4_ext_path { ext4_fsblk_t p_block; __u16 p_depth; __u16 p_maxdepth; struct ext4_extent *p_ext; struct ext4_extent_idx *p_idx; struct ext4_extent_header *p_hdr; struct buffer_head *p_bh; }; struct ext4_extent { __le32 ee_block; __le16 ee_len; __le16 ee_start_hi; __le32 ee_start_lo; }; struct ext4_extent_header { __le16 eh_magic; __le16 eh_entries; __le16 eh_max; __le16 eh_depth; __le32 eh_generation; }; struct ext4_extent_idx { __le32 ei_block; __le32 ei_leaf_lo; __le16 ei_leaf_hi; __u16 ei_unused; }; struct ext4_extent_tail { __le32 et_checksum; }; struct ext4_fc_add_range { __le32 fc_ino; __u8 fc_ex[12]; }; struct ext4_fc_alloc_region { ext4_lblk_t lblk; ext4_fsblk_t pblk; int ino; int len; }; struct ext4_fc_del_range { __le32 fc_ino; __le32 fc_lblk; __le32 fc_len; }; struct ext4_fc_dentry_info { __le32 fc_parent_ino; __le32 fc_ino; __u8 fc_dname[0]; }; struct ext4_fc_dentry_update { int fcd_op; int fcd_parent; int fcd_ino; struct qstr fcd_name; unsigned char fcd_iname[40]; struct list_head fcd_list; struct list_head fcd_dilist; }; struct ext4_fc_head { __le32 fc_features; __le32 fc_tid; }; struct ext4_fc_inode { __le32 fc_ino; __u8 fc_raw_inode[0]; }; struct ext4_fc_replay_state { int fc_replay_num_tags; int fc_replay_expected_off; int fc_current_pass; int fc_cur_tag; int fc_crc; struct ext4_fc_alloc_region *fc_regions; int fc_regions_size; int fc_regions_used; int fc_regions_valid; int *fc_modified_inodes; int fc_modified_inodes_used; int fc_modified_inodes_size; }; struct ext4_fc_stats { unsigned int fc_ineligible_reason_count[10]; long unsigned int fc_num_commits; long unsigned int fc_ineligible_commits; long unsigned int fc_failed_commits; long unsigned int fc_skipped_commits; long unsigned int fc_numblks; u64 s_fc_avg_commit_time; }; struct ext4_fc_tail { __le32 fc_tid; __le32 fc_crc; }; struct ext4_fc_tl { __le16 fc_tag; __le16 fc_len; }; struct ext4_fc_tl_mem { u16 fc_tag; u16 fc_len; }; struct fscrypt_str { unsigned char *name; u32 len; }; struct ext4_filename { const struct qstr *usr_fname; struct fscrypt_str disk_name; struct dx_hash_info hinfo; }; struct ext4_free_data { struct list_head efd_list; struct rb_node efd_node; ext4_group_t efd_group; ext4_grpblk_t efd_start_cluster; ext4_grpblk_t efd_count; tid_t efd_tid; }; struct fscrypt_dummy_policy {}; struct 
ext4_fs_context { char *s_qf_names[3]; struct fscrypt_dummy_policy dummy_enc_policy; int s_jquota_fmt; short unsigned int qname_spec; long unsigned int vals_s_flags; long unsigned int mask_s_flags; long unsigned int journal_devnum; long unsigned int s_commit_interval; long unsigned int s_stripe; unsigned int s_inode_readahead_blks; unsigned int s_want_extra_isize; unsigned int s_li_wait_mult; unsigned int s_max_dir_size_kb; unsigned int journal_ioprio; unsigned int vals_s_mount_opt; unsigned int mask_s_mount_opt; unsigned int vals_s_mount_opt2; unsigned int mask_s_mount_opt2; unsigned int opt_flags; unsigned int spec; u32 s_max_batch_time; u32 s_min_batch_time; kuid_t s_resuid; kgid_t s_resgid; ext4_fsblk_t s_sb_block; }; struct ext4_fsmap { struct list_head fmr_list; dev_t fmr_device; uint32_t fmr_flags; uint64_t fmr_physical; uint64_t fmr_owner; uint64_t fmr_length; }; struct ext4_fsmap_head { uint32_t fmh_iflags; uint32_t fmh_oflags; unsigned int fmh_count; unsigned int fmh_entries; struct ext4_fsmap fmh_keys[2]; }; struct ext4_getfsmap_info; struct ext4_getfsmap_dev { int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); u32 gfd_dev; }; typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); struct ext4_getfsmap_info { struct ext4_fsmap_head *gfi_head; ext4_fsmap_format_t gfi_formatter; void *gfi_format_arg; ext4_fsblk_t gfi_next_fsblk; u32 gfi_dev; ext4_group_t gfi_agno; struct ext4_fsmap gfi_low; struct ext4_fsmap gfi_high; struct ext4_fsmap gfi_lastfree; struct list_head gfi_meta_list; bool gfi_last; }; struct ext4_group_desc { __le32 bg_block_bitmap_lo; __le32 bg_inode_bitmap_lo; __le32 bg_inode_table_lo; __le16 bg_free_blocks_count_lo; __le16 bg_free_inodes_count_lo; __le16 bg_used_dirs_count_lo; __le16 bg_flags; __le32 bg_exclude_bitmap_lo; __le16 bg_block_bitmap_csum_lo; __le16 bg_inode_bitmap_csum_lo; __le16 bg_itable_unused_lo; __le16 bg_checksum; __le32 bg_block_bitmap_hi; __le32 bg_inode_bitmap_hi; __le32 bg_inode_table_hi; __le16 bg_free_blocks_count_hi; __le16 bg_free_inodes_count_hi; __le16 bg_used_dirs_count_hi; __le16 bg_itable_unused_hi; __le32 bg_exclude_bitmap_hi; __le16 bg_block_bitmap_csum_hi; __le16 bg_inode_bitmap_csum_hi; __u32 bg_reserved; }; struct ext4_group_info { long unsigned int bb_state; struct rb_root bb_free_root; ext4_grpblk_t bb_first_free; ext4_grpblk_t bb_free; ext4_grpblk_t bb_fragments; int bb_avg_fragment_size_order; ext4_grpblk_t bb_largest_free_order; ext4_group_t bb_group; struct list_head bb_prealloc_list; struct rw_semaphore alloc_sem; struct list_head bb_avg_fragment_size_node; struct list_head bb_largest_free_order_node; ext4_grpblk_t bb_counters[0]; }; struct ext4_iloc { struct buffer_head *bh; long unsigned int offset; ext4_group_t block_group; }; struct ext4_inode { __le16 i_mode; __le16 i_uid; __le32 i_size_lo; __le32 i_atime; __le32 i_ctime; __le32 i_mtime; __le32 i_dtime; __le16 i_gid; __le16 i_links_count; __le32 i_blocks_lo; __le32 i_flags; union { struct { __le32 l_i_version; } linux1; struct { __u32 h_i_translator; } hurd1; struct { __u32 m_i_reserved1; } masix1; } osd1; __le32 i_block[15]; __le32 i_generation; __le32 i_file_acl_lo; __le32 i_size_high; __le32 i_obso_faddr; union { struct { __le16 l_i_blocks_high; __le16 l_i_file_acl_high; __le16 l_i_uid_high; __le16 l_i_gid_high; __le16 l_i_checksum_lo; __le16 l_i_reserved; } linux2; struct { __le16 h_i_reserved1; __u16 h_i_mode_high; __u16 h_i_uid_high; __u16 h_i_gid_high; __u32 h_i_author; } hurd2; struct { __le16 h_i_reserved1; __le16 
m_i_file_acl_high; __u32 m_i_reserved2[2]; } masix2; } osd2; __le16 i_extra_isize; __le16 i_checksum_hi; __le32 i_ctime_extra; __le32 i_mtime_extra; __le32 i_atime_extra; __le32 i_crtime; __le32 i_crtime_extra; __le32 i_version_hi; __le32 i_projid; }; struct ext4_pending_tree { struct rb_root root; }; struct jbd2_inode; struct ext4_inode_info { __le32 i_data[15]; __u32 i_dtime; ext4_fsblk_t i_file_acl; ext4_group_t i_block_group; ext4_lblk_t i_dir_start_lookup; long unsigned int i_flags; struct rw_semaphore xattr_sem; union { struct list_head i_orphan; unsigned int i_orphan_idx; }; struct list_head i_fc_dilist; struct list_head i_fc_list; ext4_lblk_t i_fc_lblk_start; ext4_lblk_t i_fc_lblk_len; atomic_t i_fc_updates; atomic_t i_unwritten; wait_queue_head_t i_fc_wait; struct mutex i_fc_lock; loff_t i_disksize; struct rw_semaphore i_data_sem; struct inode vfs_inode; struct jbd2_inode *jinode; spinlock_t i_raw_lock; struct timespec64 i_crtime; atomic_t i_prealloc_active; unsigned int i_reserved_data_blocks; struct rb_root i_prealloc_node; rwlock_t i_prealloc_lock; struct ext4_es_tree i_es_tree; rwlock_t i_es_lock; struct list_head i_es_list; unsigned int i_es_all_nr; unsigned int i_es_shk_nr; ext4_lblk_t i_es_shrink_lblk; ext4_group_t i_last_alloc_group; struct ext4_pending_tree i_pending_tree; __u16 i_extra_isize; u16 i_inline_off; u16 i_inline_size; spinlock_t i_completed_io_lock; struct list_head i_rsv_conversion_list; struct work_struct i_rsv_conversion_work; spinlock_t i_block_reservation_lock; tid_t i_sync_tid; tid_t i_datasync_tid; __u32 i_csum_seed; kprojid_t i_projid; }; struct jbd2_journal_handle; typedef struct jbd2_journal_handle handle_t; struct ext4_io_end { struct list_head list; handle_t *handle; struct inode *inode; struct bio *bio; unsigned int flag; refcount_t count; struct list_head list_vec; }; typedef struct ext4_io_end ext4_io_end_t; struct ext4_io_end_vec { struct list_head list; loff_t offset; ssize_t size; }; struct ext4_io_submit { struct writeback_control *io_wbc; struct bio *io_bio; ext4_io_end_t *io_end; sector_t io_next_block; }; struct ext4_journal_cb_entry { struct list_head jce_list; void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); }; struct jbd2_buffer_trigger_type { void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); }; struct ext4_journal_trigger { struct jbd2_buffer_trigger_type tr_triggers; struct super_block *sb; }; struct ext4_lazy_init { long unsigned int li_state; struct list_head li_request_list; struct mutex li_list_mtx; }; struct ext4_li_request { struct super_block *lr_super; enum ext4_li_mode lr_mode; ext4_group_t lr_first_not_zeroed; ext4_group_t lr_next_group; struct list_head lr_request; long unsigned int lr_next_sched; long unsigned int lr_timeout; }; struct ext4_locality_group { struct mutex lg_mutex; struct list_head lg_prealloc_list[10]; spinlock_t lg_prealloc_lock; }; struct ext4_map_blocks { ext4_fsblk_t m_pblk; ext4_lblk_t m_lblk; unsigned int m_len; unsigned int m_flags; }; struct ext4_mount_options { long unsigned int s_mount_opt; long unsigned int s_mount_opt2; kuid_t s_resuid; kgid_t s_resgid; long unsigned int s_commit_interval; u32 s_min_batch_time; u32 s_max_batch_time; }; struct ext4_new_group_data; struct ext4_new_flex_group_data { struct ext4_new_group_data *groups; __u16 *bg_flags; ext4_group_t resize_bg; ext4_group_t count; }; struct ext4_new_group_data { __u32 group; __u64 
block_bitmap; __u64 inode_bitmap; __u64 inode_table; __u32 blocks_count; __u16 reserved_blocks; __u16 mdata_blocks; __u32 free_clusters_count; }; struct ext4_new_group_input { __u32 group; __u64 block_bitmap; __u64 inode_bitmap; __u64 inode_table; __u32 blocks_count; __u16 reserved_blocks; __u16 unused; }; struct ext4_orphan_block { atomic_t ob_free_entries; struct buffer_head *ob_bh; }; struct ext4_orphan_block_tail { __le32 ob_magic; __le32 ob_checksum; }; struct ext4_orphan_info { int of_blocks; __u32 of_csum_seed; struct ext4_orphan_block *of_binfo; }; struct ext4_prealloc_space { union { struct rb_node inode_node; struct list_head lg_list; } pa_node; struct list_head pa_group_list; union { struct list_head pa_tmp_list; struct callback_head pa_rcu; } u; spinlock_t pa_lock; atomic_t pa_count; unsigned int pa_deleted; ext4_fsblk_t pa_pstart; ext4_lblk_t pa_lstart; ext4_grpblk_t pa_len; ext4_grpblk_t pa_free; short unsigned int pa_type; union { rwlock_t *inode_lock; spinlock_t *lg_lock; } pa_node_lock; struct inode *pa_inode; }; struct ext4_rcu_ptr { struct callback_head rcu; void *ptr; }; struct ext4_renament { struct inode *dir; struct dentry *dentry; struct inode *inode; bool is_dir; int dir_nlink_delta; struct buffer_head *bh; struct ext4_dir_entry_2 *de; int inlined; struct buffer_head *dir_bh; struct ext4_dir_entry_2 *parent_de; int dir_inlined; }; struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; struct callback_head cb_head; }; struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rcuwait writer; wait_queue_head_t waiters; atomic_t block; struct lockdep_map dep_map; }; struct ext4_super_block; struct journal_s; struct ext4_system_blocks; struct flex_groups; struct shrinker; struct mb_cache; struct ext4_sb_info { long unsigned int s_desc_size; long unsigned int s_inodes_per_block; long unsigned int s_blocks_per_group; long unsigned int s_clusters_per_group; long unsigned int s_inodes_per_group; long unsigned int s_itb_per_group; long unsigned int s_gdb_count; long unsigned int s_desc_per_block; ext4_group_t s_groups_count; ext4_group_t s_blockfile_groups; long unsigned int s_overhead; unsigned int s_cluster_ratio; unsigned int s_cluster_bits; loff_t s_bitmap_maxbytes; struct buffer_head *s_sbh; struct ext4_super_block *s_es; struct buffer_head **s_group_desc; unsigned int s_mount_opt; unsigned int s_mount_opt2; long unsigned int s_mount_flags; unsigned int s_def_mount_opt; unsigned int s_def_mount_opt2; ext4_fsblk_t s_sb_block; atomic64_t s_resv_clusters; kuid_t s_resuid; kgid_t s_resgid; short unsigned int s_mount_state; short unsigned int s_pad; int s_addr_per_block_bits; int s_desc_per_block_bits; int s_inode_size; int s_first_ino; unsigned int s_inode_readahead_blks; unsigned int s_inode_goal; u32 s_hash_seed[4]; int s_def_hash_version; int s_hash_unsigned; struct percpu_counter s_freeclusters_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; struct percpu_counter s_dirtyclusters_counter; struct percpu_counter s_sra_exceeded_retry_limit; struct blockgroup_lock *s_blockgroup_lock; struct proc_dir_entry *s_proc; struct kobject s_kobj; struct completion s_kobj_unregister; struct super_block *s_sb; struct buffer_head *s_mmp_bh; struct journal_s *s_journal; long unsigned int s_ext4_flags; struct mutex s_orphan_lock; struct list_head s_orphan; struct ext4_orphan_info s_orphan_info; long unsigned int s_commit_interval; u32 s_max_batch_time; u32 s_min_batch_time; struct file 
*s_journal_bdev_file; unsigned int s_want_extra_isize; struct ext4_system_blocks *s_system_blks; struct ext4_group_info ***s_group_info; struct inode *s_buddy_cache; spinlock_t s_md_lock; short unsigned int *s_mb_offsets; unsigned int *s_mb_maxs; unsigned int s_group_info_size; unsigned int s_mb_free_pending; struct list_head s_freed_data_list[2]; struct list_head s_discard_list; struct work_struct s_discard_work; atomic_t s_retry_alloc_pending; struct list_head *s_mb_avg_fragment_size; rwlock_t *s_mb_avg_fragment_size_locks; struct list_head *s_mb_largest_free_orders; rwlock_t *s_mb_largest_free_orders_locks; long unsigned int s_stripe; unsigned int s_mb_max_linear_groups; unsigned int s_mb_stream_request; unsigned int s_mb_max_to_scan; unsigned int s_mb_min_to_scan; unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; unsigned int s_max_dir_size_kb; long unsigned int s_mb_last_group; long unsigned int s_mb_last_start; unsigned int s_mb_prefetch; unsigned int s_mb_prefetch_limit; unsigned int s_mb_best_avail_max_trim_order; atomic_t s_bal_reqs; atomic_t s_bal_success; atomic_t s_bal_allocated; atomic_t s_bal_ex_scanned; atomic_t s_bal_cX_ex_scanned[5]; atomic_t s_bal_groups_scanned; atomic_t s_bal_goals; atomic_t s_bal_len_goals; atomic_t s_bal_breaks; atomic_t s_bal_2orders; atomic_t s_bal_p2_aligned_bad_suggestions; atomic_t s_bal_goal_fast_bad_suggestions; atomic_t s_bal_best_avail_bad_suggestions; atomic64_t s_bal_cX_groups_considered[5]; atomic64_t s_bal_cX_hits[5]; atomic64_t s_bal_cX_failed[5]; atomic_t s_mb_buddies_generated; atomic64_t s_mb_generation_time; atomic_t s_mb_lost_chunks; atomic_t s_mb_preallocated; atomic_t s_mb_discarded; atomic_t s_lock_busy; struct ext4_locality_group *s_locality_groups; long unsigned int s_sectors_written_start; u64 s_kbytes_written; unsigned int s_extent_max_zeroout_kb; unsigned int s_log_groups_per_flex; struct flex_groups **s_flex_groups; ext4_group_t s_flex_groups_allocated; struct workqueue_struct *rsv_conversion_wq; struct timer_list s_err_report; struct ext4_li_request *s_li_request; unsigned int s_li_wait_mult; struct task_struct *s_mmp_tsk; long unsigned int s_last_trim_minblks; struct crypto_shash *s_chksum_driver; __u32 s_csum_seed; struct shrinker *s_es_shrinker; struct list_head s_es_list; long int s_es_nr_inode; struct ext4_es_stats s_es_stats; struct mb_cache *s_ea_block_cache; struct mb_cache *s_ea_inode_cache; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t s_es_lock; struct ext4_journal_trigger s_journal_triggers[1]; struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; atomic_t s_warning_count; atomic_t s_msg_count; struct fscrypt_dummy_policy s_dummy_enc_policy; struct percpu_rw_semaphore s_writepages_rwsem; struct dax_device *s_daxdev; u64 s_dax_part_off; errseq_t s_bdev_wb_err; spinlock_t s_bdev_wb_lock; spinlock_t s_error_lock; int s_add_error_count; int s_first_error_code; __u32 s_first_error_line; __u32 s_first_error_ino; __u64 s_first_error_block; const char *s_first_error_func; time64_t s_first_error_time; int s_last_error_code; __u32 s_last_error_line; __u32 s_last_error_ino; __u64 s_last_error_block; const char *s_last_error_func; time64_t s_last_error_time; struct work_struct s_sb_upd_work; atomic_t s_fc_subtid; struct list_head s_fc_q[2]; struct list_head s_fc_dentry_q[2]; unsigned int s_fc_bytes; spinlock_t s_fc_lock; struct buffer_head *s_fc_bh; struct 
ext4_fc_stats s_fc_stats; tid_t s_fc_ineligible_tid; struct ext4_fc_replay_state s_fc_replay_state; long: 64; long: 64; long: 64; long: 64; }; struct ext4_super_block { __le32 s_inodes_count; __le32 s_blocks_count_lo; __le32 s_r_blocks_count_lo; __le32 s_free_blocks_count_lo; __le32 s_free_inodes_count; __le32 s_first_data_block; __le32 s_log_block_size; __le32 s_log_cluster_size; __le32 s_blocks_per_group; __le32 s_clusters_per_group; __le32 s_inodes_per_group; __le32 s_mtime; __le32 s_wtime; __le16 s_mnt_count; __le16 s_max_mnt_count; __le16 s_magic; __le16 s_state; __le16 s_errors; __le16 s_minor_rev_level; __le32 s_lastcheck; __le32 s_checkinterval; __le32 s_creator_os; __le32 s_rev_level; __le16 s_def_resuid; __le16 s_def_resgid; __le32 s_first_ino; __le16 s_inode_size; __le16 s_block_group_nr; __le32 s_feature_compat; __le32 s_feature_incompat; __le32 s_feature_ro_compat; __u8 s_uuid[16]; char s_volume_name[16]; char s_last_mounted[64]; __le32 s_algorithm_usage_bitmap; __u8 s_prealloc_blocks; __u8 s_prealloc_dir_blocks; __le16 s_reserved_gdt_blocks; __u8 s_journal_uuid[16]; __le32 s_journal_inum; __le32 s_journal_dev; __le32 s_last_orphan; __le32 s_hash_seed[4]; __u8 s_def_hash_version; __u8 s_jnl_backup_type; __le16 s_desc_size; __le32 s_default_mount_opts; __le32 s_first_meta_bg; __le32 s_mkfs_time; __le32 s_jnl_blocks[17]; __le32 s_blocks_count_hi; __le32 s_r_blocks_count_hi; __le32 s_free_blocks_count_hi; __le16 s_min_extra_isize; __le16 s_want_extra_isize; __le32 s_flags; __le16 s_raid_stride; __le16 s_mmp_update_interval; __le64 s_mmp_block; __le32 s_raid_stripe_width; __u8 s_log_groups_per_flex; __u8 s_checksum_type; __u8 s_encryption_level; __u8 s_reserved_pad; __le64 s_kbytes_written; __le32 s_snapshot_inum; __le32 s_snapshot_id; __le64 s_snapshot_r_blocks_count; __le32 s_snapshot_list; __le32 s_error_count; __le32 s_first_error_time; __le32 s_first_error_ino; __le64 s_first_error_block; __u8 s_first_error_func[32]; __le32 s_first_error_line; __le32 s_last_error_time; __le32 s_last_error_ino; __le32 s_last_error_line; __le64 s_last_error_block; __u8 s_last_error_func[32]; __u8 s_mount_opts[64]; __le32 s_usr_quota_inum; __le32 s_grp_quota_inum; __le32 s_overhead_clusters; __le32 s_backup_bgs[2]; __u8 s_encrypt_algos[4]; __u8 s_encrypt_pw_salt[16]; __le32 s_lpf_ino; __le32 s_prj_quota_inum; __le32 s_checksum_seed; __u8 s_wtime_hi; __u8 s_mtime_hi; __u8 s_mkfs_time_hi; __u8 s_lastcheck_hi; __u8 s_first_error_time_hi; __u8 s_last_error_time_hi; __u8 s_first_error_errcode; __u8 s_last_error_errcode; __le16 s_encoding; __le16 s_encoding_flags; __le32 s_orphan_file_inum; __le32 s_reserved[94]; __le32 s_checksum; }; struct ext4_system_blocks { struct rb_root root; struct callback_head rcu; }; struct ext4_system_zone { struct rb_node node; ext4_fsblk_t start_blk; unsigned int count; u32 ino; }; struct ext4_xattr_entry; struct ext4_xattr_search { struct ext4_xattr_entry *first; void *base; void *end; struct ext4_xattr_entry *here; int not_found; }; struct ext4_xattr_block_find { struct ext4_xattr_search s; struct buffer_head *bh; }; struct ext4_xattr_entry { __u8 e_name_len; __u8 e_name_index; __le16 e_value_offs; __le32 e_value_inum; __le32 e_value_size; __le32 e_hash; char e_name[0]; }; struct ext4_xattr_header { __le32 h_magic; __le32 h_refcount; __le32 h_blocks; __le32 h_hash; __le32 h_checksum; __u32 h_reserved[3]; }; struct ext4_xattr_ibody_find { struct ext4_xattr_search s; struct ext4_iloc iloc; }; struct ext4_xattr_ibody_header { __le32 h_magic; }; struct ext4_xattr_info { 
const char *name; const void *value; size_t value_len; int name_index; int in_inode; }; struct ext4_xattr_inode_array { unsigned int count; struct inode *inodes[0]; }; struct ext_arg { size_t argsz; struct __kernel_timespec *ts; const sigset_t *sig; ktime_t min_time; }; struct msg_msg; struct ext_wait_queue { struct task_struct *task; struct list_head list; struct msg_msg *msg; int state; }; struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; struct extent_status { struct rb_node rb_node; ext4_lblk_t es_lblk; ext4_lblk_t es_len; ext4_fsblk_t es_pblk; }; struct external_name { union { atomic_t count; struct callback_head head; } u; unsigned char name[0]; }; struct extra_reg { unsigned int event; unsigned int msr; u64 config_mask; u64 valid_mask; int idx; bool extra_msr_access; }; struct f815xxa_data { spinlock_t lock; int idx; }; struct f_owner_ex { int type; __kernel_pid_t pid; }; struct fanotify_response_info_header { __u8 type; __u8 pad; __u16 len; }; struct fanotify_response_info_audit_rule { struct fanotify_response_info_header hdr; __u32 rule_number; __u32 subj_trust; __u32 obj_trust; }; struct fanout_args { __u16 id; __u16 type_flags; __u32 max_num_members; }; struct fast_pool { long unsigned int pool[4]; long unsigned int last; unsigned int count; struct timer_list mix; }; struct request_sock; struct tcp_fastopen_context; struct fastopen_queue { struct request_sock *rskq_rst_head; struct request_sock *rskq_rst_tail; spinlock_t lock; int qlen; int max_qlen; struct tcp_fastopen_context *ctx; }; struct fasync_struct { rwlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; }; struct fat_boot_sector { __u8 ignored[3]; __u8 system_id[8]; __u8 sector_size[2]; __u8 sec_per_clus; __le16 reserved; __u8 fats; __u8 dir_entries[2]; __u8 sectors[2]; __u8 media; __le16 fat_length; __le16 secs_track; __le16 heads; __le32 hidden; __le32 total_sect; union { struct { __u8 drive_number; __u8 state; __u8 signature; __u8 vol_id[4]; __u8 vol_label[11]; __u8 fs_type[8]; } fat16; struct { __le32 length; __le16 flags; __u8 version[2]; __le32 root_cluster; __le16 info_sector; __le16 backup_boot; __le16 reserved2[6]; __u8 drive_number; __u8 state; __u8 signature; __u8 vol_id[4]; __u8 vol_label[11]; __u8 fs_type[8]; } fat32; }; }; struct fault_attr { long unsigned int probability; long unsigned int interval; atomic_t times; atomic_t space; long unsigned int verbose; bool task_filter; long unsigned int stacktrace_depth; long unsigned int require_start; long unsigned int require_end; long unsigned int reject_start; long unsigned int reject_end; long unsigned int count; struct ratelimit_state ratelimit_state; struct dentry *dname; }; struct fb_bitfield { __u32 offset; __u32 length; __u32 msb_right; }; struct fb_blit_caps { long unsigned int x[1]; long unsigned int y[2]; u32 len; u32 flags; }; struct fb_chroma { __u32 redx; __u32 greenx; __u32 bluex; __u32 whitex; __u32 redy; __u32 greeny; __u32 bluey; __u32 whitey; }; struct fb_cmap { __u32 start; __u32 len; __u16 *red; __u16 *green; __u16 *blue; __u16 *transp; }; struct fb_cmap_user { __u32 start; __u32 len; __u16 *red; __u16 *green; __u16 *blue; __u16 *transp; }; struct fb_con2fbmap { __u32 console; __u32 framebuffer; }; struct fb_copyarea { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 sx; __u32 sy; }; struct fbcurpos { __u16 
x; __u16 y; }; struct fb_image { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 fg_color; __u32 bg_color; __u8 depth; const char *data; struct fb_cmap cmap; }; struct fb_cursor { __u16 set; __u16 enable; __u16 rop; const char *mask; struct fbcurpos hot; struct fb_image image; }; struct fb_cvt_data { u32 xres; u32 yres; u32 refresh; u32 f_refresh; u32 pixclock; u32 hperiod; u32 hblank; u32 hfreq; u32 htotal; u32 vtotal; u32 vsync; u32 hsync; u32 h_front_porch; u32 h_back_porch; u32 v_front_porch; u32 v_back_porch; u32 h_margin; u32 v_margin; u32 interlace; u32 aspect_ratio; u32 active_pixels; u32 flags; u32 status; }; struct fb_info; struct fb_event { struct fb_info *info; void *data; }; struct fb_fillrect { __u32 dx; __u32 dy; __u32 width; __u32 height; __u32 color; __u32 rop; }; struct fb_fix_screeninfo { char id[16]; long unsigned int smem_start; __u32 smem_len; __u32 type; __u32 type_aux; __u32 visual; __u16 xpanstep; __u16 ypanstep; __u16 ywrapstep; __u32 line_length; long unsigned int mmio_start; __u32 mmio_len; __u32 accel; __u16 capabilities; __u16 reserved[2]; }; struct fb_var_screeninfo { __u32 xres; __u32 yres; __u32 xres_virtual; __u32 yres_virtual; __u32 xoffset; __u32 yoffset; __u32 bits_per_pixel; __u32 grayscale; struct fb_bitfield red; struct fb_bitfield green; struct fb_bitfield blue; struct fb_bitfield transp; __u32 nonstd; __u32 activate; __u32 height; __u32 width; __u32 accel_flags; __u32 pixclock; __u32 left_margin; __u32 right_margin; __u32 upper_margin; __u32 lower_margin; __u32 hsync_len; __u32 vsync_len; __u32 sync; __u32 vmode; __u32 rotate; __u32 colorspace; __u32 reserved[4]; }; struct fb_monspecs { struct fb_chroma chroma; struct fb_videomode *modedb; __u8 manufacturer[4]; __u8 monitor[14]; __u8 serial_no[14]; __u8 ascii[14]; __u32 modedb_len; __u32 model; __u32 serial; __u32 year; __u32 week; __u32 hfmin; __u32 hfmax; __u32 dclkmin; __u32 dclkmax; __u16 input; __u16 dpms; __u16 signal; __u16 vfmin; __u16 vfmax; __u16 gamma; __u16 gtf: 1; __u16 misc; __u8 version; __u8 revision; __u8 max_x; __u8 max_y; }; struct fb_pixmap { u8 *addr; u32 size; u32 offset; u32 buf_align; u32 scan_align; u32 access_align; u32 flags; long unsigned int blit_x[1]; long unsigned int blit_y[2]; void (*writeio)(struct fb_info *, void *, void *, unsigned int); void (*readio)(struct fb_info *, void *, void *, unsigned int); }; struct fb_ops; struct fb_tile_ops; struct fb_info { refcount_t count; int node; int flags; int fbcon_rotate_hint; struct mutex lock; struct mutex mm_lock; struct fb_var_screeninfo var; struct fb_fix_screeninfo fix; struct fb_monspecs monspecs; struct fb_pixmap pixmap; struct fb_pixmap sprite; struct fb_cmap cmap; struct list_head modelist; struct fb_videomode *mode; const struct fb_ops *fbops; struct device *device; struct device *dev; int class_flag; struct fb_tile_ops *tileops; union { char *screen_base; char *screen_buffer; }; long unsigned int screen_size; void *pseudo_palette; u32 state; void *fbcon_par; void *par; bool skip_vt_switch; bool skip_panic; }; struct fb_videomode { const char *name; u32 refresh; u32 xres; u32 yres; u32 pixclock; u32 left_margin; u32 right_margin; u32 upper_margin; u32 lower_margin; u32 hsync_len; u32 vsync_len; u32 sync; u32 vmode; u32 flag; }; struct fb_modelist { struct list_head list; struct fb_videomode mode; }; struct fb_ops { struct module *owner; int (*fb_open)(struct fb_info *, int); int (*fb_release)(struct fb_info *, int); ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); ssize_t (*fb_write)(struct 
fb_info *, const char *, size_t, loff_t *); int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); int (*fb_set_par)(struct fb_info *); int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); int (*fb_blank)(int, struct fb_info *); int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); void (*fb_imageblit)(struct fb_info *, const struct fb_image *); int (*fb_cursor)(struct fb_info *, struct fb_cursor *); int (*fb_sync)(struct fb_info *); int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); void (*fb_destroy)(struct fb_info *); int (*fb_debug_enter)(struct fb_info *); int (*fb_debug_leave)(struct fb_info *); }; struct fb_tilemap; struct fb_tilearea; struct fb_tilerect; struct fb_tileblit; struct fb_tilecursor; struct fb_tile_ops { void (*fb_settile)(struct fb_info *, struct fb_tilemap *); void (*fb_tilecopy)(struct fb_info *, struct fb_tilearea *); void (*fb_tilefill)(struct fb_info *, struct fb_tilerect *); void (*fb_tileblit)(struct fb_info *, struct fb_tileblit *); void (*fb_tilecursor)(struct fb_info *, struct fb_tilecursor *); int (*fb_get_tilemax)(struct fb_info *); }; struct fb_tilearea { __u32 sx; __u32 sy; __u32 dx; __u32 dy; __u32 width; __u32 height; }; struct fb_tileblit { __u32 sx; __u32 sy; __u32 width; __u32 height; __u32 fg; __u32 bg; __u32 length; __u32 *indices; }; struct fb_tilecursor { __u32 sx; __u32 sy; __u32 mode; __u32 shape; __u32 fg; __u32 bg; }; struct fb_tilemap { __u32 width; __u32 height; __u32 depth; __u32 length; const __u8 *data; }; struct fb_tilerect { __u32 sx; __u32 sy; __u32 width; __u32 height; __u32 index; __u32 fg; __u32 bg; __u32 rop; }; struct fbcon_display { const u_char *fontdata; int userfont; u_short inverse; short int yscroll; int vrows; int cursor_shape; int con_rotate; u32 xres_virtual; u32 yres_virtual; u32 height; u32 width; u32 bits_per_pixel; u32 grayscale; u32 nonstd; u32 accel_flags; u32 rotate; struct fb_bitfield red; struct fb_bitfield green; struct fb_bitfield blue; struct fb_bitfield transp; const struct fb_videomode *mode; }; struct fbcon_ops { void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); void (*cursor)(struct vc_data *, struct fb_info *, bool, int, int); int (*update_start)(struct fb_info *); int (*rotate_font)(struct fb_info *, struct vc_data *); struct fb_var_screeninfo var; struct delayed_work cursor_work; struct fb_cursor cursor_state; struct fbcon_display *p; struct fb_info *info; int currcon; int cur_blink_jiffies; int cursor_flash; int cursor_reset; int blank_state; int graphics; int save_graphics; bool initialized; int rotate; int cur_rotate; char *cursor_data; u8 *fontbuffer; u8 *fontdata; u8 *cursor_src; u32 cursor_size; u32 fd_size; }; struct fc_log { refcount_t usage; u8 head; u8 tail; u8 need_free; struct module *owner; char *buffer[8]; }; struct 
fd { long unsigned int word; }; typedef struct fd class_fd_raw_t; typedef struct fd class_fd_t; struct fd_data { fmode_t mode; unsigned int fd; }; struct fdtable { unsigned int max_fds; struct file **fd; long unsigned int *close_on_exec; long unsigned int *open_fds; long unsigned int *full_fds_bits; struct callback_head rcu; }; struct features_reply_data { struct ethnl_reply_data base; u32 hw[2]; u32 wanted[2]; u32 active[2]; u32 nochange[2]; u32 all[2]; }; struct fec_stat_grp { u64 stats[9]; u8 cnt; }; struct fec_reply_data { struct ethnl_reply_data base; long unsigned int fec_link_modes[2]; u32 active_fec; u8 fec_auto; struct fec_stat_grp corr; struct fec_stat_grp uncorr; struct fec_stat_grp corr_bits; }; typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); struct kprobe { struct hlist_node hlist; struct list_head list; long unsigned int nmissed; kprobe_opcode_t *addr; const char *symbol_name; unsigned int offset; kprobe_pre_handler_t pre_handler; kprobe_post_handler_t post_handler; kprobe_opcode_t opcode; struct arch_specific_insn ainsn; u32 flags; }; struct fei_attr { struct list_head list; struct kprobe kp; long unsigned int retval; }; struct fentry_trace_entry_head { struct trace_entry ent; long unsigned int ip; }; struct fetch_insn { enum fetch_op op; union { unsigned int param; struct { unsigned int size; int offset; }; struct { unsigned char basesize; unsigned char lshift; unsigned char rshift; }; long unsigned int immediate; void *data; }; }; struct trace_seq; typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); struct fetch_type { const char *name; size_t size; bool is_signed; bool is_string; print_type_func_t print; const char *fmt; const char *fmttype; }; struct fexit_trace_entry_head { struct trace_entry ent; long unsigned int func; long unsigned int ret_ip; }; struct ff_condition_effect { __u16 right_saturation; __u16 left_saturation; __s16 right_coeff; __s16 left_coeff; __u16 deadband; __s16 center; }; struct ff_envelope { __u16 attack_length; __u16 attack_level; __u16 fade_length; __u16 fade_level; }; struct ff_constant_effect { __s16 level; struct ff_envelope envelope; }; struct ff_effect; struct ff_device { int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); int (*erase)(struct input_dev *, int); int (*playback)(struct input_dev *, int, int); void (*set_gain)(struct input_dev *, u16); void (*set_autocenter)(struct input_dev *, u16); void (*destroy)(struct ff_device *); void *private; long unsigned int ffbit[2]; struct mutex mutex; int max_effects; struct ff_effect *effects; struct file *effect_owners[0]; }; struct ff_trigger { __u16 button; __u16 interval; }; struct ff_replay { __u16 length; __u16 delay; }; struct ff_ramp_effect { __s16 start_level; __s16 end_level; struct ff_envelope envelope; }; struct ff_periodic_effect { __u16 waveform; __u16 period; __s16 magnitude; __s16 offset; __u16 phase; struct ff_envelope envelope; __u32 custom_len; __s16 *custom_data; }; struct ff_rumble_effect { __u16 strong_magnitude; __u16 weak_magnitude; }; struct ff_effect { __u16 type; __s16 id; __u16 direction; struct ff_trigger trigger; struct ff_replay replay; union { struct ff_constant_effect constant; struct ff_ramp_effect ramp; struct ff_periodic_effect periodic; struct ff_condition_effect condition[2]; struct ff_rumble_effect rumble; } u; }; struct fgraph_cpu_data { pid_t last_pid; int depth; int depth_irq; int ignore; long 
unsigned int enter_funcs[50]; }; struct ftrace_graph_ent { long unsigned int func; int depth; } __attribute__((packed)); struct ftrace_graph_ent_entry { struct trace_entry ent; struct ftrace_graph_ent graph_ent; }; struct ftrace_graph_ret { long unsigned int func; int depth; unsigned int overrun; long long unsigned int calltime; long long unsigned int rettime; }; struct ftrace_graph_ret_entry { struct trace_entry ent; struct ftrace_graph_ret ret; }; struct fgraph_data { struct fgraph_cpu_data *cpu_data; struct ftrace_graph_ent_entry ent; struct ftrace_graph_ret_entry ret; int failed; int cpu; long: 0; } __attribute__((packed)); struct fgraph_ops; typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, struct fgraph_ops *); typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, struct fgraph_ops *); struct fgraph_ops { trace_func_graph_ent_t entryfunc; trace_func_graph_ret_t retfunc; struct ftrace_ops ops; void *private; trace_func_graph_ent_t saved_func; int idx; }; struct fgraph_ret_regs { long unsigned int ax; long unsigned int dx; long unsigned int bp; }; struct fib_kuid_range { kuid_t start; kuid_t end; }; struct fib_rule_port_range { __u16 start; __u16 end; }; struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u8 proto; u8 ip_proto; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; refcount_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16]; char oifname[16]; struct fib_kuid_range uid_range; struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; struct callback_head rcu; }; struct fib4_rule { struct fib_rule common; u8 dst_len; u8 src_len; dscp_t dscp; u8 dscp_full: 1; __be32 src; __be32 srcmask; __be32 dst; __be32 dstmask; }; struct fib6_node; struct fib6_walker { struct list_head lh; struct fib6_node *root; struct fib6_node *node; struct fib6_info *leaf; enum fib6_walk_state state; unsigned int skip; unsigned int count; unsigned int skip_in_node; int (*func)(struct fib6_walker *); void *args; }; struct fib6_cleaner { struct fib6_walker w; struct net *net; int (*func)(struct fib6_info *, void *); int sernum; void *arg; bool skip_notify; }; struct nlmsghdr; struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; u32 portid; u8 skip_notify: 1; u8 skip_notify_kernel: 1; }; struct fib6_config { u32 fc_table; u32 fc_metric; int fc_dst_len; int fc_src_len; int fc_ifindex; u32 fc_flags; u32 fc_protocol; u16 fc_type; u16 fc_delete_all_nh: 1; u16 fc_ignore_dev_down: 1; u16 __unused: 14; u32 fc_nh_id; struct in6_addr fc_dst; struct in6_addr fc_src; struct in6_addr fc_prefsrc; struct in6_addr fc_gateway; long unsigned int fc_expires; struct nlattr *fc_mx; int fc_mx_len; int fc_mp_len; struct nlattr *fc_mp; struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; bool fc_is_fdb; }; struct fib6_dump_arg { struct net *net; struct notifier_block *nb; struct netlink_ext_ack *extack; }; struct fib_notifier_info { int family; struct netlink_ext_ack *extack; }; struct fib6_entry_notifier_info { struct fib_notifier_info info; struct fib6_info *rt; unsigned int nsiblings; }; struct fib6_gc_args { int timeout; int more; }; struct rt6key { struct in6_addr addr; int plen; }; struct rtable; struct fnhe_hash_bucket; struct fib_nh_common { struct net_device *nhc_dev; netdevice_tracker nhc_dev_tracker; int nhc_oif; unsigned char nhc_scope; u8 nhc_family; u8 nhc_gw_family; unsigned char nhc_flags; struct 
lwtunnel_state *nhc_lwtstate; union { __be32 ipv4; struct in6_addr ipv6; } nhc_gw; int nhc_weight; atomic_t nhc_upper_bound; struct rtable **nhc_pcpu_rth_output; struct rtable *nhc_rth_input; struct fnhe_hash_bucket *nhc_exceptions; }; struct rt6_info; struct rt6_exception_bucket; struct fib6_nh { struct fib_nh_common nh_common; long unsigned int last_probe; struct rt6_info **rt6i_pcpu; struct rt6_exception_bucket *rt6i_exception_bucket; }; struct fib6_table; struct nexthop; struct fib6_info { struct fib6_table *fib6_table; struct fib6_info *fib6_next; struct fib6_node *fib6_node; union { struct list_head fib6_siblings; struct list_head nh_list; }; unsigned int fib6_nsiblings; refcount_t fib6_ref; long unsigned int expires; struct hlist_node gc_link; struct dst_metrics *fib6_metrics; struct rt6key fib6_dst; u32 fib6_flags; struct rt6key fib6_src; struct rt6key fib6_prefsrc; u32 fib6_metric; u8 fib6_protocol; u8 fib6_type; u8 offload; u8 trap; u8 offload_failed; u8 should_flush: 1; u8 dst_nocount: 1; u8 dst_nopolicy: 1; u8 fib6_destroying: 1; u8 unused: 4; struct callback_head rcu; struct nexthop *nh; struct fib6_nh fib6_nh[0]; }; struct fib6_nh_age_excptn_arg { struct fib6_gc_args *gc_args; long unsigned int now; }; struct fib6_nh_del_cached_rt_arg { struct fib6_config *cfg; struct fib6_info *f6i; }; struct fib6_nh_dm_arg { struct net *net; const struct in6_addr *saddr; int oif; int flags; struct fib6_nh *nh; }; struct rt6_rtnl_dump_arg; struct fib6_nh_exception_dump_walker { struct rt6_rtnl_dump_arg *dump; struct fib6_info *rt; unsigned int flags; unsigned int skip; unsigned int count; }; struct fib6_nh_excptn_arg { struct rt6_info *rt; int plen; }; struct fib6_nh_frl_arg { u32 flags; int oif; int strict; int *mpri; bool *do_rr; struct fib6_nh *nh; }; struct fib6_nh_match_arg { const struct net_device *dev; const struct in6_addr *gw; struct fib6_nh *match; }; struct fib6_nh_pcpu_arg { struct fib6_info *from; const struct fib6_table *table; }; struct fib6_result; struct flowi6; struct fib6_nh_rd_arg { struct fib6_result *res; struct flowi6 *fl6; const struct in6_addr *gw; struct rt6_info **ret; }; struct fib6_node { struct fib6_node *parent; struct fib6_node *left; struct fib6_node *right; struct fib6_node *subtree; struct fib6_info *leaf; __u16 fn_bit; __u16 fn_flags; int fn_sernum; struct fib6_info *rr_ptr; struct callback_head rcu; }; struct fib6_result { struct fib6_nh *nh; struct fib6_info *f6i; u32 fib6_flags; u8 fib6_type; struct rt6_info *rt6; }; struct fib6_rule { struct fib_rule common; struct rt6key src; struct rt6key dst; dscp_t dscp; u8 dscp_full: 1; }; struct inet_peer_base { struct rb_root rb_root; seqlock_t lock; int total; }; struct fib6_table { struct hlist_node tb6_hlist; u32 tb6_id; spinlock_t tb6_lock; struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; unsigned int fib_seq; struct hlist_head tb6_gc_hlist; }; struct fib_info; struct fib_alias { struct hlist_node fa_list; struct fib_info *fa_info; dscp_t fa_dscp; u8 fa_type; u8 fa_state; u8 fa_slen; u32 tb_id; s16 fa_default; u8 offload; u8 trap; u8 offload_failed; struct callback_head rcu; }; struct rtnexthop; struct fib_config { u8 fc_dst_len; dscp_t fc_dscp; u8 fc_protocol; u8 fc_scope; u8 fc_type; u8 fc_gw_family; u32 fc_table; __be32 fc_dst; union { __be32 fc_gw4; struct in6_addr fc_gw6; }; int fc_oif; u32 fc_flags; u32 fc_priority; __be32 fc_prefsrc; u32 fc_nh_id; struct nlattr *fc_mx; struct rtnexthop *fc_mp; int fc_mx_len; int fc_mp_len; u32 fc_flow; u32 fc_nlflags; struct nl_info 
fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; }; struct fib_dump_filter { u32 table_id; bool filter_set; bool dump_routes; bool dump_exceptions; bool rtnl_held; unsigned char protocol; unsigned char rt_type; unsigned int flags; struct net_device *dev; }; struct fib_entry_notifier_info { struct fib_notifier_info info; u32 dst; int dst_len; struct fib_info *fi; dscp_t dscp; u8 type; u32 tb_id; }; struct fib_nh { struct fib_nh_common nh_common; struct hlist_node nh_hash; struct fib_info *nh_parent; __be32 nh_saddr; int nh_saddr_genid; }; struct fib_info { struct hlist_node fib_hash; struct hlist_node fib_lhash; struct list_head nh_list; struct net *fib_net; refcount_t fib_treeref; refcount_t fib_clntref; unsigned int fib_flags; unsigned char fib_dead; unsigned char fib_protocol; unsigned char fib_scope; unsigned char fib_type; __be32 fib_prefsrc; u32 fib_tb_id; u32 fib_priority; struct dst_metrics *fib_metrics; int fib_nhs; bool fib_nh_is_v6; bool nh_updated; bool pfsrc_removed; struct nexthop *nh; struct callback_head rcu; struct fib_nh fib_nh[0]; }; struct fib_lookup_arg { void *lookup_ptr; const void *lookup_data; void *result; struct fib_rule *rule; u32 table; int flags; }; struct fib_nh_exception { struct fib_nh_exception *fnhe_next; int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; bool fnhe_mtu_locked; __be32 fnhe_gw; long unsigned int fnhe_expires; struct rtable *fnhe_rth_input; struct rtable *fnhe_rth_output; long unsigned int fnhe_stamp; struct callback_head rcu; }; struct fib_nh_notifier_info { struct fib_notifier_info info; struct fib_nh *fib_nh; }; struct fib_notifier_net { struct list_head fib_notifier_ops; struct atomic_notifier_head fib_chain; }; struct fib_notifier_ops { int family; struct list_head list; unsigned int (*fib_seq_read)(struct net *); int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); struct module *owner; struct callback_head rcu; }; struct fib_prop { int error; u8 scope; }; struct fib_table; struct fib_result { __be32 prefix; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; u32 tclassid; dscp_t dscp; struct fib_nh_common *nhc; struct fib_info *fi; struct fib_table *table; struct hlist_head *fa_head; }; struct fib_result_nl { __be32 fl_addr; u32 fl_mark; unsigned char fl_tos; unsigned char fl_scope; unsigned char tb_id_in; unsigned char tb_id; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; int err; }; struct key_vector; struct fib_route_iter { struct seq_net_private p; struct fib_table *main_tb; struct key_vector *tnode; loff_t pos; t_key key; }; struct fib_rt_info { struct fib_info *fi; u32 tb_id; __be32 dst; int dst_len; dscp_t dscp; u8 type; u8 offload: 1; u8 trap: 1; u8 offload_failed: 1; u8 unused: 5; }; struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; }; struct fib_rule_notifier_info { struct fib_notifier_info info; struct fib_rule *rule; }; struct fib_rule_uid_range { __u32 start; __u32 end; }; struct flowi; struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int unresolved_rules; int nr_goto_rules; unsigned int fib_rules_seq; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, 
struct netlink_ext_ack *); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; }; struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; long unsigned int *tb_data; long unsigned int __data[0]; }; struct fib_trie_iter { struct seq_net_private p; struct fib_table *tb; struct key_vector *tnode; unsigned int index; unsigned int depth; }; struct fid { union { struct { u32 ino; u32 gen; u32 parent_ino; u32 parent_gen; } i32; struct { u64 ino; u32 gen; } __attribute__((packed)) i64; struct { u32 block; u16 partref; u16 parent_partref; u32 generation; u32 parent_block; u32 parent_generation; } udf; struct { struct {} __empty_raw; __u32 raw[0]; }; }; }; struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2]; __u32 fe_flags; __u32 fe_reserved[3]; }; struct fiemap { __u64 fm_start; __u64 fm_length; __u32 fm_flags; __u32 fm_mapped_extents; __u32 fm_extent_count; __u32 fm_reserved; struct fiemap_extent fm_extents[0]; }; struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; }; struct file__safe_trusted { struct inode *f_inode; }; struct file_clone_range { __s64 src_fd; __u64 src_offset; __u64 src_length; __u64 dest_offset; }; struct file_dedupe_range_info { __s64 dest_fd; __u64 dest_offset; __u64 bytes_deduped; __s32 status; __u32 reserved; }; struct file_dedupe_range { __u64 src_offset; __u64 src_length; __u16 dest_count; __u16 reserved1; __u32 reserved2; struct file_dedupe_range_info info[0]; }; struct file_handle { __u32 handle_bytes; int handle_type; unsigned char f_handle[0]; }; struct file_lock_core { struct file_lock_core *flc_blocker; struct list_head flc_list; struct hlist_node flc_link; struct list_head flc_blocked_requests; struct list_head flc_blocked_member; fl_owner_t flc_owner; unsigned int flc_flags; unsigned char flc_type; pid_t flc_pid; int flc_link_cpu; wait_queue_head_t flc_wait; struct file *flc_file; }; struct lease_manager_operations; struct file_lease { struct file_lock_core c; struct fasync_struct *fl_fasync; long unsigned int fl_break_time; long unsigned int fl_downgrade_time; const struct lease_manager_operations *fl_lmops; }; struct nlm_lockowner; struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner; }; struct file_lock_operations; struct lock_manager_operations; struct file_lock { struct file_lock_core c; loff_t fl_start; loff_t fl_end; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct { struct list_head link; int state; unsigned int debug_id; } afs; struct { struct inode *inode; } ceph; } fl_u; }; struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; }; struct file_lock_list_struct { spinlock_t lock; struct hlist_head hlist; }; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct 
file_lock *); }; struct io_uring_cmd; struct file_operations { struct module *owner; fop_flags_t fop_flags; loff_t (*llseek)(struct file *, loff_t, int); ssize_t (*read)(struct file *, char *, size_t, loff_t *); ssize_t (*write)(struct file *, const char *, size_t, loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *, struct io_comp_batch *, unsigned int); int (*iterate_shared)(struct file *, struct dir_context *); __poll_t (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t, loff_t, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct file *); int (*setlease)(struct file *, int, struct file_lease **, void **); long int (*fallocate)(struct file *, int, loff_t, loff_t); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); int (*fadvise)(struct file *, loff_t, loff_t, int); int (*uring_cmd)(struct io_uring_cmd *, unsigned int); int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int); }; struct tpm_chip; struct tpm_space; struct file_priv { struct tpm_chip *chip; struct tpm_space *space; struct mutex buffer_mutex; struct timer_list user_read_timer; struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; ssize_t response_length; bool response_read; bool command_enqueued; u8 data_buffer[4096]; }; struct page_counter; struct file_region { struct list_head link; long int from; long int to; struct page_counter *reservation_counter; struct cgroup_subsys_state *css; }; struct file_security_struct { u32 sid; u32 fown_sid; u32 isid; u32 pseqno; }; struct fs_context; struct fs_parameter_spec; struct file_system_type { const char *name; int fs_flags; int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; struct fileattr { u32 flags; u32 fsx_xflags; u32 fsx_extsize; u32 fsx_nextents; u32 fsx_projid; u32 fsx_cowextsize; bool flags_valid: 1; bool fsx_valid: 1; }; struct filename { const char 
*name; const char *uptr; atomic_t refcnt; struct audit_names *aname; const char iname[0]; }; struct filename_trans_datum { struct ebitmap stypes; u32 otype; struct filename_trans_datum *next; }; struct filename_trans_key { u32 ttype; u16 tclass; const char *name; }; struct files_stat_struct { long unsigned int nr_files; long unsigned int nr_free_files; long unsigned int max_files; }; struct files_struct { atomic_t count; bool resize_in_progress; wait_queue_head_t resize_wait; struct fdtable *fdt; struct fdtable fdtab; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t file_lock; unsigned int next_fd; long unsigned int close_on_exec_init[1]; long unsigned int open_fds_init[1]; long unsigned int full_fds_bits_init[1]; struct file *fd_array[64]; long: 64; long: 64; long: 64; long: 64; }; struct filter_list { struct list_head list; struct event_filter *filter; }; struct filter_parse_error { int lasterr; int lasterr_pos; }; struct regex; struct ftrace_event_field; struct filter_pred { struct regex *regex; struct cpumask *mask; short unsigned int *ops; struct ftrace_event_field *field; u64 val; u64 val2; enum filter_pred_fn fn_num; int offset; int not; int op; }; struct find_child_walk_data { struct acpi_device *adev; u64 address; int score; bool check_sta; bool check_children; }; struct kernel_symbol; struct find_symbol_arg { const char *name; bool gplok; bool warn; struct module *owner; const s32 *crc; const struct kernel_symbol *sym; enum mod_license license; }; struct firmware { size_t size; const u8 *data; void *priv; }; struct firmware_cache { spinlock_t lock; struct list_head head; int state; spinlock_t name_lock; struct list_head fw_names; struct delayed_work work; struct notifier_block pm_notify; }; struct firmware_fallback_config { unsigned int force_sysfs_fallback; unsigned int ignore_sysfs_fallback; int old_timeout; int loading_timeout; }; struct firmware_map_entry { u64 start; u64 end; const char *type; struct list_head list; struct kobject kobj; }; struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *, void *); u32 opt_flags; }; struct fixed_percpu_data { char gs_base[40]; long unsigned int stack_canary; }; struct fixed_range_block { int base_msr; int ranges; }; struct fl_flow_mask_range { short unsigned int start; short unsigned int end; }; struct fl_flow_mask { struct fl_flow_key key; struct fl_flow_mask_range range; u32 flags; struct rhash_head ht_node; struct rhashtable ht; struct rhashtable_params filter_ht_params; struct flow_dissector dissector; struct list_head filters; struct rcu_work rwork; struct list_head list; refcount_t refcnt; }; struct tcf_chain; struct fl_flow_tmplt { struct fl_flow_key dummy_key; struct fl_flow_key mask; struct flow_dissector dissector; struct tcf_chain *chain; }; struct flex_groups { atomic64_t free_clusters; atomic_t free_inodes; atomic_t used_dirs; }; struct flock { short int l_type; short int l_whence; __kernel_off_t l_start; __kernel_off_t l_len; __kernel_pid_t l_pid; }; struct flock64 { short int l_type; short int l_whence; __kernel_loff_t l_start; __kernel_loff_t l_len; __kernel_pid_t l_pid; }; typedef void (*action_destr)(void *); struct ip_tunnel_info; struct psample_group; struct nf_flowtable; struct flow_action_entry { enum flow_action_id id; u32 hw_index; long unsigned int cookie; u64 miss_cookie; enum flow_action_hw_stats hw_stats; action_destr destructor; void *destructor_priv; union { u32 chain_index; struct 
net_device *dev; struct { u16 vid; __be16 proto; u8 prio; } vlan; struct { unsigned char dst[6]; unsigned char src[6]; } vlan_push_eth; struct { enum flow_action_mangle_base htype; u32 offset; u32 mask; u32 val; } mangle; struct ip_tunnel_info *tunnel; u32 csum_flags; u32 mark; u16 ptype; u16 rx_queue; u32 priority; struct { u32 ctx; u32 index; u8 vf; } queue; struct { struct psample_group *psample_group; u32 rate; u32 trunc_size; bool truncate; } sample; struct { u32 burst; u64 rate_bytes_ps; u64 peakrate_bytes_ps; u32 avrate; u16 overhead; u64 burst_pkt; u64 rate_pkt_ps; u32 mtu; struct { enum flow_action_id act_id; u32 extval; } exceed; struct { enum flow_action_id act_id; u32 extval; } notexceed; } police; struct { int action; u16 zone; struct nf_flowtable *flow_table; } ct; struct { long unsigned int cookie; u32 mark; u32 labels[4]; bool orig_dir; } ct_metadata; struct { u32 label; __be16 proto; u8 tc; u8 bos; u8 ttl; } mpls_push; struct { __be16 proto; } mpls_pop; struct { u32 label; u8 tc; u8 bos; u8 ttl; } mpls_mangle; struct { s32 prio; u64 basetime; u64 cycletime; u64 cycletimeext; u32 num_entries; struct action_gate_entry *entries; } gate; struct { u16 sid; } pppoe; }; struct flow_action_cookie *user_cookie; }; struct flow_action { unsigned int num_entries; struct flow_action_entry entries[0]; }; struct flow_action_cookie { u32 cookie_len; u8 cookie[0]; }; struct flow_block { struct list_head cb_list; }; typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); struct flow_block_cb; struct flow_block_indr { struct list_head list; struct net_device *dev; struct Qdisc *sch; enum flow_block_binder_type binder_type; void *data; void *cb_priv; void (*cleanup)(struct flow_block_cb *); }; struct flow_block_cb { struct list_head driver_list; struct list_head list; flow_setup_cb_t *cb; void *cb_ident; void *cb_priv; void (*release)(void *); struct flow_block_indr indr; unsigned int refcnt; }; struct flow_block_offload { enum flow_block_command command; enum flow_block_binder_type binder_type; bool block_shared; bool unlocked_driver_cb; struct net *net; struct flow_block *block; struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack *extack; struct Qdisc *sch; struct list_head *cb_list_head; }; struct flow_cls_common_offload { u32 chain_index; __be16 protocol; u32 prio; struct netlink_ext_ack *extack; }; struct flow_stats { u64 pkts; u64 bytes; u64 drops; u64 lastused; enum flow_action_hw_stats used_hw_stats; bool used_hw_stats_valid; }; struct flow_cls_offload { struct flow_cls_common_offload common; enum flow_cls_command command; bool use_act_stats; long unsigned int cookie; struct flow_rule *rule; struct flow_stats stats; u32 classid; }; struct flow_dissector_key { enum flow_dissector_key_id key_id; size_t offset; }; struct flow_dissector_key_tipc { __be32 key; }; struct flow_dissector_key_addrs { union { struct flow_dissector_key_ipv4_addrs v4addrs; struct flow_dissector_key_ipv6_addrs v6addrs; struct flow_dissector_key_tipc tipckey; }; }; struct flow_dissector_key_tags { u32 flow_label; }; struct flow_indir_dev_info { void *data; struct net_device *dev; struct Qdisc *sch; enum tc_setup_type type; void (*cleanup)(struct flow_block_cb *); struct list_head list; enum flow_block_command command; enum flow_block_binder_type binder_type; struct list_head *cb_list; }; typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); struct flow_indr_dev { struct 
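/*
 * Example (sketch): offload drivers walk `struct flow_action` above with
 * the flow_action_for_each() helper and switch on each entry's id; the
 * big union in `struct flow_action_entry` carries the per-action
 * arguments (e.g. `vlan` for FLOW_ACTION_VLAN_PUSH).
 *
 *   struct flow_action_entry *act;
 *   int i;
 *
 *   flow_action_for_each(i, act, flow_action) {
 *           switch (act->id) {
 *           case FLOW_ACTION_DROP:
 *                   // program a drop rule in hardware
 *                   break;
 *           case FLOW_ACTION_VLAN_PUSH:
 *                   // use act->vlan.vid, act->vlan.proto, act->vlan.prio
 *                   break;
 *           default:
 *                   return -EOPNOTSUPP;
 *           }
 *   }
 */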
list_head list; flow_indr_block_bind_cb_t *cb; void *cb_priv; refcount_t refcnt; }; struct flow_keys { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_icmp icmp; struct flow_dissector_key_addrs addrs; long: 0; }; struct flow_keys_basic { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; }; struct flow_keys_digest { u8 data[16]; }; struct flow_match { struct flow_dissector *dissector; void *mask; void *key; }; struct flow_match_arp { struct flow_dissector_key_arp *key; struct flow_dissector_key_arp *mask; }; struct flow_match_basic { struct flow_dissector_key_basic *key; struct flow_dissector_key_basic *mask; }; struct flow_match_control { struct flow_dissector_key_control *key; struct flow_dissector_key_control *mask; }; struct flow_match_ct { struct flow_dissector_key_ct *key; struct flow_dissector_key_ct *mask; }; struct flow_match_enc_keyid { struct flow_dissector_key_keyid *key; struct flow_dissector_key_keyid *mask; }; struct flow_match_enc_opts { struct flow_dissector_key_enc_opts *key; struct flow_dissector_key_enc_opts *mask; }; struct flow_match_eth_addrs { struct flow_dissector_key_eth_addrs *key; struct flow_dissector_key_eth_addrs *mask; }; struct flow_match_icmp { struct flow_dissector_key_icmp *key; struct flow_dissector_key_icmp *mask; }; struct flow_match_ip { struct flow_dissector_key_ip *key; struct flow_dissector_key_ip *mask; }; struct flow_match_ipsec { struct flow_dissector_key_ipsec *key; struct flow_dissector_key_ipsec *mask; }; struct flow_match_ipv4_addrs { struct flow_dissector_key_ipv4_addrs *key; struct flow_dissector_key_ipv4_addrs *mask; }; struct flow_match_ipv6_addrs { struct flow_dissector_key_ipv6_addrs *key; struct flow_dissector_key_ipv6_addrs *mask; }; struct flow_match_l2tpv3 { struct flow_dissector_key_l2tpv3 *key; struct flow_dissector_key_l2tpv3 *mask; }; struct flow_match_meta { struct flow_dissector_key_meta *key; struct flow_dissector_key_meta *mask; }; struct flow_match_mpls { struct flow_dissector_key_mpls *key; struct flow_dissector_key_mpls *mask; }; struct flow_match_ports { struct flow_dissector_key_ports *key; struct flow_dissector_key_ports *mask; }; struct flow_match_ports_range { struct flow_dissector_key_ports_range *key; struct flow_dissector_key_ports_range *mask; }; struct flow_match_pppoe { struct flow_dissector_key_pppoe *key; struct flow_dissector_key_pppoe *mask; }; struct flow_match_tcp { struct flow_dissector_key_tcp *key; struct flow_dissector_key_tcp *mask; }; struct flow_match_vlan { struct flow_dissector_key_vlan *key; struct flow_dissector_key_vlan *mask; }; struct flow_offload_tuple { union { struct in_addr src_v4; struct in6_addr src_v6; }; union { struct in_addr dst_v4; struct in6_addr dst_v6; }; struct { __be16 src_port; __be16 dst_port; }; int iifidx; u8 l3proto; u8 l4proto; struct { u16 id; __be16 proto; } encap[2]; struct {} __hash; u8 dir: 2; u8 xmit_type: 3; u8 encap_num: 2; char: 1; u8 in_vlan_ingress: 2; u16 mtu; union { struct { struct dst_entry *dst_cache; u32 dst_cookie; }; struct { u32 ifidx; u32 hw_ifidx; u8 h_source[6]; u8 h_dest[6]; } out; struct { u32 iifidx; } tc; }; }; struct flow_offload_tuple_rhash { struct rhash_head node; struct flow_offload_tuple tuple; }; struct nf_conn; struct flow_offload { struct 
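/*
 * Example (sketch): the flow_match_* key/mask pairs above are filled in
 * from a classifier rule via the corresponding flow_rule_match_*()
 * helpers; a field is only meaningful where its mask bits are set.
 *
 *   if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *           struct flow_match_ipv4_addrs m;
 *
 *           flow_rule_match_ipv4_addrs(rule, &m);
 *           // match on m.key->src and m.key->dst under
 *           // m.mask->src and m.mask->dst
 *   }
 */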
flow_offload_tuple_rhash tuplehash[2]; struct nf_conn *ct; long unsigned int flags; u16 type; u32 timeout; struct callback_head callback_head; }; struct flow_offload_action { struct netlink_ext_ack *extack; enum offload_act_command command; enum flow_action_id id; u32 index; long unsigned int cookie; struct flow_stats stats; struct flow_action action; }; struct flow_offload_work { struct list_head list; enum flow_cls_command cmd; struct nf_flowtable *flowtable; struct flow_offload *flow; struct work_struct work; }; struct flow_offload_xdp { struct hlist_node hnode; long unsigned int net_device_addr; struct list_head head; }; struct flow_offload_xdp_ft { struct list_head head; struct nf_flowtable *ft; struct callback_head rcuhead; }; struct flow_ports { __be16 source; __be16 dest; }; struct flow_rule { struct flow_match match; struct flow_action action; }; struct flowi_tunnel { __be64 tun_id; }; struct flowi_common { int flowic_oif; int flowic_iif; int flowic_l3mdev; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 flowic_secid; kuid_t flowic_uid; __u32 flowic_multipath_hash; struct flowi_tunnel flowic_tun_key; }; union flowi_uli { struct { __be16 dport; __be16 sport; } ports; struct { __u8 type; __u8 code; } icmpt; __be32 gre_key; struct { __u8 type; } mht; }; struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; }; struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; __u32 mp_hash; }; struct flowi { union { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; } u; }; struct flush_busy_ctx_data { struct blk_mq_hw_ctx *hctx; struct list_head *list; }; struct kyber_hctx_data; struct flush_kcq_data { struct kyber_hctx_data *khd; unsigned int sched_domain; struct list_head *list; }; struct flush_tlb_info { struct mm_struct *mm; long unsigned int start; long unsigned int end; u64 new_tlb_gen; unsigned int initiating_cpu; u8 stride_shift; u8 freed_tables; }; struct fname { __u32 hash; __u32 minor_hash; struct rb_node rb_hash; struct fname *next; __u32 inode; __u8 name_len; __u8 file_type; char name[0]; }; struct fnhe_hash_bucket { struct fib_nh_exception *chain; }; struct focaltech_finger_state { bool active; bool valid; unsigned int x; unsigned int y; }; struct focaltech_hw_state { struct focaltech_finger_state fingers[5]; unsigned int width; bool pressed; }; struct focaltech_data { unsigned int x_max; unsigned int y_max; struct focaltech_hw_state state; }; struct page_pool; struct page { long unsigned int flags; union { struct { union { struct list_head lru; struct { void *__filler; unsigned int mlock_count; }; struct list_head buddy_list; struct list_head pcp_list; }; struct address_space *mapping; union { long unsigned int index; long unsigned int share; }; long unsigned int private; }; struct { long unsigned int pp_magic; struct page_pool *pp; long unsigned int _pp_mapping_pad; long unsigned int dma_addr; atomic_long_t pp_ref_count; }; struct { long unsigned int compound_head; }; struct { struct dev_pagemap *pgmap; void *zone_device_data; }; struct callback_head callback_head; }; union { unsigned int page_type; atomic_t _mapcount; }; atomic_t _refcount; long unsigned int memcg_data; }; struct folio { union { struct { long unsigned int flags; union { struct list_head lru; struct { void *__filler; unsigned int mlock_count; }; }; struct address_space *mapping; long unsigned int index; union { void 
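/*
 * Example (sketch): `struct flowi4` above is the key used for IPv4 route
 * lookups; names like `flowi4_oif` used by callers are shorthand macros
 * for members of the embedded `__fl_common`. `daddr`/`saddr` below are
 * placeholders.
 *
 *   struct flowi4 fl4 = {};
 *   struct rtable *rt;
 *
 *   fl4.daddr = daddr;
 *   fl4.saddr = saddr;
 *   rt = ip_route_output_key(net, &fl4);
 *   if (IS_ERR(rt))
 *           return PTR_ERR(rt);
 *   // ... use the route ...
 *   ip_rt_put(rt);
 */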
*private; swp_entry_t swap; }; atomic_t _mapcount; atomic_t _refcount; long unsigned int memcg_data; }; struct page page; }; union { struct { long unsigned int _flags_1; long unsigned int _head_1; atomic_t _large_mapcount; atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; atomic_t _pincount; unsigned int _folio_nr_pages; }; struct page __page_1; }; union { struct { long unsigned int _flags_2; long unsigned int _head_2; void *_hugetlb_subpool; void *_hugetlb_cgroup; void *_hugetlb_cgroup_rsvd; void *_hugetlb_hwpoison; }; struct { long unsigned int _flags_2a; long unsigned int _head_2a; struct list_head _deferred_list; }; struct page __page_2; }; }; struct folio_iter { struct folio *folio; size_t offset; size_t length; struct folio *_next; size_t _seg_count; int _i; }; struct folio_queue { struct folio_batch vec; u8 orders[31]; struct folio_queue *next; struct folio_queue *prev; long unsigned int marks; long unsigned int marks2; long unsigned int marks3; }; struct folio_referenced_arg { int mapcount; int referenced; long unsigned int vm_flags; struct mem_cgroup *memcg; }; struct folio_walk { struct page *page; enum folio_walk_level level; union { pte_t *ptep; pud_t *pudp; pmd_t *pmdp; }; union { pte_t pte; pud_t pud; pmd_t pmd; }; struct vm_area_struct *vma; spinlock_t *ptl; }; struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; struct follow_pfnmap_args { struct vm_area_struct *vma; long unsigned int address; spinlock_t *lock; pte_t *ptep; long unsigned int pfn; pgprot_t pgprot; bool writable; bool special; }; struct font_data { unsigned int extra[4]; const unsigned char data[0]; }; struct font_desc { int idx; const char *name; unsigned int width; unsigned int height; unsigned int charcount; const void *data; int pref; }; struct inactive_task_frame { long unsigned int r15; long unsigned int r14; long unsigned int r13; long unsigned int r12; long unsigned int bx; long unsigned int bp; long unsigned int ret_addr; }; struct fork_frame { struct inactive_task_frame frame; struct pt_regs regs; }; struct fou { struct socket *sock; u8 protocol; u8 flags; __be16 port; u8 family; u16 type; struct list_head list; struct callback_head rcu; }; struct udp_port_cfg { u8 family; union { struct in_addr local_ip; struct in6_addr local_ip6; }; union { struct in_addr peer_ip; struct in6_addr peer_ip6; }; __be16 local_udp_port; __be16 peer_udp_port; int bind_ifindex; unsigned int use_udp_checksums: 1; unsigned int use_udp6_tx_checksums: 1; unsigned int use_udp6_rx_checksums: 1; unsigned int ipv6_v6only: 1; }; struct fou_cfg { u16 type; u8 protocol; u8 flags; struct udp_port_cfg udp_config; }; struct fou_net { struct list_head fou_list; struct mutex fou_lock; }; struct fown_struct { struct file *file; rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; }; struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20]; u32 status; }; struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union { struct { u64 rip; u64 rdp; }; struct { u32 fip; u32 fcs; u32 foo; u32 fos; }; }; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32]; u32 xmm_space[64]; u32 padding[12]; union { u32 padding1[12]; u32 sw_reserved[12]; }; }; struct math_emu_info; struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; }; struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 
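/*
 * Example (sketch): `struct folio` above overlays one or more `struct
 * page`s; code converts between the two and queries state through typed
 * helpers rather than poking at raw page flags.
 *
 *   struct folio *folio = page_folio(page);
 *
 *   if (folio_test_dirty(folio))
 *           pr_debug("dirty folio spanning %ld pages\n",
 *                    folio_nr_pages(folio));
 */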
reserved[6]; }; struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0]; }; union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096]; }; struct rethook_node { struct callback_head rcu; struct llist_node llist; struct rethook *rethook; long unsigned int ret_addr; long unsigned int frame; }; struct fprobe_rethook_node { struct rethook_node node; long unsigned int entry_ip; long unsigned int entry_parent_ip; char data[0]; }; struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; }; struct fpstate { unsigned int size; unsigned int user_size; u64 xfeatures; u64 user_xfeatures; u64 xfd; unsigned int is_valloc: 1; unsigned int is_guest: 1; unsigned int is_confidential: 1; unsigned int in_use: 1; long: 64; long: 64; long: 64; union fpregs_state regs; }; struct fpu_state_perm { u64 __state_perm; unsigned int __state_size; unsigned int __user_state_size; }; struct fpu { unsigned int last_cpu; long unsigned int avx512_timestamp; struct fpstate *fpstate; struct fpstate *__task_fpstate; struct fpu_state_perm perm; struct fpu_state_perm guest_perm; struct fpstate __fpstate; }; struct fpu_guest { u64 xfeatures; u64 perm; u64 xfd_err; unsigned int uabi_size; struct fpstate *fpstate; }; struct fpu_state_config { unsigned int max_size; unsigned int default_size; u64 max_features; u64 default_features; u64 legacy_features; u64 independent_features; }; struct fq_codel_flow { struct sk_buff *head; struct sk_buff *tail; struct list_head flowchain; int deficit; struct codel_vars cvars; }; struct fq_codel_sched_data { struct tcf_proto *filter_list; struct tcf_block *block; struct fq_codel_flow *flows; u32 *backlogs; u32 flows_cnt; u32 quantum; u32 drop_batch_size; u32 memory_limit; struct codel_params cparams; struct codel_stats cstats; u32 memory_usage; u32 drop_overmemory; u32 drop_overlimit; u32 new_flow_count; struct list_head new_flows; struct list_head old_flows; }; struct fq_flow { struct rb_root t_root; struct sk_buff *head; union { struct sk_buff *tail; long unsigned int age; }; union { struct rb_node fq_node; u64 stat_fastpath_packets; }; struct sock *sk; u32 socket_hash; int qlen; int credit; int band; struct fq_flow *next; struct rb_node rate_node; u64 time_next_packet; }; struct fq_flow_head { struct fq_flow *first; struct fq_flow *last; }; struct fq_perband_flows { struct fq_flow_head new_flows; struct fq_flow_head old_flows; int credit; int quantum; }; struct qdisc_watchdog { struct hrtimer timer; struct Qdisc *qdisc; }; struct fq_sched_data { u32 quantum; u32 initial_quantum; u32 flow_refill_delay; u32 flow_plimit; long unsigned int flow_max_rate; u64 ce_threshold; u64 horizon; u32 orphan_mask; u32 low_rate_threshold; struct rb_root *fq_root; u8 rate_enable; u8 fq_trees_log; u8 horizon_drop; u8 prio2band[4]; u32 timer_slack; unsigned int band_nr; struct fq_perband_flows band_flows[3]; struct fq_flow internal; struct rb_root delayed; u64 time_next_delayed_flow; long unsigned int unthrottle_latency_ns; u32 band_pkt_count[3]; u32 flows; u32 inactive_flows; u32 throttled_flows; u64 stat_throttled; struct qdisc_watchdog watchdog; u64 stat_gc_flows; u64 stat_band_drops[3]; u64 stat_ce_mark; u64 stat_horizon_drops; u64 stat_horizon_caps; u64 stat_flows_plimit; u64 stat_pkts_too_long; u64 stat_allocation_errors; }; struct fq_skb_cb { u64 time_to_send; u8 band; }; struct inet_frags; struct fqdir { long int high_thresh; long int 
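/*
 * Example (sketch): the fpstate/fpu bookkeeping above is why kernel code
 * must bracket any SIMD use with kernel_fpu_begin()/kernel_fpu_end(),
 * which save and restore the register image described by
 * `union fpregs_state`.
 *
 *   kernel_fpu_begin();
 *   // SSE/AVX-using code goes here; preemption is disabled
 *   kernel_fpu_end();
 */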
low_thresh; int timeout; int max_dist; struct inet_frags *f; struct net *net; bool dead; long: 64; long: 64; struct rhashtable rhashtable; long: 64; long: 64; long: 64; long: 64; atomic_long_t mem; struct work_struct destroy_work; struct llist_node free_list; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct frag_hdr { __u8 nexthdr; __u8 reserved; __be16 frag_off; __be32 identification; }; struct frag_v4_compare_key { __be32 saddr; __be32 daddr; u32 user; u32 vif; __be16 id; u16 protocol; }; struct frag_v6_compare_key { struct in6_addr saddr; struct in6_addr daddr; u32 user; __be32 id; u32 iif; }; struct inet_frag_queue { struct rhash_head node; union { struct frag_v4_compare_key v4; struct frag_v6_compare_key v6; } key; struct timer_list timer; spinlock_t lock; refcount_t refcnt; struct rb_root rb_fragments; struct sk_buff *fragments_tail; struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; u8 tstamp_type; __u8 flags; u16 max_size; struct fqdir *fqdir; struct callback_head rcu; }; struct frag_queue { struct inet_frag_queue q; int iif; __u16 nhoffset; u8 ecn; }; struct freader { void *buf; u32 buf_sz; int err; union { struct { struct file *file; struct folio *folio; void *addr; loff_t folio_off; bool may_fault; }; struct { const char *data; u64 data_sz; }; }; }; struct free_area { struct list_head free_list[6]; long unsigned int nr_free; }; struct freerunning_counters { unsigned int counter_base; unsigned int counter_offset; unsigned int box_offset; unsigned int num_counters; unsigned int bits; unsigned int *box_offsets; }; struct freezer { struct cgroup_subsys_state css; unsigned int state; }; struct freq_attr { struct attribute attr; ssize_t (*show)(struct cpufreq_policy *, char *); ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); }; struct muldiv { u32 multiplier; u32 divider; }; struct freq_desc { bool use_msr_plat; struct muldiv muldiv[16]; u32 freqs[16]; u32 mask; }; struct p_log { const char *prefix; struct fc_log *log; }; struct fs_context_operations; struct fs_context { const struct fs_context_operations *ops; struct mutex uapi_mutex; struct file_system_type *fs_type; void *fs_private; void *sget_key; struct dentry *root; struct user_namespace *user_ns; struct net *net_ns; const struct cred *cred; struct p_log log; const char *source; void *security; void *s_fs_info; unsigned int sb_flags; unsigned int sb_flags_mask; unsigned int s_iflags; enum fs_context_purpose purpose: 8; enum fs_context_phase phase: 8; bool need_free: 1; bool global: 1; bool oldapi: 1; bool exclusive: 1; }; struct fs_parameter; struct fs_context_operations { void (*free)(struct fs_context *); int (*dup)(struct fs_context *, struct fs_context *); int (*parse_param)(struct fs_context *, struct fs_parameter *); int (*parse_monolithic)(struct fs_context *, void *); int (*get_tree)(struct fs_context *); int (*reconfigure)(struct fs_context *); }; struct fs_error_report { int error; struct inode *inode; struct super_block *sb; }; struct fs_parameter { const char *key; enum fs_value_type type: 8; union { char *string; void *blob; struct filename *name; struct file *file; }; size_t size; int dirfd; }; struct fs_parse_result; typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); struct fs_parameter_spec { const char *name; fs_param_type *type; u8 opt; short unsigned int flags; const void *data; }; struct fs_parse_result { bool negated; union { bool boolean; int int_32; unsigned int uint_32; u64 uint_64; kuid_t 
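/*
 * Example (sketch): a filesystem wires up `struct fs_context` above by
 * installing its own `fs_context_operations` from its .init_fs_context
 * hook; `demo_fill_super` is a hypothetical helper with the
 * fill_super signature expected by get_tree_nodev().
 *
 *   static int demo_get_tree(struct fs_context *fc)
 *   {
 *           return get_tree_nodev(fc, demo_fill_super);
 *   }
 *
 *   static const struct fs_context_operations demo_ctx_ops = {
 *           .get_tree = demo_get_tree,
 *   };
 *
 *   static int demo_init_fs_context(struct fs_context *fc)
 *   {
 *           fc->ops = &demo_ctx_ops;
 *           return 0;
 *   }
 */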
uid; kgid_t gid; }; }; struct fs_struct { int users; spinlock_t lock; seqcount_spinlock_t seq; int umask; int in_exec; struct path root; struct path pwd; }; struct fs_sysfs_path { __u8 len; __u8 name[128]; }; struct fscrypt_name { const struct qstr *usr_fname; struct fscrypt_str disk_name; u32 hash; u32 minor_hash; struct fscrypt_str crypto_buf; bool is_nokey_name; }; struct fsl_mc_obj_desc { char type[16]; int id; u16 vendor; u16 ver_major; u16 ver_minor; u8 irq_count; u8 region_count; u32 state; char label[16]; u16 flags; }; struct fsl_mc_io; struct fsl_mc_device_irq; struct fsl_mc_resource; struct fsl_mc_device { struct device dev; u64 dma_mask; u16 flags; u32 icid; u16 mc_handle; struct fsl_mc_io *mc_io; struct fsl_mc_obj_desc obj_desc; struct resource *regions; struct fsl_mc_device_irq **irqs; struct fsl_mc_resource *resource; struct device_link *consumer_link; const char *driver_override; }; struct fsl_mc_resource_pool; struct fsl_mc_resource { enum fsl_mc_pool_type type; s32 id; void *data; struct fsl_mc_resource_pool *parent_pool; struct list_head node; }; struct fsl_mc_device_irq { unsigned int virq; struct fsl_mc_device *mc_dev; u8 dev_irq_index; struct fsl_mc_resource resource; }; struct fsl_mc_io { struct device *dev; u16 flags; u32 portal_size; phys_addr_t portal_phys_addr; void *portal_virt_addr; struct fsl_mc_device *dpmcp_dev; union { struct mutex mutex; raw_spinlock_t spinlock; }; }; struct fsmap { __u32 fmr_device; __u32 fmr_flags; __u64 fmr_physical; __u64 fmr_owner; __u64 fmr_offset; __u64 fmr_length; __u64 fmr_reserved[3]; }; struct fsmap_head { __u32 fmh_iflags; __u32 fmh_oflags; __u32 fmh_count; __u32 fmh_entries; __u64 fmh_reserved[6]; struct fsmap fmh_keys[2]; struct fsmap fmh_recs[0]; }; struct fsnotify_event { struct list_head list; }; struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; struct ucounts *ucounts; }; struct fsnotify_ops; struct fsnotify_group { const struct fsnotify_ops *ops; refcount_t refcnt; spinlock_t notification_lock; struct list_head notification_list; wait_queue_head_t notification_waitq; unsigned int q_len; unsigned int max_events; enum fsnotify_group_prio priority; bool shutdown; int flags; unsigned int owner_flags; struct mutex mark_mutex; atomic_t user_waits; struct list_head marks_list; struct fasync_struct *fsn_fa; struct fsnotify_event *overflow_event; struct mem_cgroup *memcg; union { void *private; struct inotify_group_private_data inotify_data; }; }; struct fsnotify_iter_info { struct fsnotify_mark *marks[5]; struct fsnotify_group *current_group; unsigned int report_mask; int srcu_idx; }; typedef struct fsnotify_mark_connector *fsnotify_connp_t; struct fsnotify_mark_connector { spinlock_t lock; unsigned char type; unsigned char prio; short unsigned int flags; union { void *obj; struct fsnotify_mark_connector *destroy_next; }; struct hlist_head list; }; struct fsnotify_ops { int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); void (*free_group_priv)(struct fsnotify_group *); void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); void (*free_event)(struct fsnotify_group *, struct fsnotify_event *); void (*free_mark)(struct fsnotify_mark *); }; struct fsnotify_sb_info { struct fsnotify_mark_connector *sb_marks; atomic_long_t watched_objects[3]; }; struct fstrim_range { __u64 start; __u64 len; __u64 
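/*
 * Example (userspace sketch, from memory of the fsmap UAPI): `struct
 * fsmap_head` above is the variable-length argument of the
 * FS_IOC_GETFSMAP ioctl; callers set fmh_keys[0] and fmh_keys[1] to the
 * low and high ends of the range to map (all-ones meaning "to the end")
 * and size fmh_count to the fmh_recs[] array they allocated.
 *
 *   struct fsmap_head *head;
 *
 *   head = calloc(1, sizeof(*head) + 64 * sizeof(struct fsmap));
 *   head->fmh_count = 64;
 *   head->fmh_keys[1].fmr_device   = UINT32_MAX;
 *   head->fmh_keys[1].fmr_physical = UINT64_MAX;
 *   head->fmh_keys[1].fmr_owner    = UINT64_MAX;
 *   head->fmh_keys[1].fmr_offset   = UINT64_MAX;
 *   if (ioctl(fd, FS_IOC_GETFSMAP, head) == 0)
 *           printf("%u extents returned\n", head->fmh_entries);
 */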
minlen; }; struct fsuuid { __u32 fsu_len; __u32 fsu_flags; __u8 fsu_uuid[0]; }; struct fsuuid2 { __u8 len; __u8 uuid[16]; }; struct fsverity_descriptor { __u8 version; __u8 hash_algorithm; __u8 log_blocksize; __u8 salt_size; __le32 sig_size; __le64 data_size; __u8 root_hash[64]; __u8 salt[32]; __u8 __reserved[144]; __u8 signature[0]; }; struct fsverity_digest { __u16 digest_algorithm; __u16 digest_size; __u8 digest[0]; }; struct fsverity_enable_arg { __u32 version; __u32 hash_algorithm; __u32 block_size; __u32 salt_size; __u64 salt_ptr; __u32 sig_size; __u32 __reserved1; __u64 sig_ptr; __u64 __reserved2[11]; }; struct fsverity_hash_alg { struct crypto_shash *tfm; const char *name; unsigned int digest_size; unsigned int block_size; enum hash_algo algo_id; }; struct merkle_tree_params { const struct fsverity_hash_alg *hash_alg; const u8 *hashstate; unsigned int digest_size; unsigned int block_size; unsigned int hashes_per_block; unsigned int blocks_per_page; u8 log_digestsize; u8 log_blocksize; u8 log_arity; u8 log_blocks_per_page; unsigned int num_levels; u64 tree_size; long unsigned int tree_pages; long unsigned int level_start[8]; }; struct fsverity_info { struct merkle_tree_params tree_params; u8 root_hash[64]; u8 file_digest[64]; const struct inode *inode; long unsigned int *hash_block_verified; }; struct fsverity_operations { int (*begin_enable_verity)(struct file *); int (*end_enable_verity)(struct file *, const void *, size_t, u64); int (*get_verity_descriptor)(struct inode *, void *, size_t); struct page * (*read_merkle_tree_page)(struct inode *, long unsigned int, long unsigned int); int (*write_merkle_tree_block)(struct inode *, const void *, u64, unsigned int); }; struct fsverity_read_metadata_arg { __u64 metadata_type; __u64 offset; __u64 length; __u64 buf_ptr; __u64 __reserved; }; struct fsxattr { __u32 fsx_xflags; __u32 fsx_extsize; __u32 fsx_nextents; __u32 fsx_projid; __u32 fsx_cowextsize; unsigned char fsx_pad[8]; }; struct trace_seq { char buffer[8156]; struct seq_buf seq; size_t readpos; int full; }; struct tracer; struct ring_buffer_iter; struct trace_iterator { struct trace_array *tr; struct tracer *trace; struct array_buffer *array_buffer; void *private; int cpu_file; struct mutex mutex; struct ring_buffer_iter **buffer_iter; long unsigned int iter_flags; void *temp; unsigned int temp_size; char *fmt; unsigned int fmt_size; atomic_t wait_index; struct trace_seq tmp_seq; cpumask_var_t started; bool closed; bool snapshot; struct trace_seq seq; struct trace_entry *ent; long unsigned int lost_events; int leftover; int ent_size; int cpu; u64 ts; loff_t pos; long int idx; }; struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int spare_size; unsigned int read; }; struct ftrace_entry { struct trace_entry ent; long unsigned int ip; long unsigned int parent_ip; }; struct ftrace_event_field { struct list_head link; const char *name; const char *type; int filter_type; int offset; int size; int is_signed; int len; }; struct ftrace_func_command { struct list_head list; char *name; int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); }; struct ftrace_func_entry { struct hlist_node hlist; long unsigned int ip; long unsigned int direct; }; struct ftrace_func_map { struct ftrace_func_entry entry; void *data; }; struct ftrace_hash { long unsigned int size_bits; struct hlist_head *buckets; long unsigned int count; long unsigned int flags; struct callback_head rcu; }; struct ftrace_func_mapper { struct 
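/*
 * Example (userspace sketch): `struct fsverity_enable_arg` above is the
 * argument of FS_IOC_ENABLE_VERITY (linux/fsverity.h); the file must be
 * opened read-only on a filesystem with verity support enabled.
 *
 *   struct fsverity_enable_arg arg = {
 *           .version        = 1,
 *           .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *           .block_size     = 4096,
 *   };
 *
 *   if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0)
 *           perror("FS_IOC_ENABLE_VERITY");
 */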
ftrace_hash hash; }; struct ftrace_probe_ops; struct ftrace_func_probe { struct ftrace_probe_ops *probe_ops; struct ftrace_ops ops; struct trace_array *tr; struct list_head list; void *data; int ref; }; struct ftrace_glob { char *search; unsigned int len; int type; }; struct trace_parser { bool cont; char *buffer; unsigned int idx; unsigned int size; }; struct ftrace_graph_data { struct ftrace_hash *hash; struct ftrace_func_entry *entry; int idx; enum graph_filter_type type; struct ftrace_hash *new_hash; const struct seq_operations *seq_ops; struct trace_parser parser; }; struct ftrace_init_func { struct list_head list; long unsigned int ip; }; struct ftrace_page; struct ftrace_iterator { loff_t pos; loff_t func_pos; loff_t mod_pos; struct ftrace_page *pg; struct dyn_ftrace *func; struct ftrace_func_probe *probe; struct ftrace_func_entry *probe_entry; struct trace_parser parser; struct ftrace_hash *hash; struct ftrace_ops *ops; struct trace_array *tr; struct list_head *mod_list; int pidx; int idx; unsigned int flags; }; struct ftrace_mod_func { struct list_head list; char *name; long unsigned int ip; unsigned int size; }; struct ftrace_mod_load { struct list_head list; char *func; char *module; int enable; }; struct ftrace_mod_map { struct callback_head rcu; struct list_head list; struct module *mod; long unsigned int start_addr; long unsigned int end_addr; struct list_head funcs; unsigned int num_funcs; }; union ftrace_op_code_union { char code[7]; struct { char op[3]; int offset; } __attribute__((packed)); }; struct ftrace_page { struct ftrace_page *next; struct dyn_ftrace *records; int index; int order; }; struct ftrace_probe_ops { void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *); int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); }; struct ftrace_rec_iter { struct ftrace_page *pg; int index; }; struct ftrace_regs { struct pt_regs regs; }; struct ftrace_ret_stack { long unsigned int ret; long unsigned int func; long long unsigned int calltime; long unsigned int *retp; }; struct ftrace_stack { long unsigned int calls[1024]; }; struct ftrace_stacks { struct ftrace_stack stacks[4]; }; struct func_repeats_entry { struct trace_entry ent; long unsigned int ip; long unsigned int parent_ip; u16 count; u16 top_delta_ts; u32 bottom_delta_ts; }; struct function_filter_data { struct ftrace_ops *ops; int first_filter; int first_notrace; }; struct futex_hash_bucket { atomic_t waiters; spinlock_t lock; struct plist_head chain; long: 64; long: 64; long: 64; long: 64; long: 64; }; union futex_key { struct { u64 i_seq; long unsigned int pgoff; unsigned int offset; } shared; struct { union { struct mm_struct *mm; u64 __tmp; }; long unsigned int address; unsigned int offset; } private; struct { u64 ptr; long unsigned int word; unsigned int offset; } both; }; struct rt_mutex_base { raw_spinlock_t wait_lock; struct rb_root_cached waiters; struct task_struct *owner; }; struct futex_pi_state { struct list_head list; struct rt_mutex_base pi_mutex; struct task_struct *owner; refcount_t refcount; union futex_key key; }; struct wake_q_head; struct futex_q; typedef void futex_wake_fn(struct wake_q_head *, struct futex_q *); struct rt_mutex_waiter; struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; 
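/*
 * Example (sketch): the ftrace plumbing above (hashes, pages, probes) is
 * driven from `struct ftrace_ops` callbacks; a minimal tracer registers
 * one function that receives the `struct ftrace_regs` defined above.
 * The callback signature shown is the modern one and has varied across
 * kernel versions.
 *
 *   static void demo_tracer(unsigned long ip, unsigned long parent_ip,
 *                           struct ftrace_ops *op,
 *                           struct ftrace_regs *fregs)
 *   {
 *           // called on every traced function entry
 *   }
 *
 *   static struct ftrace_ops demo_ops = {
 *           .func = demo_tracer,
 *   };
 *
 *   register_ftrace_function(&demo_ops);
 *   // ... later ...
 *   unregister_ftrace_function(&demo_ops);
 */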
futex_wake_fn *wake; void *wake_data; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; atomic_t requeue_state; }; struct futex_waitv { __u64 val; __u64 uaddr; __u32 flags; __u32 __reserved; }; struct futex_vector { struct futex_waitv w; struct futex_q q; }; struct fw_cache_entry { struct list_head list; const char *name; }; struct fw_name_devm { long unsigned int magic; const char *name; }; struct fw_state { struct completion completion; enum fw_status status; }; struct fw_priv { struct kref ref; struct list_head list; struct firmware_cache *fwc; struct fw_state fw_st; void *data; size_t size; size_t allocated_size; size_t offset; u32 opt_flags; bool is_paged_buf; struct page **pages; int nr_pages; int page_array_size; bool need_uevent; struct list_head pending_list; const char *fw_name; }; struct fw_sysfs { bool nowait; struct device dev; struct fw_priv *fw_priv; struct firmware *fw; void *fw_upload_priv; }; union fw_table_header { struct acpi_table_header acpi; struct acpi_table_cdat cdat; }; union fwnet_hwaddr { u8 u[16]; struct { __be64 uniq_id; u8 max_rec; u8 sspd; u8 fifo[6]; } uc; }; struct fwnode_endpoint { unsigned int port; unsigned int id; const struct fwnode_handle *local_fwnode; }; struct fwnode_link { struct fwnode_handle *supplier; struct list_head s_hook; struct fwnode_handle *consumer; struct list_head c_hook; u8 flags; }; struct fwnode_reference_args; struct fwnode_operations { struct fwnode_handle * (*get)(struct fwnode_handle *); void (*put)(struct fwnode_handle *); bool (*device_is_available)(const struct fwnode_handle *); const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); bool (*device_dma_supported)(const struct fwnode_handle *); enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *); bool (*property_present)(const struct fwnode_handle *, const char *); int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); const char * (*get_name)(const struct fwnode_handle *); const char * (*get_name_prefix)(const struct fwnode_handle *); struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); void * (*iomap)(struct fwnode_handle *, int); int (*irq_get)(const struct fwnode_handle *, unsigned int); int (*add_links)(struct fwnode_handle *); }; struct fwnode_reference_args { struct fwnode_handle *fwnode; unsigned int nargs; u64 args[8]; }; struct idt_bits { u16 ist: 3; u16 zero: 5; u16 type: 5; u16 dpl: 2; u16 p: 1; }; struct gate_struct { u16 offset_low; u16 segment; struct idt_bits bits; u16 offset_middle; u32 offset_high; u32 reserved; }; typedef struct gate_struct gate_desc; struct gatt_mask { long unsigned 
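/*
 * Example (userspace sketch): `struct futex_waitv` above is the per-futex
 * element passed to the futex_waitv(2) syscall; `word`, `expected` and
 * `abs_timeout` are placeholders, and the timeout is absolute on the
 * given clock.
 *
 *   struct futex_waitv w = {
 *           .uaddr = (uintptr_t)&word,
 *           .val   = expected,
 *           .flags = FUTEX_32,
 *   };
 *
 *   syscall(__NR_futex_waitv, &w, 1, 0, &abs_timeout, CLOCK_MONOTONIC);
 */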
int mask; u32 type; }; struct gcm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_ahash_spawn ghash; }; struct gcry_mpi; typedef struct gcry_mpi *MPI; struct gcry_mpi { int alloced; int nlimbs; int nbits; int sign; unsigned int flags; mpi_limb_t *d; }; struct gdt_page { struct desc_struct gdt[16]; /* followed in the original dump by a long run of anonymous `long: 64;` padding bitfields that pad the structure out to a full 4096-byte page; the run is elided here for readability */ }; struct pcpu_gen_cookie; struct gen_cookie { struct pcpu_gen_cookie *local; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; atomic64_t forward_last; atomic64_t reverse_last; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct gen_pool; typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); struct gen_pool { spinlock_t lock; struct list_head chunks; int min_alloc_order; genpool_algo_t algo; void *data; const char *name; }; struct gen_pool_chunk { struct list_head next_chunk; atomic_long_t avail; phys_addr_t phys_addr; void *owner; long unsigned int start_addr; long unsigned int end_addr; long unsigned int bits[0]; }; struct timer_rand_state; struct gendisk { int major; int first_minor; int minors; char disk_name[32]; short unsigned int events; short unsigned int event_flags; struct xarray part_tbl; struct block_device *part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; struct bio_set bio_split; int flags; long unsigned int state; struct mutex open_mutex; unsigned int open_partitions; struct backing_dev_info *bdi; struct kobject queue_kobj; struct kobject *slave_dir; struct timer_rand_state *random; atomic_t sync_io; struct disk_events *ev; int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; u64 diskseq; blk_mode_t open_mode; struct blk_independent_access_ranges *ia_ranges; }; struct ip_tunnel_key { __be64 tun_id; union { struct { __be32 src; __be32 dst; } ipv4; struct { struct in6_addr src; struct in6_addr dst; } ipv6; } u; long unsigned int tun_flags[1]; __be32 label; u32 nhid; u8 tos; u8 ttl; __be16 tp_src; __be16 tp_dst; __u8 flow_flags; }; struct ip_tunnel_encap { u16 type; u16 flags;
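/*
 * Example (sketch): `struct gen_pool` above backs the genalloc API for
 * carving allocations out of a caller-provided region; the order passed
 * to gen_pool_create() sets the allocation granularity. `region` and
 * `region_size` are placeholders.
 *
 *   struct gen_pool *pool = gen_pool_create(5, -1);   // 32-byte granules
 *   unsigned long addr;
 *
 *   gen_pool_add(pool, (unsigned long)region, region_size, -1);
 *   addr = gen_pool_alloc(pool, 256);
 *   // ...
 *   gen_pool_free(pool, addr, 256);
 *   gen_pool_destroy(pool);
 */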
__be16 sport; __be16 dport; }; struct ip_tunnel_info { struct ip_tunnel_key key; struct ip_tunnel_encap encap; struct dst_cache dst_cache; u8 options_len; u8 mode; }; struct geneve_config { struct ip_tunnel_info info; bool collect_md; bool use_udp6_rx_checksums; bool ttl_inherit; enum ifla_geneve_df df; bool inner_proto_inherit; }; struct geneve_dev; struct geneve_dev_node { struct hlist_node hlist; struct geneve_dev *geneve; }; struct gro_cell; struct gro_cells { struct gro_cell *cells; }; struct geneve_sock; struct geneve_dev { struct geneve_dev_node hlist4; struct geneve_dev_node hlist6; struct net *net; struct net_device *dev; struct geneve_sock *sock4; struct geneve_sock *sock6; struct list_head next; struct gro_cells gro_cells; struct geneve_config cfg; }; struct geneve_net { struct list_head geneve_list; struct list_head sock_list; }; struct geneve_opt { __be16 opt_class; u8 type; u8 length: 5; u8 r3: 1; u8 r2: 1; u8 r1: 1; u8 opt_data[0]; }; struct geneve_sock { bool collect_md; struct list_head list; struct socket *sock; struct callback_head rcu; int refcnt; struct hlist_head vni_list[1024]; }; struct genevehdr { u8 opt_len: 6; u8 ver: 2; u8 rsvd1: 6; u8 critical: 1; u8 oam: 1; __be16 proto_type; u8 vni[3]; u8 rsvd2; u8 options[0]; }; struct ocontext; struct genfs { char *fstype; struct ocontext *head; struct genfs *next; }; struct netlink_callback; struct nla_policy; struct genl_split_ops { union { struct { int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); int (*doit)(struct sk_buff *, struct genl_info *); void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); }; struct { int (*start)(struct netlink_callback *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); }; }; const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; struct genlmsghdr; struct genl_info { u32 snd_seq; u32 snd_portid; const struct genl_family *family; const struct nlmsghdr *nlhdr; struct genlmsghdr *genlhdr; struct nlattr **attrs; possible_net_t _net; void *user_ptr[2]; struct netlink_ext_ack *extack; }; struct genl_dumpit_info { struct genl_split_ops op; struct genl_info info; }; struct genl_ops; struct genl_small_ops; struct genl_multicast_group; struct genl_family { unsigned int hdrsize; char name[16]; unsigned int version; unsigned int maxattr; u8 netnsok: 1; u8 parallel_ops: 1; u8 n_ops; u8 n_small_ops; u8 n_split_ops; u8 n_mcgrps; u8 resv_start_op; const struct nla_policy *policy; int (*pre_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); void (*post_doit)(const struct genl_split_ops *, struct sk_buff *, struct genl_info *); int (*bind)(int); void (*unbind)(int); const struct genl_ops *ops; const struct genl_small_ops *small_ops; const struct genl_split_ops *split_ops; const struct genl_multicast_group *mcgrps; struct module *module; size_t sock_priv_size; void (*sock_priv_init)(void *); void (*sock_priv_destroy)(void *); int id; unsigned int mcgrp_offset; struct xarray *sock_privs; }; struct genl_multicast_group { char name[16]; u8 flags; }; struct genl_op_iter { const struct genl_family *family; struct genl_split_ops doit; struct genl_split_ops dumpit; int cmd_idx; int entry_idx; u32 cmd; u8 flags; }; struct genl_ops { int (*doit)(struct sk_buff *, struct genl_info *); int (*start)(struct netlink_callback *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); 
const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; struct genl_small_ops { int (*doit)(struct sk_buff *, struct genl_info *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; struct genl_start_context { const struct genl_family *family; struct nlmsghdr *nlh; struct netlink_ext_ack *extack; const struct genl_split_ops *ops; int hdrlen; }; struct genlmsghdr { __u8 cmd; __u8 version; __u16 reserved; }; struct genpool_data_align { int align; }; struct genpool_data_fixed { long unsigned int offset; }; struct genradix_iter { size_t offset; size_t pos; }; struct genradix_node { union { struct genradix_node *children[64]; u8 data[512]; }; }; struct getcpu_cache { long unsigned int blob[16]; }; struct linux_dirent; struct getdents_callback { struct dir_context ctx; struct linux_dirent *current_dir; int prev_reclen; int count; int error; }; struct getdents_callback___2 { struct dir_context ctx; char *name; u64 ino; int found; int sequence; }; struct linux_dirent64; struct getdents_callback64 { struct dir_context ctx; struct linux_dirent64 *current_dir; int prev_reclen; int count; int error; }; struct getfsmap_info { struct super_block *gi_sb; struct fsmap_head *gi_data; unsigned int gi_idx; __u32 gi_last_flags; }; struct input_keymap_entry { __u8 flags; __u8 len; __u16 index; __u32 keycode; __u8 scancode[32]; }; struct getset_keycode_data { struct input_keymap_entry ke; int error; }; struct gf128mul_4k { be128 t[256]; }; struct gf128mul_64k { struct gf128mul_4k *t[16]; }; struct kvm_memory_slot; struct gfn_to_hva_cache { u64 generation; gpa_t gpa; long unsigned int hva; long unsigned int len; struct kvm_memory_slot *memslot; }; struct kvm; struct gfn_to_pfn_cache { u64 generation; gpa_t gpa; long unsigned int uhva; struct kvm_memory_slot *memslot; struct kvm *kvm; struct list_head list; rwlock_t lock; struct mutex refresh_lock; void *khva; kvm_pfn_t pfn; bool active; bool valid; }; struct ghash_ctx { struct gf128mul_4k *gf128; }; struct ghash_desc_ctx { u8 buffer[16]; u32 bytes; }; struct global_params { bool no_turbo; bool turbo_disabled; int max_perf_pct; int min_perf_pct; }; struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; }; struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; }; struct gnet_estimator { signed char interval; unsigned char ewma_log; }; struct gnet_stats_basic { __u64 bytes; __u32 packets; }; struct gnet_stats_rate_est { __u32 bps; __u32 pps; }; struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; }; struct governor_attr { struct attribute attr; ssize_t (*show)(struct gov_attr_set *, char *); ssize_t (*store)(struct gov_attr_set *, const char *, size_t); }; struct gre_base_hdr { __be16 flags; __be16 protocol; }; struct gre_full_hdr { struct gre_base_hdr fixed_header; __be16 csum; __be16 reserved1; __be32 key; __be32 seq; }; struct gre_protocol { int (*handler)(struct sk_buff *); void (*err_handler)(struct sk_buff *, u32); }; struct gro_list { struct list_head list; int count; }; struct napi_struct { struct list_head poll_list; long unsigned int state; int weight; u32 defer_hard_irqs_count; long unsigned int gro_bitmask; int (*poll)(struct napi_struct *, int); int list_owner; struct net_device *dev; struct gro_list gro_hash[8]; struct sk_buff *skb; struct 
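/*
 * Example (sketch): `struct genl_family` above is registered once per
 * generic-netlink protocol family; a minimal family carries a single
 * command in small_ops. The "demo" name and command number are
 * hypothetical.
 *
 *   static int demo_doit(struct sk_buff *skb, struct genl_info *info)
 *   {
 *           return 0;
 *   }
 *
 *   static const struct genl_small_ops demo_small_ops[] = {
 *           { .cmd = 1, .doit = demo_doit, },
 *   };
 *
 *   static struct genl_family demo_family = {
 *           .name          = "demo",
 *           .version       = 1,
 *           .small_ops     = demo_small_ops,
 *           .n_small_ops   = ARRAY_SIZE(demo_small_ops),
 *           .resv_start_op = 2,
 *           .module        = THIS_MODULE,
 *   };
 *
 *   genl_register_family(&demo_family);
 */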
list_head rx_list; int rx_count; unsigned int napi_id; struct hrtimer timer; struct task_struct *thread; struct list_head dev_list; struct hlist_node napi_hash_node; int irq; }; struct gro_cell { struct sk_buff_head napi_skbs; struct napi_struct napi; }; struct gro_remcsum { int offset; __wsum delta; }; struct group_device { struct list_head list; struct device *dev; char *name; }; struct group_filter { union { struct { __u32 gf_interface_aux; struct __kernel_sockaddr_storage gf_group_aux; __u32 gf_fmode_aux; __u32 gf_numsrc_aux; struct __kernel_sockaddr_storage gf_slist[1]; }; struct { __u32 gf_interface; struct __kernel_sockaddr_storage gf_group; __u32 gf_fmode; __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist_flex[0]; }; }; }; struct group_for_pci_data { struct pci_dev *pdev; struct iommu_group *group; }; struct group_info { refcount_t usage; int ngroups; kgid_t gid[0]; }; struct group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group; }; struct group_source_req { __u32 gsr_interface; struct __kernel_sockaddr_storage gsr_group; struct __kernel_sockaddr_storage gsr_source; }; struct gtp_pdu_session_info { u8 pdu_type; u8 qfi; }; struct guehdr { union { struct { __u8 hlen: 5; __u8 control: 1; __u8 version: 2; __u8 proto_ctype; __be16 flags; }; __be32 word; }; }; union handle_parts { depot_stack_handle_t handle; struct { u32 pool_index_plus_1: 17; u32 offset: 10; u32 extra: 5; }; }; struct handle_to_path_ctx { struct path root; enum handle_to_path_flags flags; unsigned int fh_flags; }; struct hash { int ino; int minor; int major; umode_t mode; struct hash *next; char name[4098]; }; struct hash_ctx { struct af_alg_sgl sgl; u8 *result; struct crypto_wait wait; unsigned int len; bool more; struct ahash_request req; }; struct hashtab_key_params { u32 (*hash)(const void *); int (*cmp)(const void *, const void *); }; struct hashtab_node { void *key; void *datum; struct hashtab_node *next; }; struct mei_client_properties { uuid_le protocol_name; u8 protocol_version; u8 max_number_of_connections; u8 fixed_address; u8 single_recv_buf: 1; u8 vt_supported: 1; u8 reserved: 6; u32 max_msg_length; }; struct hbm_add_client_request { u8 hbm_cmd; u8 me_addr; u8 reserved[2]; struct mei_client_properties client_properties; }; struct hbm_add_client_response { u8 hbm_cmd; u8 me_addr; u8 status; u8 reserved; }; struct hbm_capability_request { u8 hbm_cmd; u8 capability_requested[3]; }; struct hbm_capability_response { u8 hbm_cmd; u8 capability_granted[3]; }; struct hbm_client_connect_request { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 reserved; }; struct hbm_client_connect_response { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 status; }; struct hbm_client_dma_map_request { u8 hbm_cmd; u8 client_buffer_id; u8 reserved[2]; u32 address_lsb; u32 address_msb; u32 size; }; struct hbm_client_dma_response { u8 hbm_cmd; u8 status; }; struct hbm_client_dma_unmap_request { u8 hbm_cmd; u8 status; u8 client_buffer_id; u8 reserved; }; struct hbm_dma_mem_dscr { u32 addr_hi; u32 addr_lo; u32 size; }; struct hbm_dma_ring_ctrl { u32 hbuf_wr_idx; u32 reserved1; u32 hbuf_rd_idx; u32 reserved2; u32 dbuf_wr_idx; u32 reserved3; u32 dbuf_rd_idx; u32 reserved4; }; struct hbm_dma_setup_request { u8 hbm_cmd; u8 reserved[3]; struct hbm_dma_mem_dscr dma_dscr[3]; }; struct hbm_dma_setup_response { u8 hbm_cmd; u8 status; u8 reserved[2]; }; struct hbm_flow_control { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 reserved[5]; }; struct hbm_host_enum_request { u8 hbm_cmd; u8 flags; u8 reserved[2]; }; struct 
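/*
 * Example (sketch): a network driver embeds `struct napi_struct` above in
 * its private data and services it from a poll callback; note the exact
 * netif_napi_add() signature has varied across kernel versions (the
 * explicit weight argument was dropped in newer ones). `priv` is a
 * placeholder for driver private data.
 *
 *   static int demo_poll(struct napi_struct *napi, int budget)
 *   {
 *           int done = 0;
 *
 *           // process up to `budget` packets, counting them in `done`
 *           if (done < budget)
 *                   napi_complete_done(napi, done);
 *           return done;
 *   }
 *
 *   netif_napi_add(netdev, &priv->napi, demo_poll);
 *   napi_enable(&priv->napi);
 */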
hbm_host_enum_response { u8 hbm_cmd; u8 reserved[3]; u8 valid_addresses[32]; }; struct hbm_host_stop_request { u8 hbm_cmd; u8 reason; u8 reserved[2]; }; struct hbm_version { u8 minor_version; u8 major_version; }; struct hbm_host_version_request { u8 hbm_cmd; u8 reserved; struct hbm_version host_version; }; struct hbm_host_version_response { u8 hbm_cmd; u8 host_version_supported; struct hbm_version me_max_version; }; struct hbm_notification_request { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 start; }; struct hbm_notification_response { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 status; u8 start; u8 reserved[3]; }; struct hbm_power_gate { u8 hbm_cmd; u8 reserved[3]; }; struct hbm_props_request { u8 hbm_cmd; u8 me_addr; u8 reserved[2]; }; struct hbm_props_response { u8 hbm_cmd; u8 me_addr; u8 status; u8 reserved; struct mei_client_properties client_properties; }; struct hd_geometry { unsigned char heads; unsigned char sectors; short unsigned int cylinders; long unsigned int start; }; struct hh_cache; struct header_ops { int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); __be16 (*parse_protocol)(const struct sk_buff *); }; struct held_lock { u64 prev_chain_key; long unsigned int acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; unsigned int class_idx: 13; unsigned int irq_context: 2; unsigned int trylock: 1; unsigned int read: 2; unsigned int check: 1; unsigned int hardirqs_off: 1; unsigned int sync: 1; unsigned int references: 11; unsigned int pin_count; }; struct hh_cache { unsigned int hh_len; seqlock_t hh_lock; long unsigned int hh_data[12]; }; struct hid_collection { int parent_idx; unsigned int type; unsigned int usage; unsigned int level; }; struct hid_device; struct hid_debug_list { struct { union { struct __kfifo kfifo; char *type; const char *const_type; char (*rectype)[0]; char *ptr; const char *ptr_const; }; char buf[0]; } hid_debug_fifo; struct fasync_struct *fasync; struct hid_device *hdev; struct list_head node; struct mutex read_mutex; }; struct hid_report; struct hid_report_enum { unsigned int numbered; struct list_head report_list; struct hid_report *report_id_hash[256]; }; struct semaphore { raw_spinlock_t lock; unsigned int count; struct list_head wait_list; }; struct hid_driver; struct hid_ll_driver; struct hid_field; struct hid_usage; struct hid_device { const __u8 *dev_rdesc; unsigned int dev_rsize; const __u8 *rdesc; unsigned int rsize; struct hid_collection *collection; unsigned int collection_size; unsigned int maxcollection; unsigned int maxapplication; __u16 bus; __u16 group; __u32 vendor; __u32 product; __u32 version; enum hid_type type; unsigned int country; struct hid_report_enum report_enum[3]; struct work_struct led_work; struct semaphore driver_input_lock; struct device dev; struct hid_driver *driver; void *devres_group_id; const struct hid_ll_driver *ll_driver; struct mutex ll_open_lock; unsigned int ll_open_count; long unsigned int status; unsigned int claimed; unsigned int quirks; unsigned int initial_quirks; bool io_started; struct list_head inputs; void *hiddev; void *hidraw; char name[128]; char phys[64]; char uniq[64]; void *driver_data; int (*ff_init)(struct hid_device *); int (*hiddev_connect)(struct hid_device *, 
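/*
 * Example (userspace sketch): `struct hd_geometry` above is returned by
 * the legacy HDIO_GETGEO ioctl (linux/hdreg.h); of its fields, mainly
 * `start` is still meaningful for partitions on modern disks.
 *
 *   struct hd_geometry geo;
 *   int fd = open("/dev/sda1", O_RDONLY);
 *
 *   if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *           printf("partition starts at sector %lu\n", geo.start);
 */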
unsigned int); void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); void (*hiddev_report_event)(struct hid_device *, struct hid_report *); short unsigned int debug; struct dentry *debug_dir; struct dentry *debug_rdesc; struct dentry *debug_events; struct list_head debug_list; spinlock_t debug_list_lock; wait_queue_head_t debug_wait; struct kref ref; unsigned int id; }; struct hid_device_id { __u16 bus; __u16 group; __u32 vendor; __u32 product; kernel_ulong_t driver_data; }; struct hid_report_id; struct hid_usage_id; struct hid_input; struct hid_driver { char *name; const struct hid_device_id *id_table; struct list_head dyn_list; spinlock_t dyn_lock; bool (*match)(struct hid_device *, bool); int (*probe)(struct hid_device *, const struct hid_device_id *); void (*remove)(struct hid_device *); const struct hid_report_id *report_table; int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int); const struct hid_usage_id *usage_table; int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); void (*report)(struct hid_device *, struct hid_report *); const __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *); int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *); int (*input_configured)(struct hid_device *, struct hid_input *); void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *); int (*suspend)(struct hid_device *, pm_message_t); int (*resume)(struct hid_device *); int (*reset_resume)(struct hid_device *); struct device_driver driver; }; struct hid_dynid { struct list_head list; struct hid_device_id id; }; struct hid_field { unsigned int physical; unsigned int logical; unsigned int application; struct hid_usage *usage; unsigned int maxusage; unsigned int flags; unsigned int report_offset; unsigned int report_size; unsigned int report_count; unsigned int report_type; __s32 *value; __s32 *new_value; __s32 *usages_priorities; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned int unit; bool ignored; struct hid_report *report; unsigned int index; struct hid_input *hidinput; __u16 dpad; unsigned int slot_idx; }; struct hid_field_entry { struct list_head list; struct hid_field *field; unsigned int index; __s32 priority; }; struct hid_global { unsigned int usage_page; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned int unit; unsigned int report_id; unsigned int report_size; unsigned int report_count; }; struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; const char *name; struct list_head reports; unsigned int application; bool registered; }; struct hid_item { unsigned int format; __u8 size; __u8 type; __u8 tag; union { __u8 u8; __s8 s8; __u16 u16; __s16 s16; __u32 u32; __s32 s32; const __u8 *longdata; } data; }; struct hid_ll_driver { int (*start)(struct hid_device *); void (*stop)(struct hid_device *); int (*open)(struct hid_device *); void (*close)(struct hid_device *); int (*power)(struct hid_device *, int); int (*parse)(struct hid_device *); void (*request)(struct hid_device *, struct hid_report *, int); int (*wait)(struct hid_device *); 
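/*
 * Example (sketch): a HID driver fills in `struct hid_driver` above with
 * an id_table and lets module_hid_driver() generate the module
 * boilerplate; the vendor/product IDs here are hypothetical.
 *
 *   static const struct hid_device_id demo_hid_ids[] = {
 *           { HID_USB_DEVICE(0x1234, 0x5678) },
 *           { }
 *   };
 *   MODULE_DEVICE_TABLE(hid, demo_hid_ids);
 *
 *   static struct hid_driver demo_hid_driver = {
 *           .name     = "demo-hid",
 *           .id_table = demo_hid_ids,
 *   };
 *   module_hid_driver(demo_hid_driver);
 */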
int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int); int (*output_report)(struct hid_device *, __u8 *, size_t); int (*idle)(struct hid_device *, int, int, int); bool (*may_wakeup)(struct hid_device *); unsigned int max_buffer_size; }; struct hid_local { unsigned int usage[12288]; u8 usage_size[12288]; unsigned int collection_index[12288]; unsigned int usage_index; unsigned int usage_minimum; unsigned int delimiter_depth; unsigned int delimiter_branch; }; struct hid_parser { struct hid_global global; struct hid_global global_stack[4]; unsigned int global_stack_ptr; struct hid_local local; unsigned int *collection_stack; unsigned int collection_stack_ptr; unsigned int collection_stack_size; struct hid_device *device; unsigned int scan_flags; }; struct hid_report { struct list_head list; struct list_head hidinput_list; struct list_head field_entry_list; unsigned int id; enum hid_report_type type; unsigned int application; struct hid_field *field[256]; struct hid_field_entry *field_entries; unsigned int maxfield; unsigned int size; struct hid_device *device; bool tool_active; unsigned int tool; }; struct hid_report_id { __u32 report_type; }; struct hid_usage { unsigned int hid; unsigned int collection_index; unsigned int usage_index; __s8 resolution_multiplier; __s8 wheel_factor; __u16 code; __u8 type; __s16 hat_min; __s16 hat_max; __s16 hat_dir; __s16 wheel_accumulated; }; struct hid_usage_entry { unsigned int page; unsigned int usage; const char *description; }; struct hid_usage_id { __u32 usage_hid; __u32 usage_type; __u32 usage_code; }; struct hiddev { int minor; int exist; int open; struct mutex existancelock; wait_queue_head_t wait; struct hid_device *hid; struct list_head list; spinlock_t list_lock; bool initialized; }; struct hidraw { unsigned int minor; int exist; int open; wait_queue_head_t wait; struct hid_device *hid; struct device *dev; spinlock_t list_lock; struct list_head list; }; struct hlist_bl_head { struct hlist_bl_node *first; }; struct hmac_ctx { struct crypto_shash *hash; u8 pads[0]; }; struct hop_jumbo_hdr { u8 nexthdr; u8 hdrlen; u8 tlv_type; u8 tlv_len; __be32 jumbo_payload_len; }; struct hotplug_slot_ops; struct pci_slot; struct hotplug_slot { const struct hotplug_slot_ops *ops; struct list_head slot_list; struct pci_slot *pci_slot; struct module *owner; const char *mod_name; }; struct hotplug_slot_ops { int (*enable_slot)(struct hotplug_slot *); int (*disable_slot)(struct hotplug_slot *); int (*set_attention_status)(struct hotplug_slot *, u8); int (*hardware_test)(struct hotplug_slot *, u32); int (*get_power_status)(struct hotplug_slot *, u8 *); int (*get_attention_status)(struct hotplug_slot *, u8 *); int (*get_latch_status)(struct hotplug_slot *, u8 *); int (*get_adapter_status)(struct hotplug_slot *, u8 *); int (*reset_slot)(struct hotplug_slot *, bool); }; struct housekeeping { struct cpumask cpumasks[9]; long unsigned int flags; }; struct hpet_timer { u64 hpet_config; union { u64 _hpet_hc64; u32 _hpet_hc32; long unsigned int _hpet_compare; } _u1; u64 hpet_fsb[2]; }; struct hpet { u64 hpet_cap; u64 res0; u64 hpet_config; u64 res1; u64 hpet_isr; u64 res2[25]; union { u64 _hpet_mc64; u32 _hpet_mc32; long unsigned int _hpet_mc; } _u0; u64 res3; struct hpet_timer hpet_timers[0]; }; struct hpet_channel; struct hpet_base { unsigned int nr_channels; unsigned int nr_clockevents; unsigned int boot_cfg; struct hpet_channel *channels; }; struct hpet_channel { struct clock_event_device evt; unsigned int num; unsigned int cpu; unsigned 
int irq; unsigned int in_use; enum hpet_mode mode; unsigned int boot_cfg; char name[10]; long: 64; long: 64; long: 64; }; struct hpet_data { long unsigned int hd_phys_address; void *hd_address; short unsigned int hd_nirqs; unsigned int hd_state; unsigned int hd_irq[32]; }; struct hpets; struct hpet_dev { struct hpets *hd_hpets; struct hpet *hd_hpet; struct hpet_timer *hd_timer; long unsigned int hd_ireqfreq; long unsigned int hd_irqdata; wait_queue_head_t hd_waitqueue; struct fasync_struct *hd_async_queue; unsigned int hd_flags; unsigned int hd_irq; unsigned int hd_hdwirq; char hd_name[7]; }; struct hpet_info { long unsigned int hi_ireqfreq; long unsigned int hi_flags; short unsigned int hi_hpet; short unsigned int hi_timer; }; union hpet_lock { struct { arch_spinlock_t lock; u32 value; }; u64 lockval; }; struct hpets { struct hpets *hp_next; struct hpet *hp_hpet; long unsigned int hp_hpet_phys; long long unsigned int hp_tick_freq; long unsigned int hp_delta; unsigned int hp_ntimer; unsigned int hp_which; struct hpet_dev hp_dev[0]; }; struct hpx_type0 { u32 revision; u8 cache_line_size; u8 latency_timer; u8 enable_serr; u8 enable_perr; }; struct hpx_type1 { u32 revision; u8 max_mem_read; u8 avg_max_split; u16 tot_max_split; }; struct hpx_type2 { u32 revision; u32 unc_err_mask_and; u32 unc_err_mask_or; u32 unc_err_sever_and; u32 unc_err_sever_or; u32 cor_err_mask_and; u32 cor_err_mask_or; u32 adv_err_cap_and; u32 adv_err_cap_or; u16 pci_exp_devctl_and; u16 pci_exp_devctl_or; u16 pci_exp_lnkctl_and; u16 pci_exp_lnkctl_or; u32 sec_unc_err_sever_and; u32 sec_unc_err_sever_or; u32 sec_unc_err_mask_and; u32 sec_unc_err_mask_or; }; struct hpx_type3 { u16 device_type; u16 function_type; u16 config_space_location; u16 pci_exp_cap_id; u16 pci_exp_cap_ver; u16 pci_exp_vendor_id; u16 dvsec_id; u16 dvsec_rev; u16 match_offset; u32 match_mask_and; u32 match_value; u16 reg_offset; u32 reg_mask_and; u32 reg_mask_or; }; struct seqcount_raw_spinlock { seqcount_t seqcount; raw_spinlock_t *lock; }; typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; struct hrtimer_cpu_base; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); ktime_t offset; long: 64; long: 64; }; struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; unsigned int hres_active: 1; unsigned int in_hrtirq: 1; unsigned int hang_detected: 1; unsigned int softirq_activated: 1; unsigned int online: 1; unsigned int nr_events; short unsigned int nr_retries; short unsigned int nr_hangs; unsigned int max_hang_time; ktime_t expires_next; struct hrtimer *next_timer; ktime_t softirq_expires_next; struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[8]; }; struct hrtimer_sleeper { struct hrtimer timer; struct task_struct *task; }; struct hsr_tag { __be16 path_and_LSDU_size; __be16 sequence_nr; __be16 encap_proto; }; struct hstate { struct mutex resize_lock; struct lock_class_key resize_key; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; unsigned int demote_order; long unsigned int mask; long unsigned int max_huge_pages; long unsigned int nr_huge_pages; long unsigned int free_huge_pages; long unsigned int resv_huge_pages; long unsigned int surplus_huge_pages; long unsigned int nr_overcommit_huge_pages; struct list_head hugepage_activelist; struct list_head hugepage_freelists[64]; 
unsigned int max_huge_pages_node[64]; unsigned int nr_huge_pages_node[64]; unsigned int free_huge_pages_node[64]; unsigned int surplus_huge_pages_node[64]; char name[32]; }; struct hsu_dma; struct hsu_dma_chip { struct device *dev; int irq; void *regs; unsigned int length; unsigned int offset; struct hsu_dma *hsu; }; struct hsu_dma_slave { struct device *dma_dev; int chan_id; }; union hsw_tsx_tuning { struct { u32 cycles_last_block: 32; u32 hle_abort: 1; u32 rtm_abort: 1; u32 instruction_abort: 1; u32 non_instruction_abort: 1; u32 retry: 1; u32 data_conflict: 1; u32 capacity_writes: 1; u32 capacity_reads: 1; }; u64 value; }; struct pcpu_freelist_node { struct pcpu_freelist_node *next; }; struct htab_elem { union { struct hlist_nulls_node hash_node; struct { void *padding; union { struct pcpu_freelist_node fnode; struct htab_elem *batch_flink; }; }; }; union { void *ptr_to_pptr; struct bpf_lru_node lru_node; }; u32 hash; long: 0; char key[0]; }; struct huge_bootmem_page { struct list_head list; struct hstate *hstate; }; struct hugepage_subpool { spinlock_t lock; long int count; long int max_hpages; long int used_hpages; struct hstate *hstate; long int min_hpages; long int rsv_hpages; }; struct page_counter { atomic_long_t usage; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad1_; long unsigned int emin; atomic_long_t min_usage; atomic_long_t children_min_usage; long unsigned int elow; atomic_long_t low_usage; atomic_long_t children_low_usage; long unsigned int watermark; long unsigned int local_watermark; long unsigned int failcnt; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad2_; bool protection_support; long unsigned int min; long unsigned int low; long unsigned int high; long unsigned int max; struct page_counter *parent; long: 64; long: 64; }; struct hugetlb_cgroup_per_node; struct hugetlb_cgroup { struct cgroup_subsys_state css; long: 64; long: 64; long: 64; long: 64; struct page_counter hugepage[2]; struct page_counter rsvd_hugepage[2]; atomic_long_t events[2]; atomic_long_t events_local[2]; struct cgroup_file events_file[2]; struct cgroup_file events_local_file[2]; struct hugetlb_cgroup_per_node *nodeinfo[0]; long: 64; long: 64; long: 64; long: 64; }; struct hugetlb_cgroup_per_node { long unsigned int usage[2]; }; struct hugetlb_vma_lock { struct kref refs; struct rw_semaphore rw_sema; struct vm_area_struct *vma; }; struct hugetlbfs_fs_context { struct hstate *hstate; long long unsigned int max_size_opt; long long unsigned int min_size_opt; long int max_hpages; long int nr_inodes; long int min_hpages; enum hugetlbfs_size_type max_val_type; enum hugetlbfs_size_type min_val_type; kuid_t uid; kgid_t gid; umode_t mode; }; struct hugetlbfs_inode_info { struct inode vfs_inode; unsigned int seals; }; struct hugetlbfs_sb_info { long int max_inodes; long int free_inodes; spinlock_t stat_lock; struct hstate *hstate; struct hugepage_subpool *spool; kuid_t uid; kgid_t gid; umode_t mode; }; struct hw_perf_event_extra { u64 config; unsigned int reg; int alloc; int idx; }; struct rhlist_head { struct rhash_head rhead; struct rhlist_head *next; }; struct hw_perf_event { union { struct { u64 config; u64 last_tag; long unsigned int config_base; long unsigned int event_base; int event_base_rdpmc; int idx; int last_cpu; int flags; struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; struct { u64 aux_config; }; struct { struct hrtimer hrtimer; }; struct { struct list_head 
tp_list; }; struct { u64 pwr_acc; u64 ptsc; }; struct { struct arch_hw_breakpoint info; struct rhlist_head bp_list; }; struct { u8 iommu_bank; u8 iommu_cntr; u16 padding; u64 conf; u64 conf1; }; }; struct task_struct *target; void *addr_filters; long unsigned int addr_filters_gen; int state; local64_t prev_count; u64 sample_period; union { struct { u64 last_period; local64_t period_left; }; struct { u64 saved_metric; u64 saved_slots; }; }; u64 interrupts_seq; u64 interrupts; u64 freq_time_stamp; u64 freq_count_stamp; }; struct hw_port_info { struct net_device *lower_dev; u32 port_id; }; struct hwlat_entry { struct trace_entry ent; u64 duration; u64 outer_duration; u64 nmi_total_ts; struct timespec64 timestamp; unsigned int nmi_count; unsigned int seqnum; unsigned int count; }; struct hwmon_channel_info { enum hwmon_sensor_types type; const u32 *config; }; struct hwmon_ops; struct hwmon_chip_info { const struct hwmon_ops *ops; const struct hwmon_channel_info * const *info; }; struct hwmon_device { const char *name; const char *label; struct device dev; const struct hwmon_chip_info *chip; struct list_head tzdata; struct attribute_group group; const struct attribute_group **groups; }; struct hwmon_device_attribute { struct device_attribute dev_attr; const struct hwmon_ops *ops; enum hwmon_sensor_types type; u32 attr; int index; char name[32]; }; struct hwmon_ops { umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); }; struct hwmon_type_attr_list { const u32 *attrs; size_t n_attrs; }; struct to_kill { struct list_head nd; struct task_struct *tsk; long unsigned int addr; short int size_shift; }; struct hwpoison_walk { struct to_kill tk; long unsigned int pfn; int flags; }; struct hwrng { const char *name; int (*init)(struct hwrng *); void (*cleanup)(struct hwrng *); int (*data_present)(struct hwrng *, int); int (*data_read)(struct hwrng *, u32 *); int (*read)(struct hwrng *, void *, size_t, bool); long unsigned int priv; short unsigned int quality; struct list_head list; struct kref ref; struct completion cleanup_done; struct completion dying; }; struct hwtstamp_config { int flags; int tx_type; int rx_filter; }; struct i8042_port { struct serio *serio; int irq; bool exists; bool driver_bound; signed char mux; }; struct iattr { unsigned int ia_valid; umode_t ia_mode; union { kuid_t ia_uid; vfsuid_t ia_vfsuid; }; union { kgid_t ia_gid; vfsgid_t ia_vfsgid; }; loff_t ia_size; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; struct file *ia_file; }; struct ib_pd; struct ib_uobject; struct ib_gid_attr; struct ib_ah { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; const struct ib_gid_attr *sgid_attr; enum rdma_ah_attr_type type; }; struct ib_ah_attr { u16 dlid; u8 src_path_bits; }; struct ib_core_device { struct device dev; possible_net_t rdma_net; struct kobject *ports_kobj; struct list_head port_list; struct ib_device *owner; }; struct ib_counters { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; }; struct ib_counters_read_attr { u64 *counters_buff; u32 ncounters; u32 flags; }; struct ib_ucq_object; struct ib_cq; typedef void (*ib_comp_handler)(struct ib_cq *, void *); struct irq_poll; typedef int irq_poll_fn(struct irq_poll *, int); struct irq_poll { struct 
list_head list; long unsigned int state; int weight; irq_poll_fn *poll; }; struct rdma_restrack_entry { bool valid; u8 no_track: 1; struct kref kref; struct completion comp; struct task_struct *task; const char *kern_name; enum rdma_restrack_type type; bool user; u32 id; }; struct ib_event; struct ib_wc; struct ib_cq { struct ib_device *device; struct ib_ucq_object *uobject; ib_comp_handler comp_handler; void (*event_handler)(struct ib_event *, void *); void *cq_context; int cqe; unsigned int cqe_used; atomic_t usecnt; enum ib_poll_context poll_ctx; struct ib_wc *wc; struct list_head pool_entry; union { struct irq_poll iop; struct work_struct work; }; struct workqueue_struct *comp_wq; struct dim *dim; ktime_t timestamp; u8 interrupt: 1; u8 shared: 1; unsigned int comp_vector; struct rdma_restrack_entry res; }; struct ib_cq_caps { u16 max_cq_moderation_count; u16 max_cq_moderation_period; }; struct ib_cq_init_attr { unsigned int cqe; u32 comp_vector; u32 flags; }; struct ib_cqe { void (*done)(struct ib_cq *, struct ib_wc *); }; struct ib_mad; struct uverbs_attr_bundle; struct rdma_cm_id; struct iw_cm_id; struct iw_cm_conn_param; struct ib_qp; struct ib_send_wr; struct ib_recv_wr; struct ib_srq; struct ib_grh; struct ib_device_attr; struct ib_udata; struct ib_device_modify; struct ib_port_attr; struct ib_port_modify; struct ib_port_immutable; struct rdma_netdev_alloc_params; union ib_gid; struct ib_ucontext; struct rdma_user_mmap_entry; struct rdma_ah_init_attr; struct rdma_ah_attr; struct ib_srq_init_attr; struct ib_srq_attr; struct ib_qp_init_attr; struct ib_qp_attr; struct ib_mr; struct ib_sge; struct ib_mr_status; struct ib_mw; struct ib_xrcd; struct ib_flow; struct ib_flow_attr; struct ib_flow_action; struct ifla_vf_info; struct ifla_vf_stats; struct ifla_vf_guid; struct ib_wq; struct ib_wq_init_attr; struct ib_wq_attr; struct ib_rwq_ind_table; struct ib_rwq_ind_table_init_attr; struct ib_dm; struct ib_dm_alloc_attr; struct ib_dm_mr_attr; struct rdma_hw_stats; struct rdma_counter; struct ib_device_ops { struct module *owner; enum rdma_driver_id driver_id; u32 uverbs_abi_ver; unsigned int uverbs_no_driver_id_binding: 1; const struct attribute_group *device_group; const struct attribute_group **port_groups; int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **); int (*post_recv)(struct ib_qp *, const struct ib_recv_wr *, const struct ib_recv_wr **); void (*drain_rq)(struct ib_qp *); void (*drain_sq)(struct ib_qp *); int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); int (*peek_cq)(struct ib_cq *, int); int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags); int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **); int (*process_mad)(struct ib_device *, int, u32, const struct ib_wc *, const struct ib_grh *, const struct ib_mad *, struct ib_mad *, size_t *, u16 *); int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); void (*get_dev_fw_str)(struct ib_device *, char *); const struct cpumask * (*get_vector_affinity)(struct ib_device *, int); int (*query_port)(struct ib_device *, u32, struct ib_port_attr *); int (*modify_port)(struct ib_device *, u32, int, struct ib_port_modify *); int (*get_port_immutable)(struct ib_device *, u32, struct ib_port_immutable *); enum rdma_link_layer (*get_link_layer)(struct ib_device *, u32); struct net_device * (*get_netdev)(struct ib_device *, u32); struct net_device * 
(*alloc_rdma_netdev)(struct ib_device *, u32, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *)); int (*rdma_netdev_get_params)(struct ib_device *, u32, enum rdma_netdev_t, struct rdma_netdev_alloc_params *); int (*query_gid)(struct ib_device *, u32, int, union ib_gid *); int (*add_gid)(const struct ib_gid_attr *, void **); int (*del_gid)(const struct ib_gid_attr *, void **); int (*query_pkey)(struct ib_device *, u32, u16, u16 *); int (*alloc_ucontext)(struct ib_ucontext *, struct ib_udata *); void (*dealloc_ucontext)(struct ib_ucontext *); int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); void (*mmap_free)(struct rdma_user_mmap_entry *); void (*disassociate_ucontext)(struct ib_ucontext *); int (*alloc_pd)(struct ib_pd *, struct ib_udata *); int (*dealloc_pd)(struct ib_pd *, struct ib_udata *); int (*create_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); int (*create_user_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *); int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *); int (*destroy_ah)(struct ib_ah *, u32); int (*create_srq)(struct ib_srq *, struct ib_srq_init_attr *, struct ib_udata *); int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); int (*destroy_srq)(struct ib_srq *, struct ib_udata *); int (*create_qp)(struct ib_qp *, struct ib_qp_init_attr *, struct ib_udata *); int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); int (*destroy_qp)(struct ib_qp *, struct ib_udata *); int (*create_cq)(struct ib_cq *, const struct ib_cq_init_attr *, struct uverbs_attr_bundle *); int (*modify_cq)(struct ib_cq *, u16, u16); int (*destroy_cq)(struct ib_cq *, struct ib_udata *); int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *); struct ib_mr * (*reg_user_mr_dmabuf)(struct ib_pd *, u64, u64, u64, int, int, struct uverbs_attr_bundle *); struct ib_mr * (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *); int (*dereg_mr)(struct ib_mr *, struct ib_udata *); struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type, u32); struct ib_mr * (*alloc_mr_integrity)(struct ib_pd *, u32, u32); int (*advise_mr)(struct ib_pd *, enum ib_uverbs_advise_mr_advice, u32, struct ib_sge *, u32, struct uverbs_attr_bundle *); int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *); int (*alloc_mw)(struct ib_mw *, struct ib_udata *); int (*dealloc_mw)(struct ib_mw *); int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16); int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16); int (*alloc_xrcd)(struct ib_xrcd *, struct ib_udata *); int (*dealloc_xrcd)(struct ib_xrcd *, struct ib_udata *); struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, struct ib_udata *); int (*destroy_flow)(struct ib_flow *); int (*destroy_flow_action)(struct ib_flow_action *); int (*set_vf_link_state)(struct ib_device *, int, u32, int); int (*get_vf_config)(struct ib_device *, int, u32, struct ifla_vf_info *); int (*get_vf_stats)(struct ib_device *, int, u32, struct ifla_vf_stats *); int (*get_vf_guid)(struct 
ib_device *, int, u32, struct ifla_vf_guid *, struct ifla_vf_guid *); int (*set_vf_guid)(struct ib_device *, int, u32, u64, int); struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); int (*destroy_wq)(struct ib_wq *, struct ib_udata *); int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *); int (*create_rwq_ind_table)(struct ib_rwq_ind_table *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *); int (*dealloc_dm)(struct ib_dm *, struct uverbs_attr_bundle *); struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *); int (*create_counters)(struct ib_counters *, struct uverbs_attr_bundle *); int (*destroy_counters)(struct ib_counters *); int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *); int (*map_mr_sg_pi)(struct ib_mr *, struct scatterlist *, int, unsigned int *, struct scatterlist *, int, unsigned int *); struct rdma_hw_stats * (*alloc_hw_device_stats)(struct ib_device *); struct rdma_hw_stats * (*alloc_hw_port_stats)(struct ib_device *, u32); int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u32, int); int (*modify_hw_stat)(struct ib_device *, u32, unsigned int, bool); int (*fill_res_mr_entry)(struct sk_buff *, struct ib_mr *); int (*fill_res_mr_entry_raw)(struct sk_buff *, struct ib_mr *); int (*fill_res_cq_entry)(struct sk_buff *, struct ib_cq *); int (*fill_res_cq_entry_raw)(struct sk_buff *, struct ib_cq *); int (*fill_res_qp_entry)(struct sk_buff *, struct ib_qp *); int (*fill_res_qp_entry_raw)(struct sk_buff *, struct ib_qp *); int (*fill_res_cm_id_entry)(struct sk_buff *, struct rdma_cm_id *); int (*fill_res_srq_entry)(struct sk_buff *, struct ib_srq *); int (*fill_res_srq_entry_raw)(struct sk_buff *, struct ib_srq *); int (*enable_driver)(struct ib_device *); void (*dealloc_driver)(struct ib_device *); void (*iw_add_ref)(struct ib_qp *); void (*iw_rem_ref)(struct ib_qp *); struct ib_qp * (*iw_get_qp)(struct ib_device *, int); int (*iw_connect)(struct iw_cm_id *, struct iw_cm_conn_param *); int (*iw_accept)(struct iw_cm_id *, struct iw_cm_conn_param *); int (*iw_reject)(struct iw_cm_id *, const void *, u8); int (*iw_create_listen)(struct iw_cm_id *, int); int (*iw_destroy_listen)(struct iw_cm_id *); int (*counter_bind_qp)(struct rdma_counter *, struct ib_qp *); int (*counter_unbind_qp)(struct ib_qp *); int (*counter_dealloc)(struct rdma_counter *); struct rdma_hw_stats * (*counter_alloc_stats)(struct rdma_counter *); int (*counter_update_stats)(struct rdma_counter *); int (*fill_stat_mr_entry)(struct sk_buff *, struct ib_mr *); int (*query_ucontext)(struct ib_ucontext *, struct uverbs_attr_bundle *); int (*get_numa_node)(struct ib_device *); struct ib_device * (*add_sub_dev)(struct ib_device *, enum rdma_nl_dev_type, const char *); void (*del_sub_dev)(struct ib_device *); size_t size_ib_ah; size_t size_ib_counters; size_t size_ib_cq; size_t size_ib_mw; size_t size_ib_pd; size_t size_ib_qp; size_t size_ib_rwq_ind_table; size_t size_ib_srq; size_t size_ib_ucontext; size_t size_ib_xrcd; }; struct ib_odp_caps { uint64_t general_caps; struct { uint32_t rc_odp_caps; uint32_t uc_odp_caps; uint32_t ud_odp_caps; uint32_t xrc_odp_caps; } per_transport_caps; }; struct ib_rss_caps { u32 supported_qpts; u32 
max_rwq_indirection_tables; u32 max_rwq_indirection_table_size; }; struct ib_tm_caps { u32 max_rndv_hdr_size; u32 max_num_tags; u32 flags; u32 max_ops; u32 max_sge; }; struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; u64 max_mr_size; u64 page_size_cap; u32 vendor_id; u32 vendor_part_id; u32 hw_ver; int max_qp; int max_qp_wr; u64 device_cap_flags; u64 kernel_cap_flags; int max_send_sge; int max_recv_sge; int max_sge_rd; int max_cq; int max_cqe; int max_mr; int max_pd; int max_qp_rd_atom; int max_ee_rd_atom; int max_res_rd_atom; int max_qp_init_rd_atom; int max_ee_init_rd_atom; enum ib_atomic_cap atomic_cap; enum ib_atomic_cap masked_atomic_cap; int max_ee; int max_rdd; int max_mw; int max_raw_ipv6_qp; int max_raw_ethy_qp; int max_mcast_grp; int max_mcast_qp_attach; int max_total_mcast_qp_attach; int max_ah; int max_srq; int max_srq_wr; int max_srq_sge; unsigned int max_fast_reg_page_list_len; unsigned int max_pi_fast_reg_page_list_len; u16 max_pkeys; u8 local_ca_ack_delay; int sig_prot_cap; int sig_guard_cap; struct ib_odp_caps odp_caps; uint64_t timestamp_mask; uint64_t hca_core_clock; struct ib_rss_caps rss_caps; u32 max_wq_type_rq; u32 raw_packet_caps; struct ib_tm_caps tm_caps; struct ib_cq_caps cq_caps; u64 max_dm_size; u32 max_sgl_rd; }; struct hw_stats_device_data; struct rdma_restrack_root; struct uapi_definition; struct ib_port_data; struct rdma_link_ops; struct ib_device { struct device *dma_device; struct ib_device_ops ops; char name[64]; struct callback_head callback_head; struct list_head event_handler_list; struct rw_semaphore event_handler_rwsem; spinlock_t qp_open_list_lock; struct rw_semaphore client_data_rwsem; struct xarray client_data; struct mutex unregistration_lock; rwlock_t cache_lock; struct ib_port_data *port_data; int num_comp_vectors; union { struct device dev; struct ib_core_device coredev; }; const struct attribute_group *groups[4]; u64 uverbs_cmd_mask; char node_desc[64]; __be64 node_guid; u32 local_dma_lkey; u16 is_switch: 1; u16 kverbs_provider: 1; u16 use_cq_dim: 1; u8 node_type; u32 phys_port_cnt; struct ib_device_attr attrs; struct hw_stats_device_data *hw_stats_data; u32 index; spinlock_t cq_pools_lock; struct list_head cq_pools[3]; struct rdma_restrack_root *res; const struct uapi_definition *driver_def; refcount_t refcount; struct completion unreg_completion; struct work_struct unregistration_work; const struct rdma_link_ops *link_ops; struct mutex compat_devs_mutex; struct xarray compat_devs; char iw_ifname[16]; u32 iw_driver_flags; u32 lag_flags; struct mutex subdev_lock; struct list_head subdev_list_head; enum rdma_nl_dev_type type; struct ib_device *parent; struct list_head subdev_list; enum rdma_nl_name_assign_type name_assign_type; }; struct ib_device_modify { u64 sys_image_guid; char node_desc[64]; }; struct ib_dm { struct ib_device *device; u32 length; u32 flags; struct ib_uobject *uobject; atomic_t usecnt; }; struct ib_dm_alloc_attr { u64 length; u32 alignment; u32 flags; }; struct ib_dm_mr_attr { u64 length; u64 offset; u32 access_flags; }; struct ib_event { struct ib_device *device; union { struct ib_cq *cq; struct ib_qp *qp; struct ib_srq *srq; struct ib_wq *wq; u32 port_num; } element; enum ib_event_type event; }; struct ib_flow { struct ib_qp *qp; struct ib_device *device; struct ib_uobject *uobject; }; struct ib_flow_action { struct ib_device *device; struct ib_uobject *uobject; enum ib_flow_action_type type; atomic_t usecnt; }; struct ib_flow_eth_filter { u8 dst_mac[6]; u8 src_mac[6]; __be16 ether_type; __be16 vlan_tag; }; 
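The ib_flow_spec_* structures that follow all pair a val with a mask of the same shape; by the usual verbs flow-steering convention, a packet field matches when it equals val on the bits selected by mask. A minimal, self-contained userspace sketch of that masked-compare idea (the helper name and the local struct mirror are illustrative, not part of this dump):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of the Ethernet filter layout shown above (illustrative). */
struct eth_filter {
	uint8_t  dst_mac[6];
	uint8_t  src_mac[6];
	uint16_t ether_type;	/* big-endian on the wire */
	uint16_t vlan_tag;
};

/* Assumed convention: a field matches iff
 * (packet & mask) == (val & mask), byte by byte. */
static int masked_match(const uint8_t *pkt, const uint8_t *val,
			const uint8_t *mask, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if ((pkt[i] & mask[i]) != (val[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	struct eth_filter val  = { .dst_mac = {0x02, 0, 0, 0, 0, 0x01} };
	struct eth_filter mask = { .dst_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	uint8_t pkt_dst[6]     = {0x02, 0, 0, 0, 0, 0x01};

	printf("dst mac matches: %d\n",
	       masked_match(pkt_dst, val.dst_mac, mask.dst_mac, 6));
	return 0;
}

An all-zero mask (the default above for the fields we left unset) matches anything, which is why flow specs can narrow on just one header field.
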
struct ib_flow_spec_eth { u32 type; u16 size; struct ib_flow_eth_filter val; struct ib_flow_eth_filter mask; }; struct ib_flow_ib_filter { __be16 dlid; __u8 sl; }; struct ib_flow_spec_ib { u32 type; u16 size; struct ib_flow_ib_filter val; struct ib_flow_ib_filter mask; }; struct ib_flow_ipv4_filter { __be32 src_ip; __be32 dst_ip; u8 proto; u8 tos; u8 ttl; u8 flags; }; struct ib_flow_spec_ipv4 { u32 type; u16 size; struct ib_flow_ipv4_filter val; struct ib_flow_ipv4_filter mask; }; struct ib_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; }; struct ib_flow_spec_tcp_udp { u32 type; u16 size; struct ib_flow_tcp_udp_filter val; struct ib_flow_tcp_udp_filter mask; }; struct ib_flow_ipv6_filter { u8 src_ip[16]; u8 dst_ip[16]; __be32 flow_label; u8 next_hdr; u8 traffic_class; u8 hop_limit; } __attribute__((packed)); struct ib_flow_spec_ipv6 { u32 type; u16 size; struct ib_flow_ipv6_filter val; struct ib_flow_ipv6_filter mask; }; struct ib_flow_tunnel_filter { __be32 tunnel_id; }; struct ib_flow_spec_tunnel { u32 type; u16 size; struct ib_flow_tunnel_filter val; struct ib_flow_tunnel_filter mask; }; struct ib_flow_esp_filter { __be32 spi; __be32 seq; }; struct ib_flow_spec_esp { u32 type; u16 size; struct ib_flow_esp_filter val; struct ib_flow_esp_filter mask; }; struct ib_flow_gre_filter { __be16 c_ks_res0_ver; __be16 protocol; __be32 key; }; struct ib_flow_spec_gre { u32 type; u16 size; struct ib_flow_gre_filter val; struct ib_flow_gre_filter mask; }; struct ib_flow_mpls_filter { __be32 tag; }; struct ib_flow_spec_mpls { u32 type; u16 size; struct ib_flow_mpls_filter val; struct ib_flow_mpls_filter mask; }; struct ib_flow_spec_action_tag { enum ib_flow_spec_type type; u16 size; u32 tag_id; }; struct ib_flow_spec_action_drop { enum ib_flow_spec_type type; u16 size; }; struct ib_flow_spec_action_handle { enum ib_flow_spec_type type; u16 size; struct ib_flow_action *act; }; struct ib_flow_spec_action_count { enum ib_flow_spec_type type; u16 size; struct ib_counters *counters; }; union ib_flow_spec { struct { u32 type; u16 size; }; struct ib_flow_spec_eth eth; struct ib_flow_spec_ib ib; struct ib_flow_spec_ipv4 ipv4; struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; struct ib_flow_spec_esp esp; struct ib_flow_spec_gre gre; struct ib_flow_spec_mpls mpls; struct ib_flow_spec_action_tag flow_tag; struct ib_flow_spec_action_drop drop; struct ib_flow_spec_action_handle action; struct ib_flow_spec_action_count flow_count; }; struct ib_flow_attr { enum ib_flow_attr_type type; u16 size; u16 priority; u32 flags; u8 num_of_specs; u32 port; union ib_flow_spec flows[0]; }; union ib_gid { u8 raw[16]; struct { __be64 subnet_prefix; __be64 interface_id; } global; }; struct ib_gid_attr { struct net_device *ndev; struct ib_device *device; union ib_gid gid; enum ib_gid_type gid_type; u16 index; u32 port_num; }; struct ib_global_route { const struct ib_gid_attr *sgid_attr; union ib_gid dgid; u32 flow_label; u8 sgid_index; u8 hop_limit; u8 traffic_class; }; struct ib_grh { __be32 version_tclass_flow; __be16 paylen; u8 next_hdr; u8 hop_limit; union ib_gid sgid; union ib_gid dgid; }; struct ib_sig_attrs; struct ib_mr { struct ib_device *device; struct ib_pd *pd; u32 lkey; u32 rkey; u64 iova; u64 length; unsigned int page_size; enum ib_mr_type type; bool need_inval; union { struct ib_uobject *uobject; struct list_head qp_entry; }; struct ib_dm *dm; struct ib_sig_attrs *sig_attrs; struct rdma_restrack_entry res; }; struct ib_sig_err { enum ib_sig_err_type 
err_type; u32 expected; u32 actual; u64 sig_err_offset; u32 key; }; struct ib_mr_status { u32 fail_status; struct ib_sig_err sig_err; }; struct ib_mw { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; u32 rkey; enum ib_mw_type type; }; struct ib_pd { u32 local_dma_lkey; u32 flags; struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 unsafe_global_rkey; struct ib_mr *__internal_mr; struct rdma_restrack_entry res; }; struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; enum ib_mtu max_mtu; enum ib_mtu active_mtu; u32 phys_mtu; int gid_tbl_len; unsigned int ip_gids: 1; u32 port_cap_flags; u32 max_msg_sz; u32 bad_pkey_cntr; u32 qkey_viol_cntr; u16 pkey_tbl_len; u32 sm_lid; u32 lid; u8 lmc; u8 max_vl_num; u8 sm_sl; u8 subnet_timeout; u8 init_type_reply; u8 active_width; u16 active_speed; u8 phys_state; u16 port_cap_flags2; }; struct ib_pkey_cache; struct ib_gid_table; struct ib_port_cache { u64 subnet_prefix; struct ib_pkey_cache *pkey; struct ib_gid_table *gid; u8 lmc; enum ib_port_state port_state; }; struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; u32 core_cap_flags; u32 max_mad_size; }; struct rdma_counter_mode { enum rdma_nl_counter_mode mode; enum rdma_nl_counter_mask mask; struct auto_mode_param param; }; struct rdma_port_counter { struct rdma_counter_mode mode; struct rdma_hw_stats *hstats; unsigned int num_counters; struct mutex lock; }; struct ib_port; struct ib_port_data { struct ib_device *ib_dev; struct ib_port_immutable immutable; spinlock_t pkey_list_lock; spinlock_t netdev_lock; struct list_head pkey_list; struct ib_port_cache cache; struct net_device *netdev; netdevice_tracker netdev_tracker; struct hlist_node ndev_hash_link; struct rdma_port_counter port_counter; struct ib_port *sysfs; }; struct ib_port_modify { u32 set_port_cap_mask; u32 clr_port_cap_mask; u8 init_type; }; struct ib_qp_security; struct ib_port_pkey { enum port_pkey_state state; u16 pkey_index; u32 port_num; struct list_head qp_list; struct list_head to_error_list; struct ib_qp_security *sec; }; struct ib_ports_pkeys { struct ib_port_pkey main; struct ib_port_pkey alt; }; struct ib_uqp_object; struct ib_qp { struct ib_device *device; struct ib_pd *pd; struct ib_cq *send_cq; struct ib_cq *recv_cq; spinlock_t mr_lock; int mrs_used; struct list_head rdma_mrs; struct list_head sig_mrs; struct ib_srq *srq; struct completion srq_completion; struct ib_xrcd *xrcd; struct list_head xrcd_list; atomic_t usecnt; struct list_head open_list; struct ib_qp *real_qp; struct ib_uqp_object *uobject; void (*event_handler)(struct ib_event *, void *); void (*registered_event_handler)(struct ib_event *, void *); void *qp_context; const struct ib_gid_attr *av_sgid_attr; const struct ib_gid_attr *alt_path_sgid_attr; u32 qp_num; u32 max_write_sge; u32 max_read_sge; enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; u32 port; bool integrity_en; struct rdma_restrack_entry res; struct rdma_counter *counter; }; struct ib_qp_cap { u32 max_send_wr; u32 max_recv_wr; u32 max_send_sge; u32 max_recv_sge; u32 max_inline_data; u32 max_rdma_ctxs; }; struct roce_ah_attr { u8 dmac[6]; }; struct opa_ah_attr { u32 dlid; u8 src_path_bits; bool make_grd; }; struct rdma_ah_attr { struct ib_global_route grh; u8 sl; u8 static_rate; u32 port_num; u8 ah_flags; enum rdma_ah_attr_type type; union { struct ib_ah_attr ib; struct roce_ah_attr roce; struct opa_ah_attr opa; }; }; struct ib_qp_attr { enum ib_qp_state qp_state; enum ib_qp_state 
cur_qp_state; enum ib_mtu path_mtu; enum ib_mig_state path_mig_state; u32 qkey; u32 rq_psn; u32 sq_psn; u32 dest_qp_num; int qp_access_flags; struct ib_qp_cap cap; struct rdma_ah_attr ah_attr; struct rdma_ah_attr alt_ah_attr; u16 pkey_index; u16 alt_pkey_index; u8 en_sqd_async_notify; u8 sq_draining; u8 max_rd_atomic; u8 max_dest_rd_atomic; u8 min_rnr_timer; u32 port_num; u8 timeout; u8 retry_cnt; u8 rnr_retry; u32 alt_port_num; u8 alt_timeout; u32 rate_limit; struct net_device *xmit_slave; }; struct ib_qp_init_attr { void (*event_handler)(struct ib_event *, void *); void *qp_context; struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_srq *srq; struct ib_xrcd *xrcd; struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; u32 create_flags; u32 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; u32 source_qpn; }; struct ib_qp_security { struct ib_qp *qp; struct ib_device *dev; struct mutex mutex; struct ib_ports_pkeys *ports_pkeys; struct list_head shared_qp_list; void *security; bool destroying; atomic_t error_list_count; struct completion error_complete; int error_comps_pending; }; struct ib_rdmacg_object {}; struct ib_recv_wr { struct ib_recv_wr *next; union { u64 wr_id; struct ib_cqe *wr_cqe; }; struct ib_sge *sg_list; int num_sge; }; struct ib_rwq_ind_table { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 ind_tbl_num; u32 log_ind_tbl_size; struct ib_wq **ind_tbl; }; struct ib_rwq_ind_table_init_attr { u32 log_ind_tbl_size; struct ib_wq **ind_tbl; }; struct ib_send_wr { struct ib_send_wr *next; union { u64 wr_id; struct ib_cqe *wr_cqe; }; struct ib_sge *sg_list; int num_sge; enum ib_wr_opcode opcode; int send_flags; union { __be32 imm_data; u32 invalidate_rkey; } ex; }; struct ib_sge { u64 addr; u32 length; u32 lkey; }; struct ib_t10_dif_domain { enum ib_t10_dif_bg_type bg_type; u16 pi_interval; u16 bg; u16 app_tag; u32 ref_tag; bool ref_remap; bool app_escape; bool ref_escape; u16 apptag_check_mask; }; struct ib_sig_domain { enum ib_signature_type sig_type; union { struct ib_t10_dif_domain dif; } sig; }; struct ib_sig_attrs { u8 check_mask; struct ib_sig_domain mem; struct ib_sig_domain wire; int meta_length; }; struct ib_usrq_object; struct ib_srq { struct ib_device *device; struct ib_pd *pd; struct ib_usrq_object *uobject; void (*event_handler)(struct ib_event *, void *); void *srq_context; enum ib_srq_type srq_type; atomic_t usecnt; struct { struct ib_cq *cq; union { struct { struct ib_xrcd *xrcd; u32 srq_num; } xrc; }; } ext; struct rdma_restrack_entry res; }; struct ib_srq_attr { u32 max_wr; u32 max_sge; u32 srq_limit; }; struct ib_srq_init_attr { void (*event_handler)(struct ib_event *, void *); void *srq_context; struct ib_srq_attr attr; enum ib_srq_type srq_type; struct { struct ib_cq *cq; union { struct { struct ib_xrcd *xrcd; } xrc; struct { u32 max_num_tags; } tag_matching; }; } ext; }; struct ib_uverbs_file; struct ib_ucontext { struct ib_device *device; struct ib_uverbs_file *ufile; struct ib_rdmacg_object cg_obj; struct rdma_restrack_entry res; struct xarray mmap_xa; }; struct ib_udata { const void *inbuf; void *outbuf; size_t inlen; size_t outlen; }; struct uverbs_api_object; struct ib_uobject { u64 user_handle; struct ib_uverbs_file *ufile; struct ib_ucontext *context; void *object; struct list_head list; struct ib_rdmacg_object cg_obj; int id; struct kref ref; atomic_t usecnt; struct callback_head rcu; const struct uverbs_api_object *uapi_object; }; struct ib_wc { union { u64 wr_id; struct ib_cqe *wr_cqe; }; enum 
ib_wc_status status; enum ib_wc_opcode opcode; u32 vendor_err; u32 byte_len; struct ib_qp *qp; union { __be32 imm_data; u32 invalidate_rkey; } ex; u32 src_qp; u32 slid; int wc_flags; u16 pkey_index; u8 sl; u8 dlid_path_bits; u32 port_num; u8 smac[6]; u16 vlan_id; u8 network_hdr_type; }; struct ib_uwq_object; struct ib_wq { struct ib_device *device; struct ib_uwq_object *uobject; void *wq_context; void (*event_handler)(struct ib_event *, void *); struct ib_pd *pd; struct ib_cq *cq; u32 wq_num; enum ib_wq_state state; enum ib_wq_type wq_type; atomic_t usecnt; }; struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; u32 flags; u32 flags_mask; }; struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; u32 max_wr; u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); u32 create_flags; }; struct ib_xrcd { struct ib_device *device; atomic_t usecnt; struct inode *inode; struct rw_semaphore tgt_qps_rwsem; struct xarray tgt_qps; }; union ibs_fetch_ctl { __u64 val; struct { __u64 fetch_maxcnt: 16; __u64 fetch_cnt: 16; __u64 fetch_lat: 16; __u64 fetch_en: 1; __u64 fetch_val: 1; __u64 fetch_comp: 1; __u64 ic_miss: 1; __u64 phy_addr_valid: 1; __u64 l1tlb_pgsz: 2; __u64 l1tlb_miss: 1; __u64 l2tlb_miss: 1; __u64 rand_en: 1; __u64 fetch_l2_miss: 1; __u64 l3_miss_only: 1; __u64 fetch_oc_miss: 1; __u64 fetch_l3_miss: 1; __u64 reserved: 2; }; }; union ibs_op_ctl { __u64 val; struct { __u64 opmaxcnt: 16; __u64 l3_miss_only: 1; __u64 op_en: 1; __u64 op_val: 1; __u64 cnt_ctl: 1; __u64 opmaxcnt_ext: 7; __u64 reserved0: 5; __u64 opcurcnt: 27; __u64 reserved1: 5; }; }; union ibs_op_data { __u64 val; struct { __u64 comp_to_ret_ctr: 16; __u64 tag_to_ret_ctr: 16; __u64 reserved1: 2; __u64 op_return: 1; __u64 op_brn_taken: 1; __u64 op_brn_misp: 1; __u64 op_brn_ret: 1; __u64 op_rip_invalid: 1; __u64 op_brn_fuse: 1; __u64 op_microcode: 1; __u64 reserved2: 23; }; }; union ibs_op_data2 { __u64 val; struct { __u64 data_src_lo: 3; __u64 reserved0: 1; __u64 rmt_node: 1; __u64 cache_hit_st: 1; __u64 data_src_hi: 2; __u64 reserved1: 56; }; }; union ibs_op_data3 { __u64 val; struct { __u64 ld_op: 1; __u64 st_op: 1; __u64 dc_l1tlb_miss: 1; __u64 dc_l2tlb_miss: 1; __u64 dc_l1tlb_hit_2m: 1; __u64 dc_l1tlb_hit_1g: 1; __u64 dc_l2tlb_hit_2m: 1; __u64 dc_miss: 1; __u64 dc_mis_acc: 1; __u64 reserved: 4; __u64 dc_wc_mem_acc: 1; __u64 dc_uc_mem_acc: 1; __u64 dc_locked_op: 1; __u64 dc_miss_no_mab_alloc: 1; __u64 dc_lin_addr_valid: 1; __u64 dc_phy_addr_valid: 1; __u64 dc_l2_tlb_hit_1g: 1; __u64 l2_miss: 1; __u64 sw_pf: 1; __u64 op_mem_width: 4; __u64 op_dc_miss_open_mem_reqs: 6; __u64 dc_miss_lat: 16; __u64 tlb_refill_lat: 16; }; }; struct icmp6_err { int err; int fatal; }; struct icmp6_filter { __u32 data[8]; }; struct icmpv6_echo { __be16 identifier; __be16 sequence; }; struct icmpv6_nd_advt { __u32 reserved: 5; __u32 override: 1; __u32 solicited: 1; __u32 router: 1; __u32 reserved2: 24; }; struct icmpv6_nd_ra { __u8 hop_limit; __u8 reserved: 3; __u8 router_pref: 2; __u8 home_agent: 1; __u8 other: 1; __u8 managed: 1; __be16 rt_lifetime; }; struct icmp6hdr { __u8 icmp6_type; __u8 icmp6_code; __sum16 icmp6_cksum; union { __be32 un_data32[1]; __be16 un_data16[2]; __u8 un_data8[4]; struct icmpv6_echo u_echo; struct icmpv6_nd_advt u_nd_advt; struct icmpv6_nd_ra u_nd_ra; } icmp6_dataun; }; struct icmphdr { __u8 type; __u8 code; __sum16 checksum; union { struct { __be16 id; __be16 sequence; } echo; __be32 gateway; struct { __be16 __unused; __be16 mtu; } frag; __u8 reserved[4]; } un; 
}; struct ip_options_rcu { struct callback_head rcu; struct ip_options opt; }; struct ip_options_data { struct ip_options_rcu opt; char data[40]; }; struct icmp_bxm { struct sk_buff *skb; int offset; int data_len; struct { struct icmphdr icmph; __be32 times[3]; } data; int head_len; struct ip_options_data replyopts; }; struct icmp_control { enum skb_drop_reason (*handler)(struct sk_buff *); short int error; }; struct icmp_err { int errno; unsigned int fatal: 1; }; struct icmp_ext_echo_ctype3_hdr { __be16 afi; __u8 addrlen; __u8 reserved; }; struct icmp_extobj_hdr { __be16 length; __u8 class_num; __u8 class_type; }; struct icmp_ext_echo_iio { struct icmp_extobj_hdr extobj_hdr; union { char name[16]; __be32 ifindex; struct { struct icmp_ext_echo_ctype3_hdr ctype3_hdr; union { __be32 ipv4_addr; struct in6_addr ipv6_addr; } ip_addr; } addr; } ident; }; struct icmp_ext_hdr { __u8 reserved1: 4; __u8 version: 4; __u8 reserved2; __sum16 checksum; }; struct icmp_filter { __u32 data; }; struct icmp_mib { long unsigned int mibs[30]; }; struct icmpmsg_mib { atomic_long_t mibs[512]; }; struct icmpv6_mib { long unsigned int mibs[7]; }; struct icmpv6_mib_device { atomic_long_t mibs[7]; }; struct icmpv6_msg { struct sk_buff *skb; int offset; uint8_t type; }; struct icmpv6msg_mib { atomic_long_t mibs[512]; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512]; }; struct id { __u16 id; __be16 proto; }; struct id_bitmap { long unsigned int map[4]; }; struct ida_bitmap { long unsigned int bitmap[16]; }; struct idempotent { const void *cookie; struct hlist_node entry; struct completion complete; int ret; }; struct idle_inject_device { struct hrtimer timer; unsigned int idle_duration_us; unsigned int run_duration_us; unsigned int latency_us; bool (*update)(void); long unsigned int cpumask[0]; }; struct idle_inject_thread { struct task_struct *tsk; int should_run; }; struct idle_timer { struct hrtimer timer; int done; }; struct idmap_key { bool map_up; u32 id; u32 count; }; struct idt_data { unsigned int vector; unsigned int segment; struct idt_bits bits; const void *addr; }; struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8]; __u8 tc_rx_bw[8]; __u8 tc_tsa[8]; __u8 prio_tc[8]; __u8 tc_reco_bw[8]; __u8 tc_reco_tsa[8]; __u8 reco_prio_tc[8]; }; struct ieee_maxrate { __u64 tc_maxrate[8]; }; struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8]; __u64 indications[8]; }; struct ieee_qcn { __u8 rpg_enable[8]; __u32 rppp_max_rps[8]; __u32 rpg_time_reset[8]; __u32 rpg_byte_reset[8]; __u32 rpg_threshold[8]; __u32 rpg_max_rate[8]; __u32 rpg_ai_rate[8]; __u32 rpg_hai_rate[8]; __u32 rpg_gd[8]; __u32 rpg_min_dec_fac[8]; __u32 rpg_min_rate[8]; __u32 cndd_state_machine[8]; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8]; __u32 rppp_created_rps[8]; }; struct if6_iter_state { struct seq_net_private p; int bucket; int offset; }; struct if_settings { unsigned int type; unsigned int size; union { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; x25_hdlc_proto *x25; sync_serial_settings *sync; te1_settings *te1; } ifs_ifsu; }; struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; struct ifa6_config { const struct in6_addr *pfx; unsigned int plen; u8 ifa_proto; const struct in6_addr *peer_pfx; u32 rt_priority; u32 ifa_flags; u32 preferred_lft; u32 valid_lft; u16 scope; }; struct ifa_cacheinfo { __u32 ifa_prefered; __u32 ifa_valid; __u32 cstamp; __u32 tstamp; }; 
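struct icmphdr above carries a 16-bit ones'-complement checksum computed over the entire ICMP message with the checksum field zeroed. A self-contained sketch of the standard RFC 1071 computation, using a local mirror of the echo portion of the header (the mirror and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Local mirror of the echo branch of struct icmphdr shown above. */
struct icmp_echo {
	uint8_t  type;		/* 8 = echo request */
	uint8_t  code;
	uint16_t checksum;	/* ones'-complement sum of the whole message */
	uint16_t id;
	uint16_t sequence;
};

/* RFC 1071 Internet checksum: sum 16-bit big-endian words, fold the
 * carries back in, then invert. */
static uint16_t inet_csum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct icmp_echo req = { .type = 8, .id = htons(0x1234),
				 .sequence = htons(1) };

	/* checksum starts at zero, then is filled in network byte order */
	req.checksum = htons(inet_csum(&req, sizeof(req)));
	printf("icmp checksum: 0x%04x\n", ntohs(req.checksum));
	return 0;
}
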
struct ifacaddr6 { struct in6_addr aca_addr; struct fib6_info *aca_rt; struct ifacaddr6 *aca_next; struct hlist_node aca_addr_lst; int aca_users; refcount_t aca_refcnt; long unsigned int aca_cstamp; long unsigned int aca_tstamp; struct callback_head rcu; }; struct ifaddrlblmsg { __u8 ifal_family; __u8 __ifal_reserved; __u8 ifal_prefixlen; __u8 ifal_flags; __u32 ifal_index; __u32 ifal_seq; }; struct ifaddrmsg { __u8 ifa_family; __u8 ifa_prefixlen; __u8 ifa_flags; __u8 ifa_scope; __u32 ifa_index; }; struct ifbond { __s32 bond_mode; __s32 num_slaves; __s32 miimon; }; typedef struct ifbond ifbond; struct ifreq; struct ifconf { int ifc_len; union { char *ifcu_buf; struct ifreq *ifcu_req; } ifc_ifcu; }; struct ifinfomsg { unsigned char ifi_family; unsigned char __ifi_pad; short unsigned int ifi_type; int ifi_index; unsigned int ifi_flags; unsigned int ifi_change; }; struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; __u32 reachable_time; __u32 retrans_time; }; struct ifla_vf_broadcast { __u8 broadcast[32]; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; struct ifla_vf_info { __u32 vf; __u8 mac[32]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; __be16 vlan_proto; }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_mac { __u32 vf; __u8 mac[32]; }; struct ifla_vf_rate { __u32 vf; __u32 min_tx_rate; __u32 max_tx_rate; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; __u64 rx_dropped; __u64 tx_dropped; }; struct ifla_vf_trust { __u32 vf; __u32 setting; }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; __u32 qos; }; struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; __u32 qos; __be16 vlan_proto; }; struct ifla_vlan_flags { __u32 flags; __u32 mask; }; struct ifla_vlan_qos_mapping { __u32 from; __u32 to; }; struct ifla_vxlan_port_range { __be16 low; __be16 high; }; struct ifmap { long unsigned int mem_start; long unsigned int mem_end; short unsigned int base_addr; unsigned char irq; unsigned char dma; unsigned char port; }; struct inet6_dev; struct ip6_sf_list; struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; long unsigned int mca_sfcount[2]; struct delayed_work mca_work; unsigned int mca_flags; int mca_users; refcount_t mca_refcnt; long unsigned int mca_cstamp; long unsigned int mca_tstamp; struct callback_head rcu; }; struct ifreq { union { char ifrn_name[16]; } ifr_ifrn; union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short int ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16]; char ifru_newname[16]; void *ifru_data; struct if_settings ifru_settings; } ifr_ifru; }; struct ifslave { __s32 slave_id; char slave_name[16]; __s8 link; __s8 state; __u32 link_failure_count; }; typedef struct ifslave ifslave; struct igmp6_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; struct igmp6_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; struct ifmcaddr6 *im; }; struct in_device; struct 
igmp_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *in_dev; }; struct ip_mc_list; struct igmp_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *idev; struct ip_mc_list *im; }; struct igmphdr { __u8 type; __u8 code; __sum16 csum; __be32 group; }; struct igmpmsg { __u32 unused1; __u32 unused2; unsigned char im_msgtype; unsigned char im_mbz; unsigned char im_vif; unsigned char im_vif_hi; struct in_addr im_src; struct in_addr im_dst; }; struct igmpv3_grec { __u8 grec_type; __u8 grec_auxwords; __be16 grec_nsrcs; __be32 grec_mca; __be32 grec_src[0]; }; struct igmpv3_query { __u8 type; __u8 code; __sum16 csum; __be32 group; __u8 qrv: 3; __u8 suppress: 1; __u8 resv: 4; __u8 qqic; __be16 nsrcs; __be32 srcs[0]; }; struct igmpv3_report { __u8 type; __u8 resv1; __sum16 csum; __be16 resv2; __be16 ngrec; struct igmpv3_grec grec[0]; }; struct ima_algo_desc { struct crypto_shash *tfm; enum hash_algo algo; }; struct ima_digest_data_hdr { u8 algo; u8 length; union { struct { u8 unused; u8 type; } sha1; struct { u8 type; u8 algo; } ng; u8 data[2]; } xattr; }; struct ima_digest_data { union { struct { u8 algo; u8 length; union { struct { u8 unused; u8 type; } sha1; struct { u8 type; u8 algo; } ng; u8 data[2]; } xattr; }; struct ima_digest_data_hdr hdr; }; u8 digest[0]; }; struct modsig; struct ima_iint_cache; struct ima_event_data { struct ima_iint_cache *iint; struct file *file; const unsigned char *filename; struct evm_ima_xattr_data *xattr_value; int xattr_len; const struct modsig *modsig; const char *violation; const void *buf; int buf_len; }; struct ima_field_data { u8 *data; u32 len; }; struct ima_h_table { atomic_long_t len; atomic_long_t violations; struct hlist_head queue[1024]; }; struct integrity_inode_attributes { u64 version; long unsigned int ino; dev_t dev; }; struct ima_iint_cache { struct mutex mutex; struct integrity_inode_attributes real_inode; long unsigned int flags; long unsigned int measured_pcrs; long unsigned int atomic_flags; enum integrity_status ima_file_status: 4; enum integrity_status ima_mmap_status: 4; enum integrity_status ima_bprm_status: 4; enum integrity_status ima_read_status: 4; enum integrity_status ima_creds_status: 4; struct ima_digest_data *ima_hash; }; struct ima_kexec_hdr { u16 version; u16 _reserved0; u32 _reserved1; u64 buffer_size; u64 count; }; struct ima_key_entry { struct list_head list; void *payload; size_t payload_len; char *keyring_name; }; struct ima_max_digest_data { struct ima_digest_data_hdr hdr; u8 digest[64]; }; struct ima_template_entry; struct ima_queue_entry { struct hlist_node hnext; struct list_head later; struct ima_template_entry *entry; }; struct ima_rule_opt_list; struct ima_template_desc; struct ima_rule_entry { struct list_head list; int action; unsigned int flags; enum ima_hooks func; int mask; long unsigned int fsmagic; uuid_t fsuuid; kuid_t uid; kgid_t gid; kuid_t fowner; kgid_t fgroup; bool (*uid_op)(kuid_t, kuid_t); bool (*gid_op)(kgid_t, kgid_t); bool (*fowner_op)(vfsuid_t, kuid_t); bool (*fgroup_op)(vfsgid_t, kgid_t); int pcr; unsigned int allowed_algos; struct { void *rule; char *args_p; int type; } lsm[6]; char *fsname; struct ima_rule_opt_list *keyrings; struct ima_rule_opt_list *label; struct ima_template_desc *template; }; struct ima_rule_opt_list { size_t count; char *items[0]; }; struct ima_setup_data { __u64 addr; __u64 size; }; struct ima_template_field; struct ima_template_desc { struct list_head list; char *name; char *fmt; int num_fields; const 
struct ima_template_field **fields; }; struct tpm_digest; struct ima_template_entry { int pcr; struct tpm_digest *digests; struct ima_template_desc *template_desc; u32 template_data_len; struct ima_field_data template_data[0]; }; struct ima_template_field { const char field_id[16]; int (*field_init)(struct ima_event_data *, struct ima_field_data *); void (*field_show)(struct seq_file *, enum ima_show_type, struct ima_field_data *); }; struct imc_uncore_pci_dev { __u32 pci_id; struct pci_driver *driver; }; struct in6_flowlabel_req { struct in6_addr flr_dst; __be32 flr_label; __u8 flr_action; __u8 flr_share; __u16 flr_flags; __u16 flr_expires; __u16 flr_linger; __u32 __flr_pad; }; struct in6_ifreq { struct in6_addr ifr6_addr; __u32 ifr6_prefixlen; int ifr6_ifindex; }; struct in6_pktinfo { struct in6_addr ipi6_addr; int ipi6_ifindex; }; struct in6_rtmsg { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; __u32 rtmsg_type; __u16 rtmsg_dst_len; __u16 rtmsg_src_len; __u32 rtmsg_metric; long unsigned int rtmsg_info; __u32 rtmsg_flags; int rtmsg_ifindex; }; struct in6_validator_info { struct in6_addr i6vi_addr; struct inet6_dev *i6vi_dev; struct netlink_ext_ack *extack; }; struct ipv4_devconf { void *sysctl; int data[33]; long unsigned int state[1]; }; struct in_ifaddr; struct neigh_parms; struct in_device { struct net_device *dev; netdevice_tracker dev_tracker; refcount_t refcnt; int dead; struct in_ifaddr *ifa_list; struct ip_mc_list *mc_list; struct ip_mc_list **mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; long unsigned int mr_v1_seen; long unsigned int mr_v2_seen; long unsigned int mr_maxdelay; long unsigned int mr_qi; long unsigned int mr_qri; unsigned char mr_qrv; unsigned char mr_gq_running; u32 mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; }; struct in_ifaddr { struct hlist_node hash; struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[16]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; long unsigned int ifa_cstamp; long unsigned int ifa_tstamp; }; struct in_pktinfo { int ipi_ifindex; struct in_addr ipi_spec_dst; struct in_addr ipi_addr; }; struct in_validator_info { __be32 ivi_addr; struct in_device *ivi_dev; struct netlink_ext_ack *extack; }; struct ipv6_txoptions; struct inet6_cork { struct ipv6_txoptions *opt; u8 hop_limit; u8 tclass; }; struct ipv6_stable_secret { bool initialized; struct in6_addr secret; }; struct ipv6_devconf { __u8 __cacheline_group_begin__ipv6_devconf_read_txrx[0]; __s32 disable_ipv6; __s32 hop_limit; __s32 mtu6; __s32 forwarding; __s32 disable_policy; __s32 proxy_ndp; __u8 __cacheline_group_end__ipv6_devconf_read_txrx[0]; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_max_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; __s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_min_advance; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __u32 ra_defrtr_metric; __s32 
accept_ra_min_hop_limit; __s32 accept_ra_min_lft; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_min_plen; __s32 accept_ra_rt_info_max_plen; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; __s32 accept_untracked_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; __s32 seg6_enabled; __u32 enhanced_dad; __u32 addr_gen_mode; __s32 ndisc_tclass; __s32 rpl_seg_enabled; __u32 ioam6_id; __u32 ioam6_id_wide; __u8 ioam6_enabled; __u8 ndisc_evict_nocarrier; __u8 ra_honor_pio_life; __u8 ra_honor_pio_pflag; struct ctl_table_header *sysctl_header; }; struct ipstats_mib; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; }; struct inet6_dev { struct net_device *dev; netdevice_tracker dev_tracker; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; long unsigned int mc_v1_seen; long unsigned int mc_qi; long unsigned int mc_qri; long unsigned int mc_maxdelay; struct delayed_work mc_gq_work; struct delayed_work mc_ifc_work; struct delayed_work mc_dad_work; struct delayed_work mc_query_work; struct delayed_work mc_report_work; struct sk_buff_head mc_query_queue; struct sk_buff_head mc_report_queue; spinlock_t mc_query_lock; spinlock_t mc_report_lock; struct mutex mc_lock; struct ifacaddr6 *ac_list; rwlock_t lock; refcount_t refcnt; __u32 if_flags; int dead; u32 desync_factor; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __s32 rs_interval; __u8 rs_probes; long unsigned int tstamp; struct callback_head rcu; unsigned int ra_mtu; }; struct inet6_fill_args { u32 portid; u32 seq; int event; unsigned int flags; int netnsid; int ifindex; enum addr_type_t type; }; struct inet6_ifaddr { struct in6_addr addr; __u32 prefix_len; __u32 rt_priority; __u32 valid_lft; __u32 prefered_lft; refcount_t refcnt; spinlock_t lock; int state; __u32 flags; __u8 dad_probes; __u8 stable_privacy_retry; __u16 scope; __u64 dad_nonce; long unsigned int cstamp; long unsigned int tstamp; struct delayed_work dad_work; struct inet6_dev *idev; struct fib6_info *rt; struct hlist_node addr_lst; struct list_head if_list; struct list_head if_list_aux; struct list_head tmp_list; struct inet6_ifaddr *ifpub; int regen_count; bool tokenized; u8 ifa_proto; struct callback_head rcu; struct in6_addr peer_addr; }; struct inet6_protocol { int (*handler)(struct sk_buff *); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); unsigned int flags; u32 secret; }; struct inet_bind2_bucket { possible_net_t ib_net; int l3mdev; short unsigned int port; short unsigned int addr_type; struct in6_addr v6_rcv_saddr; struct hlist_node node; struct hlist_node bhash_node; struct hlist_head owners; }; struct inet_bind_bucket { possible_net_t ib_net; int l3mdev; short unsigned int port; signed char fastreuse; signed char fastreuseport; kuid_t fastuid; struct in6_addr fast_v6_rcv_saddr; __be32 fast_rcv_saddr; short unsigned int fast_sk_family; bool fast_ipv6_only; struct 
hlist_node node; struct hlist_head bhash2; }; struct inet_bind_hashbucket { spinlock_t lock; struct hlist_head chain; }; struct inet_cork { unsigned int flags; __be32 addr; struct ip_options *opt; unsigned int fragsize; int length; struct dst_entry *dst; u8 tx_flags; __u8 ttl; __s16 tos; char priority; __u16 gso_size; u64 transmit_time; u32 mark; }; struct inet_cork_full { struct inet_cork base; struct flowi fl; }; struct ipv6_pinfo; struct ip_mc_socklist; struct inet_sock { struct sock sk; struct ipv6_pinfo *pinet6; long unsigned int inet_flags; __be32 inet_saddr; __s16 uc_ttl; __be16 inet_sport; struct ip_options_rcu *inet_opt; atomic_t inet_id; __u8 tos; __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; __u8 rcv_tos; __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; u32 local_port_range; struct ip_mc_socklist *mc_list; struct inet_cork_full cork; }; struct request_sock_queue { spinlock_t rskq_lock; u8 rskq_defer_accept; u32 synflood_warned; atomic_t qlen; atomic_t young; struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_tail; struct fastopen_queue fastopenq; }; struct inet_connection_sock_af_ops; struct tcp_ulp_ops; struct inet_connection_sock { struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; struct inet_bind2_bucket *icsk_bind2_hash; long unsigned int icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; __u32 icsk_rto; __u32 icsk_rto_min; __u32 icsk_delack_max; __u32 icsk_pmtu_cookie; const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void *icsk_ulp_data; void (*icsk_clean_acked)(struct sock *, u32); unsigned int (*icsk_sync_mss)(struct sock *, u32); __u8 icsk_ca_state: 5; __u8 icsk_ca_initialized: 1; __u8 icsk_ca_setsockopt: 1; __u8 icsk_ca_dst_locked: 1; __u8 icsk_retransmits; __u8 icsk_pending; __u8 icsk_backoff; __u8 icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; struct { __u8 pending; __u8 quick; __u8 pingpong; __u8 retry; __u32 ato: 8; __u32 lrcv_flowlabel: 20; __u32 unused: 4; long unsigned int timeout; __u32 lrcvtime; __u16 last_seg_size; __u16 rcv_mss; } icsk_ack; struct { int search_high; int search_low; u32 probe_size: 31; u32 enabled: 1; u32 probe_timestamp; } icsk_mtup; u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 icsk_ca_priv[13]; }; struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); void (*send_check)(struct sock *, struct sk_buff *); int (*rebuild_header)(struct sock *); void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); int (*conn_request)(struct sock *, struct sk_buff *); struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); u16 net_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); void (*addr2sockaddr)(struct sock *, struct sockaddr *); void (*mtu_reduced)(struct sock *); }; struct inet_diag_bc_op { unsigned char code; unsigned char yes; short unsigned int no; }; struct inet_diag_dump_data { struct nlattr *req_nlas[4]; struct bpf_sk_storage_diag *bpf_stg_diag; }; struct inet_diag_entry { const __be32 *saddr; const __be32 *daddr; u16 sport; u16 dport; u16 family; u16 userlocks; u32 ifindex; u32 mark; u64 cgroup_id; }; struct inet_diag_req_v2; struct inet_diag_msg; 
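/*
 * Annotation (editor's sketch, not part of the generated type dump):
 * struct inet_diag_req_v2, defined below, is the request header that
 * userspace sends over a NETLINK_SOCK_DIAG socket to dump sockets, and
 * the kernel replies with struct inet_diag_msg payloads. A minimal,
 * untested userspace sketch, assuming the usual <linux/netlink.h> and
 * <linux/inet_diag.h> definitions:
 *
 *   struct {
 *       struct nlmsghdr nlh;
 *       struct inet_diag_req_v2 req;
 *   } msg = {
 *       .nlh = {
 *           .nlmsg_len   = sizeof(msg),
 *           .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
 *           .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *       },
 *       .req = {
 *           .sdiag_family   = AF_INET,
 *           .sdiag_protocol = IPPROTO_TCP,
 *           .idiag_states   = ~0U,   // dump sockets in all TCP states
 *       },
 *   };
 *
 * sent with send(2) on the netlink socket; each reply record begins with
 * struct inet_diag_msg (see below).
 */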
struct inet_diag_handler { struct module *owner; void (*dump)(struct sk_buff *, struct netlink_callback *, const struct inet_diag_req_v2 *); int (*dump_one)(struct netlink_callback *, const struct inet_diag_req_v2 *); void (*idiag_get_info)(struct sock *, struct inet_diag_msg *, void *); int (*idiag_get_aux)(struct sock *, bool, struct sk_buff *); size_t (*idiag_get_aux_size)(struct sock *, bool); int (*destroy)(struct sk_buff *, const struct inet_diag_req_v2 *); __u16 idiag_type; __u16 idiag_info_size; }; struct inet_diag_hostcond { __u8 family; __u8 prefix_len; int port; __be32 addr[0]; }; struct inet_diag_markcond { __u32 mark; __u32 mask; }; struct inet_diag_meminfo { __u32 idiag_rmem; __u32 idiag_wmem; __u32 idiag_fmem; __u32 idiag_tmem; }; struct inet_diag_sockid { __be16 idiag_sport; __be16 idiag_dport; __be32 idiag_src[4]; __be32 idiag_dst[4]; __u32 idiag_if; __u32 idiag_cookie[2]; }; struct inet_diag_msg { __u8 idiag_family; __u8 idiag_state; __u8 idiag_timer; __u8 idiag_retrans; struct inet_diag_sockid id; __u32 idiag_expires; __u32 idiag_rqueue; __u32 idiag_wqueue; __u32 idiag_uid; __u32 idiag_inode; }; struct inet_diag_req { __u8 idiag_family; __u8 idiag_src_len; __u8 idiag_dst_len; __u8 idiag_ext; struct inet_diag_sockid id; __u32 idiag_states; __u32 idiag_dbs; }; struct inet_diag_req_v2 { __u8 sdiag_family; __u8 sdiag_protocol; __u8 idiag_ext; __u8 pad; __u32 idiag_states; struct inet_diag_sockid id; }; struct inet_diag_sockopt { __u8 recverr: 1; __u8 is_icsk: 1; __u8 freebind: 1; __u8 hdrincl: 1; __u8 mc_loop: 1; __u8 transparent: 1; __u8 mc_all: 1; __u8 nodefrag: 1; __u8 bind_address_no_port: 1; __u8 recverr_rfc4884: 1; __u8 defer_connect: 1; __u8 unused: 5; }; struct inet_ehash_bucket { struct hlist_nulls_head chain; }; struct inet_fill_args { u32 portid; u32 seq; int event; unsigned int flags; int netnsid; int ifindex; }; struct inet_frags { unsigned int qsize; void (*constructor)(struct inet_frag_queue *, const void *); void (*destructor)(struct inet_frag_queue *); void (*frag_expire)(struct timer_list *); struct kmem_cache *frags_cachep; const char *frags_cache_name; struct rhashtable_params rhash_params; refcount_t refcnt; struct completion completion; }; struct inet_listen_hashbucket; struct inet_hashinfo { struct inet_ehash_bucket *ehash; spinlock_t *ehash_locks; unsigned int ehash_mask; unsigned int ehash_locks_mask; struct kmem_cache *bind_bucket_cachep; struct inet_bind_hashbucket *bhash; struct kmem_cache *bind2_bucket_cachep; struct inet_bind_hashbucket *bhash2; unsigned int bhash_size; unsigned int lhash2_mask; struct inet_listen_hashbucket *lhash2; bool pernet; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct inet_listen_hashbucket { spinlock_t lock; struct hlist_nulls_head nulls_head; }; struct ipv4_addr_key { __be32 addr; int vif; }; struct inetpeer_addr { union { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4]; }; __u16 family; }; struct inet_peer { struct rb_node rb_node; struct inetpeer_addr daddr; u32 metrics[17]; u32 rate_tokens; u32 n_redirects; long unsigned int rate_last; union { struct { atomic_t rid; }; struct callback_head rcu; }; __u32 dtime; refcount_t refcnt; }; struct inet_protosw { struct list_head list; short unsigned int type; short unsigned int protocol; struct proto *prot; const struct proto_ops *ops; unsigned char flags; }; struct request_sock_ops; struct saved_syn; struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; u8 syncookie: 1; u8 num_timeout: 7; 
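/* Editor's note (not in the original dump): syncookie and num_timeout are
 * bitfields sharing a single byte -- one bit marks a request minted from a
 * SYN cookie, the remaining seven count retransmit timeouts. */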
u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; struct saved_syn *saved_syn; u32 secid; u32 peer_secid; u32 timeout; }; struct inet_request_sock { struct request_sock req; u16 snd_wscale: 4; u16 rcv_wscale: 4; u16 tstamp_ok: 1; u16 sack_ok: 1; u16 wscale_ok: 1; u16 ecn_ok: 1; u16 acked: 1; u16 no_srccheck: 1; u16 smc_ok: 1; u32 ir_mark; union { struct ip_options_rcu *ireq_opt; struct { struct ipv6_txoptions *ipv6_opt; struct sk_buff *pktopts; }; }; }; struct inet_timewait_death_row { refcount_t tw_refcount; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct inet_hashinfo *hashinfo; int sysctl_max_tw_buckets; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct inet_timewait_sock { struct sock_common __tw_common; __u32 tw_mark; unsigned char tw_substate; unsigned char tw_rcv_wscale; __be16 tw_sport; unsigned int tw_transparent: 1; unsigned int tw_flowlabel: 20; unsigned int tw_usec_ts: 1; unsigned int tw_pad: 2; unsigned int tw_tos: 8; u32 tw_txhash; u32 tw_priority; struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; struct inet_bind2_bucket *tw_tb2; }; struct inflate_state { inflate_mode mode; int last; int wrap; int havedict; int flags; unsigned int dmax; long unsigned int check; long unsigned int total; unsigned int wbits; unsigned int wsize; unsigned int whave; unsigned int write; unsigned char *window; long unsigned int hold; unsigned int bits; unsigned int length; unsigned int offset; unsigned int extra; const code *lencode; const code *distcode; unsigned int lenbits; unsigned int distbits; unsigned int ncode; unsigned int nlen; unsigned int ndist; unsigned int have; code *next; short unsigned int lens[320]; short unsigned int work[288]; code codes[2048]; }; struct inflate_workspace { struct inflate_state inflate_state; unsigned char working_window[32768]; }; struct ingress_sched_data { struct tcf_block *block; struct tcf_block_ext_info block_info; struct mini_Qdisc_pair miniqp; }; struct powernow_k8_data; struct init_on_cpu { struct powernow_k8_data *data; int rc; }; struct x86_mapping_info; struct init_pgtable_data { struct x86_mapping_info *info; pgd_t *level4p; }; struct mnt_idmap; struct kstat; struct offset_ctx; struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct mnt_idmap *, struct inode *, int); struct posix_acl * (*get_inode_acl)(struct inode *, int, bool); int (*readlink)(struct dentry *, char *, int); int (*create)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct mnt_idmap *, struct inode *, struct dentry *, const char *); int (*mkdir)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); int (*rename)(struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); int (*getattr)(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr)(struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); int (*update_time)(struct inode *, int); int 
(*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); int (*tmpfile)(struct mnt_idmap *, struct inode *, struct file *, umode_t); struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int); int (*set_acl)(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); int (*fileattr_set)(struct mnt_idmap *, struct dentry *, struct fileattr *); int (*fileattr_get)(struct dentry *, struct fileattr *); struct offset_ctx * (*get_offset_ctx)(struct inode *); long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct inode_security_struct { struct inode *inode; struct list_head list; u32 task_sid; u32 sid; u16 sclass; unsigned char initialized; spinlock_t lock; }; struct inode_switch_wbs_context { struct rcu_work work; struct bdi_writeback *new_wb; struct inode *inodes[0]; }; struct inodes_stat_t { long int nr_inodes; long int nr_unused; long int dummy[5]; }; struct inotify_event { __s32 wd; __u32 mask; __u32 cookie; __u32 len; char name[0]; }; struct inotify_event_info { struct fsnotify_event fse; u32 mask; int wd; u32 sync_cookie; int name_len; char name[0]; }; struct inotify_inode_mark { struct fsnotify_mark fsn_mark; int wd; }; struct input_absinfo { __s32 value; __s32 minimum; __s32 maximum; __s32 fuzz; __s32 flat; __s32 resolution; }; struct input_id { __u16 bustype; __u16 vendor; __u16 product; __u16 version; }; struct input_dev_poller; struct input_mt; struct input_value; struct input_dev { const char *name; const char *phys; const char *uniq; struct input_id id; long unsigned int propbit[1]; long unsigned int evbit[1]; long unsigned int keybit[12]; long unsigned int relbit[1]; long unsigned int absbit[1]; long unsigned int mscbit[1]; long unsigned int ledbit[1]; long unsigned int sndbit[1]; long unsigned int ffbit[2]; long unsigned int swbit[1]; unsigned int hint_events_per_packet; unsigned int keycodemax; unsigned int keycodesize; void *keycode; int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); struct ff_device *ff; struct input_dev_poller *poller; unsigned int repeat_key; struct timer_list timer; int rep[2]; struct input_mt *mt; struct input_absinfo *absinfo; long unsigned int key[12]; long unsigned int led[1]; long unsigned int snd[1]; long unsigned int sw[1]; int (*open)(struct input_dev *); void (*close)(struct input_dev *); int (*flush)(struct input_dev *, struct file *); int (*event)(struct input_dev *, unsigned int, unsigned int, int); struct input_handle *grab; spinlock_t event_lock; struct mutex mutex; unsigned int users; bool going_away; struct device dev; struct list_head h_list; struct list_head node; unsigned int num_vals; unsigned int max_vals; struct input_value *vals; bool devres_managed; ktime_t timestamp[3]; bool inhibited; }; struct input_dev_poller { void (*poll)(struct input_dev *); unsigned int poll_interval; unsigned int poll_interval_max; unsigned int poll_interval_min; struct input_dev *input; struct delayed_work work; }; struct input_device_id { kernel_ulong_t flags; __u16 bustype; __u16 vendor; __u16 product; __u16 version; kernel_ulong_t evbit[1]; kernel_ulong_t keybit[12]; kernel_ulong_t relbit[1]; kernel_ulong_t absbit[1]; kernel_ulong_t mscbit[1]; kernel_ulong_t ledbit[1]; kernel_ulong_t sndbit[1]; kernel_ulong_t ffbit[2]; kernel_ulong_t swbit[1]; kernel_ulong_t propbit[1]; kernel_ulong_t driver_info; }; struct input_devres { struct input_dev *input; }; struct input_handler { void 
*private; void (*event)(struct input_handle *, unsigned int, unsigned int, int); unsigned int (*events)(struct input_handle *, struct input_value *, unsigned int); bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); bool (*match)(struct input_handler *, struct input_dev *); int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); void (*disconnect)(struct input_handle *); void (*start)(struct input_handle *); bool legacy_minors; int minor; const char *name; const struct input_device_id *id_table; struct list_head h_list; struct list_head node; }; struct input_mask { __u32 type; __u32 codes_size; __u64 codes_ptr; }; struct input_mt_slot { int abs[14]; unsigned int frame; unsigned int key; }; struct input_mt { int trkid; int num_slots; int slot; unsigned int flags; unsigned int frame; int *red; struct input_mt_slot slots[0]; }; struct input_seq_state { short unsigned int pos; bool mutex_acquired; int input_devices_state; }; struct input_value { __u16 type; __u16 code; __s32 value; }; struct insn_field { union { insn_value_t value; insn_byte_t bytes[4]; }; unsigned char got; unsigned char nbytes; }; struct insn { struct insn_field prefixes; struct insn_field rex_prefix; struct insn_field vex_prefix; struct insn_field opcode; struct insn_field modrm; struct insn_field sib; struct insn_field displacement; union { struct insn_field immediate; struct insn_field moffset1; struct insn_field immediate1; }; union { struct insn_field moffset2; struct insn_field immediate2; }; int emulate_prefix_size; insn_attr_t attr; unsigned char opnd_bytes; unsigned char addr_bytes; unsigned char length; unsigned char x86_64; const insn_byte_t *kaddr; const insn_byte_t *end_kaddr; const insn_byte_t *next_byte; }; struct intel_agp_driver_description { unsigned int chip_id; char *name; const struct agp_bridge_driver *driver; }; struct intel_early_ops { resource_size_t (*stolen_size)(int, int, int); resource_size_t (*stolen_base)(int, int, int, resource_size_t); }; struct intel_excl_states { enum intel_excl_state_type state[64]; bool sched_started; }; struct intel_excl_cntrs { raw_spinlock_t lock; struct intel_excl_states states[2]; union { u16 has_exclusive[2]; u32 exclusive_present; }; int refcnt; unsigned int core_id; }; struct intel_gtt_driver { unsigned int gen: 8; unsigned int is_g33: 1; unsigned int is_pineview: 1; unsigned int is_ironlake: 1; unsigned int has_pgtbl_enable: 1; unsigned int dma_mask_size: 8; int (*setup)(void); void (*cleanup)(void); void (*write_entry)(dma_addr_t, unsigned int, unsigned int); bool (*check_flags)(unsigned int); void (*chipset_flush)(void); }; struct intel_gtt_driver_description { unsigned int gmch_chip_id; char *name; const struct intel_gtt_driver *gtt_driver; }; struct intel_shared_regs { struct er_account regs[7]; int refcnt; unsigned int core_id; }; struct intel_uncore_extra_reg { raw_spinlock_t lock; u64 config; u64 config1; u64 config2; atomic_t ref; }; struct intel_uncore_pmu; struct intel_uncore_box { int dieid; int n_active; int n_events; int cpu; long unsigned int flags; atomic_t refcnt; struct perf_event *events[10]; struct perf_event *event_list[10]; struct event_constraint *event_constraint[10]; long unsigned int active_mask[1]; u64 tags[10]; struct pci_dev *pci_dev; struct intel_uncore_pmu *pmu; u64 hrtimer_duration; struct hrtimer hrtimer; struct list_head list; struct list_head active_list; void *io_addr; struct intel_uncore_extra_reg shared_regs[0]; }; struct intel_uncore_discovery_type { struct rb_node node; 
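/* Editor's note (not in the original dump): the embedded rb_node links this
 * descriptor into a kernel red-black tree; as usual for intrusive rbtree
 * nodes, the container is recovered with rb_entry(), e.g. (sketch):
 *
 *   struct intel_uncore_discovery_type *t =
 *           rb_entry(node, struct intel_uncore_discovery_type, node);
 */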
enum uncore_access_type access_type; struct rb_root units; u16 type; u8 num_counters; u8 counter_width; u8 ctl_offset; u8 ctr_offset; u16 num_units; }; struct intel_uncore_discovery_unit { struct rb_node node; unsigned int pmu_idx; unsigned int id; unsigned int die; u64 addr; }; struct intel_uncore_init_fun { void (*cpu_init)(void); int (*pci_init)(void); void (*mmio_init)(void); bool use_discovery; int *uncore_units_ignore; }; struct intel_uncore_ops { void (*init_box)(struct intel_uncore_box *); void (*exit_box)(struct intel_uncore_box *); void (*disable_box)(struct intel_uncore_box *); void (*enable_box)(struct intel_uncore_box *); void (*disable_event)(struct intel_uncore_box *, struct perf_event *); void (*enable_event)(struct intel_uncore_box *, struct perf_event *); u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); int (*hw_config)(struct intel_uncore_box *, struct perf_event *); struct event_constraint * (*get_constraint)(struct intel_uncore_box *, struct perf_event *); void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); }; struct intel_uncore_type; struct intel_uncore_pmu { struct pmu pmu; char name[32]; int pmu_idx; int func_id; bool registered; atomic_t activeboxes; cpumask_t cpu_mask; struct intel_uncore_type *type; struct intel_uncore_box **boxes; }; struct uncore_iio_topology; struct uncore_upi_topology; struct intel_uncore_topology { int pmu_idx; union { void *untyped; struct uncore_iio_topology *iio; struct uncore_upi_topology *upi; }; }; struct uncore_event_desc; struct intel_uncore_type { const char *name; int num_counters; int num_boxes; int perf_ctr_bits; int fixed_ctr_bits; int num_freerunning_types; int type_id; unsigned int perf_ctr; unsigned int event_ctl; unsigned int event_mask; unsigned int event_mask_ext; unsigned int fixed_ctr; unsigned int fixed_ctl; unsigned int box_ctl; union { unsigned int msr_offset; unsigned int mmio_offset; }; unsigned int mmio_map_size; unsigned int num_shared_regs: 8; unsigned int single_fixed: 1; unsigned int pair_ctr_ctl: 1; union { u64 *msr_offsets; u64 *pci_offsets; u64 *mmio_offsets; }; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; struct intel_uncore_ops *ops; struct uncore_event_desc *event_descs; struct freerunning_counters *freerunning; const struct attribute_group *attr_groups[4]; const struct attribute_group **attr_update; struct pmu *pmu; struct rb_root *boxes; struct intel_uncore_topology **topology; int (*get_topology)(struct intel_uncore_type *); void (*set_mapping)(struct intel_uncore_type *); void (*cleanup_mapping)(struct intel_uncore_type *); void (*cleanup_extra_boxes)(struct intel_uncore_type *); }; union intel_x86_pebs_dse { u64 val; struct { unsigned int ld_dse: 4; unsigned int ld_stlb_miss: 1; unsigned int ld_locked: 1; unsigned int ld_data_blk: 1; unsigned int ld_addr_blk: 1; unsigned int ld_reserved: 24; }; struct { unsigned int st_l1d_hit: 1; unsigned int st_reserved1: 3; unsigned int st_stlb_miss: 1; unsigned int st_locked: 1; unsigned int st_reserved2: 26; }; struct { unsigned int st_lat_dse: 4; unsigned int st_lat_stlb_miss: 1; unsigned int st_lat_locked: 1; unsigned int ld_reserved3: 26; }; struct { unsigned int mtl_dse: 5; unsigned int mtl_locked: 1; unsigned int mtl_stlb_miss: 1; unsigned int mtl_fwd_blk: 1; unsigned int ld_reserved4: 24; }; struct { unsigned int lnc_dse: 8; unsigned int ld_reserved5: 2; unsigned int lnc_stlb_miss: 1; unsigned int lnc_locked: 1; unsigned int lnc_data_blk: 1; unsigned 
int lnc_addr_blk: 1; unsigned int ld_reserved6: 18; }; }; struct internal_container { struct klist_node node; struct attribute_container *cont; struct device classdev; }; struct internal_state { int dummy; }; struct interval { uint32_t first; uint32_t last; }; struct interval_tree_node { struct rb_node rb; long unsigned int start; long unsigned int last; long unsigned int __subtree_last; }; struct io_accept { struct file *file; struct sockaddr *addr; int *addr_len; int flags; int iou_flags; u32 file_slot; long unsigned int nofile; }; struct io_alloc_cache { void **entries; unsigned int nr_cached; unsigned int max_cached; size_t elem_size; }; struct io_apic { unsigned int index; unsigned int unused[3]; unsigned int data; unsigned int unused2[11]; unsigned int eoi; }; struct ubuf_info; struct msghdr { void *msg_name; int msg_namelen; int msg_inq; struct iov_iter msg_iter; union { void *msg_control; void *msg_control_user; }; bool msg_control_is_user: 1; bool msg_get_inq: 1; unsigned int msg_flags; __kernel_size_t msg_controllen; struct kiocb *msg_iocb; struct ubuf_info *msg_ubuf; int (*sg_from_iter)(struct sk_buff *, struct iov_iter *, size_t); }; struct io_async_msghdr { struct iovec fast_iov; struct iovec *free_iov; int free_iov_nr; int namelen; __kernel_size_t controllen; __kernel_size_t payloadlen; struct sockaddr *uaddr; struct msghdr msg; struct __kernel_sockaddr_storage addr; }; struct iov_iter_state { size_t iov_offset; size_t count; long unsigned int nr_segs; }; struct wait_page_queue { struct folio *folio; int bit_nr; wait_queue_entry_t wait; }; struct io_async_rw { size_t bytes_done; struct iov_iter iter; struct iov_iter_state iter_state; struct iovec fast_iov; struct iovec *free_iovec; int free_iov_nr; struct wait_page_queue wpq; }; struct io_bind { struct file *file; int addr_len; }; struct io_bitmap { u64 sequence; refcount_t refcnt; unsigned int max; long unsigned int bitmap[1024]; }; struct io_buffer { struct list_head list; __u64 addr; __u32 len; __u16 bid; __u16 bgid; }; struct io_uring_buf_ring; struct io_buffer_list { union { struct list_head buf_list; struct { struct page **buf_pages; struct io_uring_buf_ring *buf_ring; }; struct callback_head rcu; }; __u16 bgid; __u16 buf_nr_pages; __u16 nr_entries; __u16 head; __u16 mask; __u16 flags; atomic_t refs; }; struct io_cancel { struct file *file; u64 addr; u32 flags; s32 fd; u8 opcode; }; struct io_ring_ctx; struct io_cancel_data { struct io_ring_ctx *ctx; union { u64 data; struct file *file; }; u8 opcode; u32 flags; int seq; }; struct io_wq_work; typedef bool work_cancel_fn(struct io_wq_work *, void *); struct io_cb_cancel_data { work_cancel_fn *fn; void *data; int nr_running; int nr_pending; bool cancel_all; }; struct io_close { struct file *file; int fd; u32 file_slot; }; struct io_cmd_data { struct file *file; __u8 data[56]; }; struct io_kiocb; struct io_cold_def { const char *name; void (*cleanup)(struct io_kiocb *); void (*fail)(struct io_kiocb *); }; struct io_comp_batch { struct request *req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; struct io_connect { struct file *file; struct sockaddr *addr; int addr_len; bool in_progress; bool seen_econnaborted; }; struct io_context { atomic_long_t refcount; atomic_t active_ref; short unsigned int ioprio; }; struct io_cq { struct request_queue *q; struct io_context *ioc; union { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; }; union { struct hlist_node ioc_node; struct callback_head __rcu_head; }; unsigned int flags; }; struct io_cqe { __u64 
user_data; __s32 res; union { __u32 flags; int fd; }; }; struct io_cqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 overflow; __u32 cqes; __u32 flags; __u32 resv1; __u64 user_addr; }; struct io_defer_entry { struct list_head list; struct io_kiocb *req; u32 seq; }; struct io_epoll { struct file *file; int epfd; int op; int fd; struct epoll_event event; }; struct io_ev_fd { struct eventfd_ctx *cq_ev_fd; unsigned int eventfd_async: 1; struct callback_head rcu; refcount_t refs; atomic_t ops; }; struct io_fadvise { struct file *file; u64 offset; u64 len; u32 advice; }; struct io_fixed_file; struct io_file_table { struct io_fixed_file *files; long unsigned int *bitmap; unsigned int alloc_hint; }; struct io_fixed_file { long unsigned int file_ptr; }; struct io_fixed_install { struct file *file; unsigned int o_flags; }; struct io_ftrunc { struct file *file; loff_t len; }; struct io_futex { struct file *file; union { u32 *uaddr; struct futex_waitv *uwaitv; }; long unsigned int futex_val; long unsigned int futex_mask; long unsigned int futexv_owned; u32 futex_flags; unsigned int futex_nr; bool futexv_unqueued; }; struct io_futex_data { struct futex_q q; struct io_kiocb *req; }; struct io_hash_bucket { spinlock_t lock; struct hlist_head list; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct io_hash_table { struct io_hash_bucket *hbs; unsigned int hash_bits; }; struct io_imu_folio_data { unsigned int nr_pages_head; unsigned int nr_pages_mid; unsigned int folio_shift; }; struct io_uring_sqe; struct io_issue_def { unsigned int needs_file: 1; unsigned int plug: 1; unsigned int hash_reg_file: 1; unsigned int unbound_nonreg_file: 1; unsigned int pollin: 1; unsigned int pollout: 1; unsigned int poll_exclusive: 1; unsigned int buffer_select: 1; unsigned int audit_skip: 1; unsigned int ioprio: 1; unsigned int iopoll: 1; unsigned int iopoll_queue: 1; unsigned int vectored: 1; short unsigned int async_size; int (*issue)(struct io_kiocb *, unsigned int); int (*prep)(struct io_kiocb *, const struct io_uring_sqe *); }; struct io_wq_work_node { struct io_wq_work_node *next; }; struct io_tw_state; typedef void (*io_req_tw_func_t)(struct io_kiocb *, struct io_tw_state *); struct io_task_work { struct llist_node node; io_req_tw_func_t func; }; struct io_wq_work { struct io_wq_work_node list; atomic_t flags; int cancel_seq; }; struct io_mapped_ubuf; struct io_rsrc_node; struct io_kiocb { union { struct file *file; struct io_cmd_data cmd; }; u8 opcode; u8 iopoll_completed; u16 buf_index; unsigned int nr_tw; io_req_flags_t flags; struct io_cqe cqe; struct io_ring_ctx *ctx; struct task_struct *task; union { struct io_mapped_ubuf *imu; struct io_buffer *kbuf; struct io_buffer_list *buf_list; }; union { struct io_wq_work_node comp_list; __poll_t apoll_events; }; struct io_rsrc_node *rsrc_node; atomic_t refs; bool cancel_seq_set; struct io_task_work io_task_work; struct hlist_node hash_node; struct async_poll *apoll; void *async_data; atomic_t poll_refs; struct io_kiocb *link; const struct cred *creds; struct io_wq_work work; struct { u64 extra1; u64 extra2; } big_cqe; }; struct io_link { struct file *file; int old_dfd; int new_dfd; struct filename *oldpath; struct filename *newpath; int flags; }; struct io_listen { struct file *file; int backlog; }; struct io_madvise { struct file *file; u64 addr; u64 len; u32 advice; }; struct io_mapped_ubuf { u64 ubuf; unsigned int len; unsigned int nr_bvecs; unsigned int folio_shift; refcount_t refs; long unsigned int 
acct_pages; struct bio_vec bvec[0]; }; struct io_mkdir { struct file *file; int dfd; umode_t mode; struct filename *filename; }; struct io_msg { struct file *file; struct file *src_file; struct callback_head tw; u64 user_data; u32 len; u32 cmd; u32 src_fd; union { u32 dst_fd; u32 cqe_flags; }; u32 flags; }; struct io_napi_entry { unsigned int napi_id; struct list_head list; long unsigned int timeout; struct hlist_node node; struct callback_head rcu; }; struct io_nop { struct file *file; int result; }; struct ubuf_info_ops; struct ubuf_info { const struct ubuf_info_ops *ops; refcount_t refcnt; u8 flags; }; struct io_notif_data { struct file *file; struct ubuf_info uarg; struct io_notif_data *next; struct io_notif_data *head; unsigned int account_pages; bool zc_report; bool zc_used; bool zc_copied; }; struct io_open { struct file *file; int dfd; u32 file_slot; struct filename *filename; struct open_how how; long unsigned int nofile; }; struct io_uring_cqe { __u64 user_data; __s32 res; __u32 flags; __u64 big_cqe[0]; }; struct io_overflow_cqe { struct list_head list; struct io_uring_cqe cqe; }; struct io_poll_table { struct poll_table_struct pt; struct io_kiocb *req; int nr_entries; int error; bool owning; __poll_t result_mask; }; struct io_poll_update { struct file *file; u64 old_user_data; u64 new_user_data; __poll_t events; bool update_events; bool update_user_data; }; struct io_provide_buf { struct file *file; __u64 addr; __u32 len; __u32 bgid; __u32 nbufs; __u16 bid; }; struct io_uring_recvmsg_out { __u32 namelen; __u32 controllen; __u32 payloadlen; __u32 flags; }; struct io_recvmsg_multishot_hdr { struct io_uring_recvmsg_out msg; struct __kernel_sockaddr_storage addr; }; struct io_rename { struct file *file; int old_dfd; int new_dfd; struct filename *oldpath; struct filename *newpath; int flags; }; struct io_restriction { long unsigned int register_op[1]; long unsigned int sqe_op[1]; u8 sqe_flags_allowed; u8 sqe_flags_required; bool registered; }; struct io_wq_work_list { struct io_wq_work_node *first; struct io_wq_work_node *last; }; struct io_submit_link { struct io_kiocb *head; struct io_kiocb *last; }; struct io_submit_state { struct io_wq_work_node free_list; struct io_wq_work_list compl_reqs; struct io_submit_link link; bool plug_started; bool need_plug; bool cq_flush; short unsigned int submit_nr; struct blk_plug plug; }; struct io_rings; struct io_sq_data; struct io_rsrc_data; struct io_wq_hash; struct io_ring_ctx { struct { unsigned int flags; unsigned int drain_next: 1; unsigned int restricted: 1; unsigned int off_timeout_used: 1; unsigned int drain_active: 1; unsigned int has_evfd: 1; unsigned int task_complete: 1; unsigned int lockless_cq: 1; unsigned int syscall_iopoll: 1; unsigned int poll_activated: 1; unsigned int drain_disabled: 1; unsigned int compat: 1; unsigned int iowq_limits_set: 1; struct task_struct *submitter_task; struct io_rings *rings; struct percpu_ref refs; clockid_t clockid; enum tk_offsets clock_offset; enum task_work_notify_mode notify_method; unsigned int sq_thread_idle; long: 64; }; struct { struct mutex uring_lock; u32 *sq_array; struct io_uring_sqe *sq_sqes; unsigned int cached_sq_head; unsigned int sq_entries; struct io_rsrc_node *rsrc_node; atomic_t cancel_seq; bool poll_multi_queue; struct io_wq_work_list iopoll_list; struct io_file_table file_table; struct io_mapped_ubuf **user_bufs; unsigned int nr_user_files; unsigned int nr_user_bufs; struct io_submit_state submit_state; struct xarray io_bl_xa; struct io_hash_table cancel_table_locked; struct 
io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_alloc_cache rw_cache; struct io_alloc_cache uring_cache; struct hlist_head cancelable_uring_cmd; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { struct io_uring_cqe *cqe_cached; struct io_uring_cqe *cqe_sentinel; unsigned int cached_cq_tail; unsigned int cq_entries; struct io_ev_fd *io_ev_fd; unsigned int cq_extra; long: 64; long: 64; long: 64; }; struct { struct llist_head work_llist; long unsigned int check_cq; atomic_t cq_wait_nr; atomic_t cq_timeouts; struct wait_queue_head cq_wait; long: 64; long: 64; long: 64; }; struct { spinlock_t timeout_lock; struct list_head timeout_list; struct list_head ltimeout_list; unsigned int cq_last_tm_flush; long: 64; long: 64; long: 64; }; spinlock_t completion_lock; struct list_head io_buffers_comp; struct list_head cq_overflow_list; struct io_hash_table cancel_table; struct hlist_head waitid_list; struct hlist_head futex_list; struct io_alloc_cache futex_cache; const struct cred *sq_creds; struct io_sq_data *sq_data; struct wait_queue_head sqo_sq_wait; struct list_head sqd_list; unsigned int file_alloc_start; unsigned int file_alloc_end; struct list_head io_buffers_cache; struct wait_queue_head poll_wq; struct io_restriction restrictions; struct io_rsrc_data *file_data; struct io_rsrc_data *buf_data; struct list_head rsrc_ref_list; struct io_alloc_cache rsrc_node_cache; struct wait_queue_head rsrc_quiesce_wq; unsigned int rsrc_quiesce; u32 pers_next; struct xarray personalities; struct io_wq_hash *hash_map; struct user_struct *user; struct mm_struct *mm_account; struct llist_head fallback_llist; struct delayed_work fallback_work; struct work_struct exit_work; struct list_head tctx_list; struct completion ref_comp; u32 iowq_limits[2]; struct callback_head poll_wq_task_work; struct list_head defer_list; struct io_alloc_cache msg_cache; spinlock_t msg_lock; struct list_head napi_list; spinlock_t napi_lock; ktime_t napi_busy_poll_dt; bool napi_prefer_busy_poll; bool napi_enabled; struct hlist_head napi_ht[16]; unsigned int evfd_last_cq_tail; short unsigned int n_ring_pages; short unsigned int n_sqe_pages; struct page **ring_pages; struct page **sqe_pages; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct io_uring { u32 head; u32 tail; }; struct io_rings { struct io_uring sq; struct io_uring cq; u32 sq_ring_mask; u32 cq_ring_mask; u32 sq_ring_entries; u32 cq_ring_entries; u32 sq_dropped; atomic_t sq_flags; u32 cq_flags; u32 cq_overflow; long: 64; long: 64; struct io_uring_cqe cqes[0]; }; struct io_rsrc_data { struct io_ring_ctx *ctx; u64 **tags; unsigned int nr; u16 rsrc_type; bool quiesce; }; struct io_rsrc_put { u64 tag; union { void *rsrc; struct file *file; struct io_mapped_ubuf *buf; }; }; struct io_rsrc_node { struct io_ring_ctx *ctx; int refs; bool empty; u16 type; struct list_head node; struct io_rsrc_put item; }; struct io_rsrc_update { struct file *file; u64 arg; u32 nr_args; u32 offset; }; struct io_rw { struct kiocb kiocb; u64 addr; u32 len; rwf_t flags; }; struct io_shutdown { struct file *file; int how; }; struct io_socket { struct file *file; int domain; int type; int protocol; int flags; u32 file_slot; long unsigned int nofile; }; struct io_splice { struct file *file_out; loff_t off_out; loff_t off_in; u64 len; int splice_fd_in; unsigned int flags; }; struct io_sq_data { refcount_t refs; atomic_t park_pending; struct mutex lock; struct list_head ctx_list; struct task_struct *thread; struct wait_queue_head wait; unsigned int sq_thread_idle; int 
sq_cpu; pid_t task_pid; pid_t task_tgid; u64 work_time; long unsigned int state; struct completion exited; }; struct io_sqring_offsets { __u32 head; __u32 tail; __u32 ring_mask; __u32 ring_entries; __u32 flags; __u32 dropped; __u32 array; __u32 resv1; __u64 user_addr; }; struct user_msghdr; struct io_sr_msg { struct file *file; union { struct compat_msghdr *umsg_compat; struct user_msghdr *umsg; void *buf; }; int len; unsigned int done_io; unsigned int msg_flags; unsigned int nr_multishot_loops; u16 flags; u16 addr_len; u16 buf_group; void *addr; void *msg_control; struct io_kiocb *notif; }; struct statx; struct io_statx { struct file *file; int dfd; unsigned int mask; unsigned int flags; struct filename *filename; struct statx *buffer; }; struct io_sync { struct file *file; loff_t len; loff_t off; int flags; int mode; }; struct io_task_cancel { struct task_struct *task; bool all; }; struct io_tctx_exit { struct callback_head task_work; struct completion completion; struct io_ring_ctx *ctx; }; struct io_tctx_node { struct list_head ctx_node; struct task_struct *task; struct io_ring_ctx *ctx; }; struct io_timeout { struct file *file; u32 off; u32 target_seq; u32 repeats; struct list_head list; struct io_kiocb *head; struct io_kiocb *prev; }; struct io_timeout_data { struct io_kiocb *req; struct hrtimer timer; struct timespec64 ts; enum hrtimer_mode mode; u32 flags; }; struct io_timeout_rem { struct file *file; u64 addr; struct timespec64 ts; u32 flags; bool ltimeout; }; struct io_tlb_area { long unsigned int used; unsigned int index; spinlock_t lock; }; struct io_tlb_slot; struct io_tlb_pool { phys_addr_t start; phys_addr_t end; void *vaddr; long unsigned int nslabs; bool late_alloc; unsigned int nareas; unsigned int area_nslabs; struct io_tlb_area *areas; struct io_tlb_slot *slots; }; struct io_tlb_mem { struct io_tlb_pool defpool; long unsigned int nslabs; struct dentry *debugfs; bool force_bounce; bool for_alloc; atomic_long_t total_used; atomic_long_t used_hiwater; atomic_long_t transient_nslabs; }; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; short unsigned int list; short unsigned int pad_slots; }; struct io_tw_state {}; struct io_unlink { struct file *file; int dfd; int flags; struct filename *filename; }; struct io_uring_buf { __u64 addr; __u32 len; __u16 bid; __u16 resv; }; struct io_uring_buf_reg { __u64 ring_addr; __u32 ring_entries; __u16 bgid; __u16 flags; __u64 resv[3]; }; struct io_uring_buf_ring { union { struct { __u64 resv1; __u32 resv2; __u16 resv3; __u16 tail; }; struct { struct {} __empty_bufs; struct io_uring_buf bufs[0]; }; }; }; struct io_uring_buf_status { __u32 buf_group; __u32 head; __u32 resv[8]; }; struct io_uring_clock_register { __u32 clockid; __u32 __resv[3]; }; struct io_uring_clone_buffers { __u32 src_fd; __u32 flags; __u32 pad[6]; }; struct io_uring_cmd { struct file *file; const struct io_uring_sqe *sqe; void (*task_work_cb)(struct io_uring_cmd *, unsigned int); u32 cmd_op; u32 flags; u8 pdu[32]; }; struct io_uring_file_index_range { __u32 off; __u32 len; __u64 resv; }; struct io_uring_getevents_arg { __u64 sigmask; __u32 sigmask_sz; __u32 min_wait_usec; __u64 ts; }; struct io_uring_napi { __u32 busy_poll_to; __u8 prefer_busy_poll; __u8 pad[3]; __u64 resv; }; struct io_uring_params { __u32 sq_entries; __u32 cq_entries; __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; __u32 features; __u32 wq_fd; __u32 resv[3]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; struct io_uring_probe_op { __u8 op; __u8 resv; __u16 
flags; __u32 resv2; }; struct io_uring_probe { __u8 last_op; __u8 ops_len; __u16 resv; __u32 resv2[3]; struct io_uring_probe_op ops[0]; }; struct io_uring_restriction { __u16 opcode; union { __u8 register_op; __u8 sqe_op; __u8 sqe_flags; }; __u8 resv; __u32 resv2[3]; }; struct io_uring_rsrc_register { __u32 nr; __u32 flags; __u64 resv2; __u64 data; __u64 tags; }; struct io_uring_rsrc_update { __u32 offset; __u32 resv; __u64 data; }; struct io_uring_rsrc_update2 { __u32 offset; __u32 resv; __u64 data; __u64 tags; __u32 nr; __u32 resv2; }; struct io_uring_sqe { __u8 opcode; __u8 flags; __u16 ioprio; __s32 fd; union { __u64 off; __u64 addr2; struct { __u32 cmd_op; __u32 __pad1; }; }; union { __u64 addr; __u64 splice_off_in; struct { __u32 level; __u32 optname; }; }; __u32 len; union { __kernel_rwf_t rw_flags; __u32 fsync_flags; __u16 poll_events; __u32 poll32_events; __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; __u32 accept_flags; __u32 cancel_flags; __u32 open_flags; __u32 statx_flags; __u32 fadvise_advice; __u32 splice_flags; __u32 rename_flags; __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; __u32 msg_ring_flags; __u32 uring_cmd_flags; __u32 waitid_flags; __u32 futex_flags; __u32 install_fd_flags; __u32 nop_flags; }; __u64 user_data; union { __u16 buf_index; __u16 buf_group; }; __u16 personality; union { __s32 splice_fd_in; __u32 file_index; __u32 optlen; struct { __u16 addr_len; __u16 __pad3[1]; }; }; union { struct { __u64 addr3; __u64 __pad2[1]; }; __u64 optval; __u8 cmd[0]; }; }; struct io_uring_sync_cancel_reg { __u64 addr; __s32 fd; __u32 flags; struct __kernel_timespec timeout; __u8 opcode; __u8 pad[7]; __u64 pad2[3]; }; struct io_wq; struct io_uring_task { int cached_refs; const struct io_ring_ctx *last; struct io_wq *io_wq; struct file *registered_rings[16]; struct xarray xa; struct wait_queue_head wait; atomic_t in_cancel; atomic_t inflight_tracked; struct percpu_counter inflight; long: 64; long: 64; long: 64; long: 64; struct { struct llist_head task_list; struct callback_head task_work; long: 64; long: 64; long: 64; long: 64; long: 64; }; }; struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; unsigned int cq_tail; unsigned int cq_min_tail; unsigned int nr_timeouts; int hit_timeout; ktime_t min_timeout; ktime_t timeout; struct hrtimer t; ktime_t napi_busy_poll_dt; bool napi_prefer_busy_poll; }; struct waitid_info { pid_t pid; uid_t uid; int status; int cause; }; struct io_waitid { struct file *file; int which; pid_t upid; int options; atomic_t refs; struct wait_queue_head *head; struct siginfo *infop; struct waitid_info info; }; struct rusage; struct wait_opts { enum pid_type wo_type; int wo_flags; struct pid *wo_pid; struct waitid_info *wo_info; int wo_stat; struct rusage *wo_rusage; wait_queue_entry_t child_wait; int notask_error; }; struct io_waitid_async { struct io_kiocb *req; struct wait_opts wo; }; struct io_worker { refcount_t ref; int create_index; long unsigned int flags; struct hlist_nulls_node nulls_node; struct list_head all_list; struct task_struct *task; struct io_wq *wq; struct io_wq_work *cur_work; raw_spinlock_t lock; struct completion ref_done; long unsigned int create_state; struct callback_head create_work; int init_retries; union { struct callback_head rcu; struct work_struct work; }; }; typedef struct io_wq_work *free_work_fn(struct io_wq_work *); typedef void io_wq_work_fn(struct io_wq_work *); struct io_wq_acct { unsigned int nr_workers; unsigned int max_workers; int index; atomic_t nr_running; 
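/* Editor's note (not in the original dump): io-wq keeps one accounting
 * block per worker class -- struct io_wq below embeds acct[2], one slot
 * for bound and one for unbound workers -- tracking worker counts and the
 * pending work list for that class. */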
raw_spinlock_t lock; struct io_wq_work_list work_list; long unsigned int flags; }; struct io_wq { long unsigned int state; free_work_fn *free_work; io_wq_work_fn *do_work; struct io_wq_hash *hash; atomic_t worker_refs; struct completion worker_done; struct hlist_node cpuhp_node; struct task_struct *task; struct io_wq_acct acct[2]; raw_spinlock_t lock; struct hlist_nulls_head free_list; struct list_head all_list; struct wait_queue_entry wait; struct io_wq_work *hash_tail[64]; cpumask_var_t cpu_mask; }; struct io_wq_data { struct io_wq_hash *hash; struct task_struct *task; io_wq_work_fn *do_work; free_work_fn *free_work; }; struct io_wq_hash { refcount_t refs; long unsigned int map; struct wait_queue_head wait; }; struct xattr_name; struct xattr_ctx { union { const void *cvalue; void *value; }; void *kvalue; size_t size; struct xattr_name *kname; unsigned int flags; }; struct io_xattr { struct file *file; struct xattr_ctx ctx; struct filename *filename; }; struct ioam6_hdr { __u8 opt_type; __u8 opt_len; char: 8; __u8 type; }; struct ioam6_schema; struct ioam6_namespace { struct rhash_head head; struct callback_head rcu; struct ioam6_schema *schema; __be16 id; __be32 data; __be64 data_wide; }; struct ioam6_pernet_data { struct mutex lock; struct rhashtable namespaces; struct rhashtable schemas; }; struct ioam6_schema { struct rhash_head head; struct callback_head rcu; struct ioam6_namespace *ns; u32 id; int len; __be32 hdr; u8 data[0]; }; struct ioam6_trace_hdr { __be16 namespace_id; char: 2; __u8 overflow: 1; __u8 nodelen: 5; __u8 remlen: 7; union { __be32 type_be32; struct { __u32 bit7: 1; __u32 bit6: 1; __u32 bit5: 1; __u32 bit4: 1; __u32 bit3: 1; __u32 bit2: 1; __u32 bit1: 1; __u32 bit0: 1; __u32 bit15: 1; __u32 bit14: 1; __u32 bit13: 1; __u32 bit12: 1; __u32 bit11: 1; __u32 bit10: 1; __u32 bit9: 1; __u32 bit8: 1; __u32 bit23: 1; __u32 bit22: 1; __u32 bit21: 1; __u32 bit20: 1; __u32 bit19: 1; __u32 bit18: 1; __u32 bit17: 1; __u32 bit16: 1; } type; }; __u8 data[0]; }; struct mpc_ioapic { unsigned char type; unsigned char apicid; unsigned char apicver; unsigned char flags; unsigned int apicaddr; }; struct mp_ioapic_gsi { u32 gsi_base; u32 gsi_end; }; struct irq_domain_ops; struct ioapic_domain_cfg { enum ioapic_domain_type type; const struct irq_domain_ops *ops; struct device_node *dev; }; struct ioapic { int nr_registers; struct IO_APIC_route_entry *saved_registers; struct mpc_ioapic mp_config; struct mp_ioapic_gsi gsi_config; struct ioapic_domain_cfg irqdomain_cfg; struct irq_domain *irqdomain; struct resource *iomem_res; }; struct ioapic_alloc_info { int pin; int node; u32 is_level: 1; u32 active_low: 1; u32 valid: 1; }; struct iocb { __u64 aio_data; __u32 aio_key; __kernel_rwf_t aio_rw_flags; __u16 aio_lio_opcode; __s16 aio_reqprio; __u32 aio_fildes; __u64 aio_buf; __u64 aio_nbytes; __s64 aio_offset; __u64 aio_reserved2; __u32 aio_flags; __u32 aio_resfd; }; struct percentile_stats { u64 total; u64 missed; }; struct latency_stat { union { struct percentile_stats ps; struct blk_rq_stat rqs; }; }; struct rq_wait { wait_queue_head_t wait; atomic_t inflight; }; struct iolatency_grp { struct blkg_policy_data pd; struct latency_stat *stats; struct latency_stat cur_stat; struct blk_iolatency *blkiolat; unsigned int max_depth; struct rq_wait rq_wait; atomic64_t window_start; atomic_t scale_cookie; u64 min_lat_nsec; u64 cur_win_nsec; u64 lat_avg; u64 nr_samples; bool ssd; struct child_latency_info child_lat; }; struct iomap_folio_ops; struct iomap { u64 addr; loff_t offset; u64 length; u16 type; 
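/* Editor's note (not in the original dump): type holds the mapping kind
 * reported by the filesystem (an IOMAP_* constant such as IOMAP_HOLE,
 * IOMAP_DELALLOC, IOMAP_MAPPED, IOMAP_UNWRITTEN or IOMAP_INLINE), and the
 * flags field below carries the IOMAP_F_* modifier bits. */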
u16 flags; struct block_device *bdev; struct dax_device *dax_dev; void *inline_data; void *private; const struct iomap_folio_ops *folio_ops; u64 validity_cookie; }; struct iomap_dio_ops; struct iomap_dio { struct kiocb *iocb; const struct iomap_dio_ops *dops; loff_t i_size; loff_t size; atomic_t ref; unsigned int flags; int error; size_t done_before; bool wait_for_completion; union { struct { struct iov_iter *iter; struct task_struct *waiter; } submit; struct { struct work_struct work; } aio; }; }; struct iomap_iter; struct iomap_dio_ops { int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); void (*submit_io)(const struct iomap_iter *, struct bio *, loff_t); struct bio_set *bio_set; }; struct iomap_folio_ops { struct folio * (*get_folio)(struct iomap_iter *, loff_t, unsigned int); void (*put_folio)(struct inode *, loff_t, unsigned int, struct folio *); bool (*iomap_valid)(struct inode *, const struct iomap *); }; struct iomap_folio_state { spinlock_t state_lock; unsigned int read_bytes_pending; atomic_t write_bytes_pending; long unsigned int state[0]; }; struct iomap_ioend { struct list_head io_list; u16 io_type; u16 io_flags; struct inode *io_inode; size_t io_size; loff_t io_offset; sector_t io_sector; struct bio io_bio; }; struct iomap_iter { struct inode *inode; loff_t pos; u64 len; s64 processed; unsigned int flags; struct iomap iomap; struct iomap srcmap; void *private; }; struct iomap_ops { int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap *, struct iomap *); int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap *); }; struct iomap_readpage_ctx { struct folio *cur_folio; bool cur_folio_in_bio; struct bio *bio; struct readahead_control *rac; }; struct iomap_swapfile_info { struct iomap iomap; struct swap_info_struct *sis; uint64_t lowest_ppage; uint64_t highest_ppage; long unsigned int nr_pages; int nr_extents; struct file *file; }; struct iomap_writepage_ctx; struct iomap_writeback_ops { int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t, unsigned int); int (*prepare_ioend)(struct iomap_ioend *, int); void (*discard_folio)(struct folio *, loff_t); }; struct iomap_writepage_ctx { struct iomap iomap; struct iomap_ioend *ioend; const struct iomap_writeback_ops *ops; u32 nr_folios; }; struct iommu_domain; struct iommu_attach_handle { struct iommu_domain *domain; }; struct iommu_ops; struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; struct iommu_group *singleton_group; u32 max_pasids; }; struct iova_bitmap; struct iommu_iotlb_gather; struct iommu_dirty_bitmap { struct iova_bitmap *bitmap; struct iommu_iotlb_gather *gather; }; struct iommu_dirty_ops { int (*set_dirty_tracking)(struct iommu_domain *, bool); int (*read_and_clear_dirty)(struct iommu_domain *, long unsigned int, size_t, long unsigned int, struct iommu_dirty_bitmap *); }; struct iova { struct rb_node node; long unsigned int pfn_hi; long unsigned int pfn_lo; }; struct iova_rcache; struct iova_domain { spinlock_t iova_rbtree_lock; struct rb_root rbroot; struct rb_node *cached_node; struct rb_node *cached32_node; long unsigned int granule; long unsigned int start_pfn; long unsigned int dma_32bit_pfn; long unsigned int max32_alloc_size; struct iova anchor; struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; }; struct iommu_dma_options { enum iommu_dma_queue_type qt; size_t fq_size; unsigned int fq_timeout; }; struct iova_fq; struct iommu_dma_cookie { enum 
iommu_dma_cookie_type type; union { struct { struct iova_domain iovad; union { struct iova_fq *single_fq; struct iova_fq *percpu_fq; }; atomic64_t fq_flush_start_cnt; atomic64_t fq_flush_finish_cnt; struct timer_list fq_timer; atomic_t fq_timer_on; }; dma_addr_t msi_iova; }; struct list_head msi_page_list; struct iommu_domain *fq_domain; struct iommu_dma_options options; struct mutex mutex; }; struct iommu_dma_msi_page { struct list_head list; dma_addr_t iova; phys_addr_t phys; }; struct iommu_domain_geometry { dma_addr_t aperture_start; dma_addr_t aperture_end; bool force_aperture; }; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); struct iommu_domain_ops; struct iopf_group; struct iommu_domain { unsigned int type; const struct iommu_domain_ops *ops; const struct iommu_dirty_ops *dirty_ops; const struct iommu_ops *owner; long unsigned int pgsize_bitmap; struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; int (*iopf_handler)(struct iopf_group *); void *fault_data; union { struct { iommu_fault_handler_t handler; void *handler_token; }; struct { struct mm_struct *mm; int users; struct list_head next; }; }; }; struct iommu_user_data_array; struct iommu_domain_ops { int (*attach_dev)(struct iommu_domain *, struct device *); int (*set_dev_pasid)(struct iommu_domain *, struct device *, ioasid_t); int (*map_pages)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, size_t, int, gfp_t, size_t *); size_t (*unmap_pages)(struct iommu_domain *, long unsigned int, size_t, size_t, struct iommu_iotlb_gather *); void (*flush_iotlb_all)(struct iommu_domain *); int (*iotlb_sync_map)(struct iommu_domain *, long unsigned int, size_t); void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); int (*cache_invalidate_user)(struct iommu_domain *, struct iommu_user_data_array *); phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); bool (*enforce_cache_coherency)(struct iommu_domain *); int (*enable_nesting)(struct iommu_domain *); int (*set_pgtable_quirks)(struct iommu_domain *, long unsigned int); void (*free)(struct iommu_domain *); }; struct iommu_fault_page_request { u32 flags; u32 pasid; u32 grpid; u32 perm; u64 addr; u64 private_data[2]; }; struct iommu_fault { u32 type; struct iommu_fault_page_request prm; }; struct iopf_queue; struct iommu_fault_param { struct mutex lock; refcount_t users; struct callback_head rcu; struct device *dev; struct iopf_queue *queue; struct list_head queue_list; struct list_head partial; struct list_head faults; }; struct iommu_fwspec { struct fwnode_handle *iommu_fwnode; u32 flags; unsigned int num_ids; u32 ids[0]; }; struct iommu_group { struct kobject kobj; struct kobject *devices_kobj; struct list_head devices; struct xarray pasid_array; struct mutex mutex; void *iommu_data; void (*iommu_data_release)(void *); char *name; int id; struct iommu_domain *default_domain; struct iommu_domain *blocking_domain; struct iommu_domain *domain; struct list_head entry; unsigned int owner_cnt; void *owner; }; struct iommu_group_attribute { struct attribute attr; ssize_t (*show)(struct iommu_group *, char *); ssize_t (*store)(struct iommu_group *, const char *, size_t); }; struct iommu_iotlb_gather { long unsigned int start; long unsigned int end; size_t pgsize; struct list_head freelist; bool queued; }; struct iommu_user_data; struct of_phandle_args; struct iopf_fault; struct iommu_page_response; struct iommu_ops { bool (*capable)(struct device *, enum iommu_cap); void 
* (*hw_info)(struct device *, u32 *, u32 *); struct iommu_domain * (*domain_alloc)(unsigned int); struct iommu_domain * (*domain_alloc_user)(struct device *, u32, struct iommu_domain *, const struct iommu_user_data *); struct iommu_domain * (*domain_alloc_paging)(struct device *); struct iommu_domain * (*domain_alloc_sva)(struct device *, struct mm_struct *); struct iommu_device * (*probe_device)(struct device *); void (*release_device)(struct device *); void (*probe_finalize)(struct device *); struct iommu_group * (*device_group)(struct device *); void (*get_resv_regions)(struct device *, struct list_head *); int (*of_xlate)(struct device *, const struct of_phandle_args *); bool (*is_attach_deferred)(struct device *); int (*dev_enable_feat)(struct device *, enum iommu_dev_features); int (*dev_disable_feat)(struct device *, enum iommu_dev_features); void (*page_response)(struct device *, struct iopf_fault *, struct iommu_page_response *); int (*def_domain_type)(struct device *); void (*remove_dev_pasid)(struct device *, ioasid_t, struct iommu_domain *); const struct iommu_domain_ops *default_domain_ops; long unsigned int pgsize_bitmap; struct module *owner; struct iommu_domain *identity_domain; struct iommu_domain *blocked_domain; struct iommu_domain *release_domain; struct iommu_domain *default_domain; u8 user_pasid_table: 1; }; struct iommu_page_response { u32 pasid; u32 grpid; u32 code; }; struct iommu_resv_region { struct list_head list; phys_addr_t start; size_t length; int prot; enum iommu_resv_type type; void (*free)(struct device *, struct iommu_resv_region *); }; struct iommu_user_data { unsigned int type; void *uptr; size_t len; }; struct iommu_user_data_array { unsigned int type; void *uptr; size_t entry_len; u32 entry_num; }; struct iopf_fault { struct iommu_fault fault; struct list_head list; }; struct iopf_group { struct iopf_fault last_fault; struct list_head faults; size_t fault_count; struct list_head pending_node; struct work_struct work; struct iommu_attach_handle *attach_handle; struct iommu_fault_param *fault_param; struct list_head node; u32 cookie; }; struct iopf_queue { struct workqueue_struct *wq; struct list_head devices; struct mutex lock; }; struct ioremap_desc { unsigned int flags; }; struct iova_magazine; struct iova_cpu_rcache { spinlock_t lock; struct iova_magazine *loaded; struct iova_magazine *prev; }; struct iova_fq_entry { long unsigned int iova_pfn; long unsigned int pages; struct list_head freelist; u64 counter; }; struct iova_fq { spinlock_t lock; unsigned int head; unsigned int tail; unsigned int mod_mask; struct iova_fq_entry entries[0]; }; struct iova_magazine { union { long unsigned int size; struct iova_magazine *next; }; long unsigned int pfns[127]; }; struct iova_rcache { spinlock_t lock; unsigned int depot_size; struct iova_magazine *depot; struct iova_cpu_rcache *cpu_rcaches; struct iova_domain *iovad; struct delayed_work work; }; struct ip6_flowlabel { struct ip6_flowlabel *next; __be32 label; atomic_t users; struct in6_addr dst; struct ipv6_txoptions *opt; long unsigned int linger; struct callback_head rcu; u8 share; union { struct pid *pid; kuid_t uid; } owner; long unsigned int lastuse; long unsigned int expires; struct net *fl_net; }; struct ip6_frag_state { u8 *prevhdr; unsigned int hlen; unsigned int mtu; unsigned int left; int offset; int ptr; int hroom; int troom; __be32 frag_id; u8 nexthdr; }; struct ipv6hdr; struct ip6_fraglist_iter { struct ipv6hdr *tmp_hdr; struct sk_buff *frag; int offset; unsigned int hlen; __be32 frag_id; u8 
nexthdr; }; struct ip6_mh { __u8 ip6mh_proto; __u8 ip6mh_hdrlen; __u8 ip6mh_type; __u8 ip6mh_reserved; __u16 ip6mh_cksum; __u8 data[0]; }; struct sockaddr_in6 { short unsigned int sin6_family; __be16 sin6_port; __be32 sin6_flowinfo; struct in6_addr sin6_addr; __u32 sin6_scope_id; }; struct ip6_mtuinfo { struct sockaddr_in6 ip6m_addr; __u32 ip6m_mtu; }; struct ip6_ra_chain { struct ip6_ra_chain *next; struct sock *sk; int sel; void (*destructor)(struct sock *); }; struct ip6_rt_info { struct in6_addr daddr; struct in6_addr saddr; u_int32_t mark; }; struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; long unsigned int sf_count[2]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; struct callback_head rcu; }; struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct callback_head rcu; struct in6_addr sl_addr[0]; }; struct ip6_tnl { struct ip6_tnl *next; struct net_device *dev; netdevice_tracker dev_tracker; struct net *net; struct __ip6_tnl_parm parms; struct flowi fl; struct dst_cache dst_cache; struct gro_cells gro_cells; int err_count; long unsigned int err_time; __u32 i_seqno; atomic_t o_seqno; int hlen; int tun_hlen; int encap_hlen; struct ip_tunnel_encap encap; int mlink; }; struct ip6_tnl_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *); int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); }; struct ip6_tnl_net { struct net_device *fb_tnl_dev; struct ip6_tnl *tnls_r_l[32]; struct ip6_tnl *tnls_wc[1]; struct ip6_tnl **tnls[2]; struct ip6_tnl *collect_md_tun; }; struct ip6_tnl_parm { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; __be32 flowinfo; __u32 flags; struct in6_addr laddr; struct in6_addr raddr; }; struct ip6_tnl_parm2 { char name[16]; int link; __u8 proto; __u8 encap_limit; __u8 hop_limit; __be32 flowinfo; __u32 flags; struct in6_addr laddr; struct in6_addr raddr; __be16 i_flags; __be16 o_flags; __be32 i_key; __be32 o_key; }; struct ip6addrlbl_entry { struct in6_addr prefix; int prefixlen; int ifindex; int addrtype; u32 label; struct hlist_node list; struct callback_head rcu; }; struct ip6addrlbl_init_table { const struct in6_addr *prefix; int prefixlen; u32 label; }; struct ip6fl_iter_state { struct seq_net_private p; struct pid_namespace *pid_ns; int bucket; }; struct ip6gre_net { struct ip6_tnl *tunnels[128]; struct ip6_tnl *collect_md_tun; struct ip6_tnl *collect_md_tun_erspan; struct net_device *fb_tunnel_dev; }; struct ip6rd_flowi { struct flowi6 fl6; struct in6_addr gateway; }; struct ip6t_ip6 { struct in6_addr src; struct in6_addr dst; struct in6_addr smsk; struct in6_addr dmsk; char iniface[16]; char outiface[16]; unsigned char iniface_mask[16]; unsigned char outiface_mask[16]; __u16 proto; __u8 tos; __u8 flags; __u8 invflags; }; struct xt_counters { __u64 pcnt; __u64 bcnt; }; struct ip6t_entry { struct ip6t_ip6 ipv6; unsigned int nfcache; __u16 target_offset; __u16 next_offset; unsigned int comefrom; struct xt_counters counters; unsigned char elems[0]; }; struct xt_target; struct xt_entry_target { union { struct { __u16 target_size; char name[29]; __u8 revision; } user; struct { __u16 target_size; struct xt_target *target; } kernel; __u16 target_size; } u; unsigned char data[0]; }; struct xt_error_target { struct xt_entry_target target; char errorname[30]; }; struct ip6t_error { struct ip6t_entry entry; struct xt_error_target target; }; struct 
ip6t_get_entries { char name[32]; unsigned int size; struct ip6t_entry entrytable[0]; }; struct ip6t_getinfo { char name[32]; unsigned int valid_hooks; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_entries; unsigned int size; }; struct ip6t_icmp { __u8 type; __u8 code[2]; __u8 invflags; }; struct ip6t_replace { char name[32]; unsigned int valid_hooks; unsigned int num_entries; unsigned int size; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_counters; struct xt_counters *counters; struct ip6t_entry entries[0]; }; struct xt_standard_target { struct xt_entry_target target; int verdict; }; struct ip6t_standard { struct ip6t_entry entry; struct xt_standard_target target; }; struct ip_auth_hdr { __u8 nexthdr; __u8 hdrlen; __be16 reserved; __be32 spi; __be32 seq_no; __u8 auth_data[0]; }; struct ip_beet_phdr { __u8 nexthdr; __u8 hdrlen; __u8 padlen; __u8 reserved; }; struct ip_comp_hdr { __u8 nexthdr; __u8 flags; __be16 cpi; }; struct ip_conntrack_stat { unsigned int found; unsigned int invalid; unsigned int insert; unsigned int insert_failed; unsigned int clash_resolve; unsigned int drop; unsigned int early_drop; unsigned int error; unsigned int expect_new; unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; unsigned int chaintoolong; }; struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[2]; u8 init[2]; u8 last_dir; u8 flags; }; struct ip_ct_tcp_state { u_int32_t td_end; u_int32_t td_maxend; u_int32_t td_maxwin; u_int32_t td_maxack; u_int8_t td_scale; u_int8_t flags; }; struct ip_ct_tcp { struct ip_ct_tcp_state seen[2]; u_int8_t state; u_int8_t last_dir; u_int8_t retrans; u_int8_t last_index; u_int32_t last_seq; u_int32_t last_ack; u_int32_t last_end; u_int16_t last_win; u_int8_t last_wscale; u_int8_t last_flags; }; struct ip_esp_hdr { __be32 spi; __be32 seq_no; __u8 enc_data[0]; }; struct ip_frag_state { bool DF; unsigned int hlen; unsigned int ll_rs; unsigned int mtu; unsigned int left; int offset; int ptr; __be16 not_last_frag; }; struct iphdr; struct ip_fraglist_iter { struct sk_buff *frag; struct iphdr *iph; int offset; unsigned int hlen; }; struct ip_sf_list; struct ip_mc_list { struct in_device *interface; __be32 multiaddr; unsigned int sfmode; struct ip_sf_list *sources; struct ip_sf_list *tomb; long unsigned int sfcount[2]; union { struct ip_mc_list *next; struct ip_mc_list *next_rcu; }; struct ip_mc_list *next_hash; struct timer_list timer; int users; refcount_t refcnt; spinlock_t lock; char tm_running; char reporter; char unsolicit_count; char loaded; unsigned char gsquery; unsigned char crcount; struct callback_head rcu; }; struct ip_mreqn { struct in_addr imr_multiaddr; struct in_addr imr_address; int imr_ifindex; }; struct ip_sf_socklist; struct ip_mc_socklist { struct ip_mc_socklist *next_rcu; struct ip_mreqn multi; unsigned int sfmode; struct ip_sf_socklist *sflist; struct callback_head rcu; }; struct ip_mreq_source { __be32 imr_multiaddr; __be32 imr_interface; __be32 imr_sourceaddr; }; struct ip_msfilter { __be32 imsf_multiaddr; __be32 imsf_interface; __u32 imsf_fmode; __u32 imsf_numsrc; union { __be32 imsf_slist[1]; struct { struct {} __empty_imsf_slist_flex; __be32 imsf_slist_flex[0]; }; }; }; struct ip_ra_chain { struct ip_ra_chain *next; struct sock *sk; union { void (*destructor)(struct sock *); struct sock *saved_sk; }; struct callback_head rcu; }; struct kvec { void *iov_base; size_t iov_len; }; struct ip_reply_arg { struct kvec iov[1]; int flags; __wsum csum; int csumoffset; int 
bound_dev_if; u8 tos; kuid_t uid; }; struct ip_rt_info { __be32 daddr; __be32 saddr; u_int8_t tos; u_int32_t mark; }; struct ip_sf_list { struct ip_sf_list *sf_next; long unsigned int sf_count[2]; __be32 sf_inaddr; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; }; struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct callback_head rcu; __be32 sl_addr[0]; }; struct iphdr { __u8 ihl: 4; __u8 version: 4; __u8 tos; __be16 tot_len; __be16 id; __be16 frag_off; __u8 ttl; __u8 protocol; __sum16 check; union { struct { __be32 saddr; __be32 daddr; }; struct { __be32 saddr; __be32 daddr; } addrs; }; }; struct ip_tunnel_parm_kern { char name[16]; long unsigned int i_flags[1]; long unsigned int o_flags[1]; __be32 i_key; __be32 o_key; int link; struct iphdr iph; }; struct ip_tunnel_prl_entry; struct ip_tunnel { struct ip_tunnel *next; struct hlist_node hash_node; struct net_device *dev; netdevice_tracker dev_tracker; struct net *net; long unsigned int err_time; int err_count; u32 i_seqno; atomic_t o_seqno; int tun_hlen; u32 index; u8 erspan_ver; u8 dir; u16 hwid; struct dst_cache dst_cache; struct ip_tunnel_parm_kern parms; int mlink; int encap_hlen; int hlen; struct ip_tunnel_encap encap; struct ip_tunnel_prl_entry *prl; unsigned int prl_count; unsigned int ip_tnl_net_id; struct gro_cells gro_cells; __u32 fwmark; bool collect_md; bool ignore_df; }; struct ip_tunnel_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *); int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); int (*err_handler)(struct sk_buff *, u32); }; struct rtnl_link_ops; struct ip_tunnel_net { struct net_device *fb_tunnel_dev; struct rtnl_link_ops *rtnl_link_ops; struct hlist_head tunnels[128]; struct ip_tunnel *collect_md_tun; int type; }; struct ip_tunnel_parm { char name[16]; int link; __be16 i_flags; __be16 o_flags; __be32 i_key; __be32 o_key; struct iphdr iph; }; struct ip_tunnel_prl { __be32 addr; __u16 flags; __u16 __reserved; __u32 datalen; __u32 __reserved2; }; struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry *next; __be32 addr; u16 flags; struct callback_head callback_head; }; struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; __kernel_uid32_t cuid; __kernel_gid32_t cgid; __kernel_mode_t mode; unsigned char __pad1[0]; short unsigned int seq; short unsigned int __pad2; __kernel_ulong_t __unused1; __kernel_ulong_t __unused2; }; struct ipc_ids { int in_use; short unsigned int seq; struct rw_semaphore rwsem; struct idr ipcs_idr; int max_idx; int last_idx; struct rhashtable key_ht; }; struct ipc_namespace { struct ipc_ids ids[3]; int sem_ctls[4]; int used_sems; unsigned int msg_ctlmax; unsigned int msg_ctlmnb; unsigned int msg_ctlmni; struct percpu_counter percpu_msg_bytes; struct percpu_counter percpu_msg_hdrs; size_t shm_ctlmax; size_t shm_ctlall; long unsigned int shm_tot; int shm_ctlmni; int shm_rmid_forced; struct notifier_block ipcns_nb; struct vfsmount *mq_mnt; unsigned int mq_queues_count; unsigned int mq_queues_max; unsigned int mq_msg_max; unsigned int mq_msgsize_max; unsigned int mq_msg_default; unsigned int mq_msgsize_default; struct ctl_table_set mq_set; struct ctl_table_header *mq_sysctls; struct ctl_table_set ipc_set; struct ctl_table_header *ipc_sysctls; struct user_namespace *user_ns; struct ucounts *ucounts; struct llist_node mnt_llist; struct ns_common ns; }; struct ipc_params; struct kern_ipc_perm; struct ipc_ops { int (*getnew)(struct ipc_namespace *, struct ipc_params *); int 
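/*
 * Example: a bounds-checked read of the struct iphdr defined above, in
 * the XDP style used throughout this project. ihl counts 32-bit words,
 * so the header occupies ihl * 4 bytes. A hedged sketch with
 * hypothetical names; a full parser would also check iph->version.
 *
 *   static int parse_iphdr_example(void *data, void *data_end, __u8 *proto)
 *   {
 *           struct iphdr *iph = data;
 *
 *           if ((void *)(iph + 1) > data_end)
 *                   return -1;                      // truncated header
 *           if (iph->ihl < 5 || data + iph->ihl * 4 > data_end)
 *                   return -1;                      // bad/truncated options
 *           *proto = iph->protocol;
 *           return iph->ihl * 4;                    // offset of payload
 *   }
 */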
(*associate)(struct kern_ipc_perm *, int); int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); }; struct ipc_params { key_t key; int flg; union { size_t size; int nsems; } u; }; struct ipc_perm { __kernel_key_t key; __kernel_uid_t uid; __kernel_gid_t gid; __kernel_uid_t cuid; __kernel_gid_t cgid; __kernel_mode_t mode; short unsigned int seq; }; struct ipc_proc_iface { const char *path; const char *header; int ids; int (*show)(struct seq_file *, void *); }; struct ipc_proc_iter { struct ipc_namespace *ns; struct pid_namespace *pid_ns; struct ipc_proc_iface *iface; }; struct ipc_security_struct { u16 sclass; u32 sid; }; struct sockcm_cookie { u64 transmit_time; u32 mark; u32 tsflags; }; struct ipcm6_cookie { struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __u16 gso_size; __s8 dontfrag; struct ipv6_txoptions *opt; }; struct ipcm_cookie { struct sockcm_cookie sockc; __be32 addr; int oif; struct ip_options_rcu *opt; __u8 protocol; __u8 ttl; __s16 tos; char priority; __u16 gso_size; }; struct ipfrag_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; }; struct sk_buff *next_frag; int frag_run_len; int ip_defrag_offset; }; struct ipq { struct inet_frag_queue q; u8 ecn; u16 max_df_size; int iif; unsigned int rid; struct inet_peer *peer; }; struct ipstats_mib { u64 mibs[38]; struct u64_stats_sync syncp; }; struct ipt_ip { struct in_addr src; struct in_addr dst; struct in_addr smsk; struct in_addr dmsk; char iniface[16]; char outiface[16]; unsigned char iniface_mask[16]; unsigned char outiface_mask[16]; __u16 proto; __u8 flags; __u8 invflags; }; struct ipt_entry { struct ipt_ip ip; unsigned int nfcache; __u16 target_offset; __u16 next_offset; unsigned int comefrom; struct xt_counters counters; unsigned char elems[0]; }; struct ipt_error { struct ipt_entry entry; struct xt_error_target target; }; struct ipt_get_entries { char name[32]; unsigned int size; struct ipt_entry entrytable[0]; }; struct ipt_getinfo { char name[32]; unsigned int valid_hooks; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_entries; unsigned int size; }; struct ipt_icmp { __u8 type; __u8 code[2]; __u8 invflags; }; struct ipt_replace { char name[32]; unsigned int valid_hooks; unsigned int num_entries; unsigned int size; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int num_counters; struct xt_counters *counters; struct ipt_entry entries[0]; }; struct ipt_standard { struct ipt_entry entry; struct xt_standard_target target; }; struct ipv6_ac_socklist { struct in6_addr acl_addr; int acl_ifindex; struct ipv6_ac_socklist *acl_next; }; struct udp_table; struct ipv6_bpf_stub { int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); struct sock * (*udp6_lib_lookup)(const struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); int (*ipv6_setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*ipv6_getsockopt)(struct sock *, int, int, sockptr_t, sockptr_t); int (*ipv6_dev_get_saddr)(struct net *, const struct net_device *, const struct in6_addr *, unsigned int, struct in6_addr *); }; struct ipv6_destopt_hao { __u8 type; __u8 length; struct in6_addr addr; } __attribute__((packed)); struct ipv6_fl_socklist { struct ipv6_fl_socklist *next; struct ip6_flowlabel *fl; struct callback_head rcu; }; struct ipv6_mc_socklist { struct in6_addr addr; int ifindex; unsigned int sfmode; struct ipv6_mc_socklist *next; struct ip6_sf_socklist *sflist; struct callback_head rcu; }; struct 
ipv6_mreq { struct in6_addr ipv6mr_multiaddr; int ipv6mr_ifindex; }; struct ipv6_opt_hdr { __u8 nexthdr; __u8 hdrlen; }; struct ipv6_params { __s32 disable_ipv6; __s32 autoconf; }; struct ipv6_pinfo { struct in6_addr saddr; struct in6_pktinfo sticky_pktinfo; const struct in6_addr *daddr_cache; const struct in6_addr *saddr_cache; __be32 flow_label; __u32 frag_size; s16 hop_limit; u8 mcast_hops; int ucast_oif; int mcast_oif; union { struct { __u16 srcrt: 1; __u16 osrcrt: 1; __u16 rxinfo: 1; __u16 rxoinfo: 1; __u16 rxhlim: 1; __u16 rxohlim: 1; __u16 hopopts: 1; __u16 ohopopts: 1; __u16 dstopts: 1; __u16 odstopts: 1; __u16 rxflow: 1; __u16 rxtclass: 1; __u16 rxpmtu: 1; __u16 rxorigdstaddr: 1; __u16 recvfragsize: 1; } bits; __u16 all; } rxopt; __u8 srcprefs; __u8 pmtudisc; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; __u32 dst_cookie; struct ipv6_mc_socklist *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; struct ipv6_fl_socklist *ipv6_fl_list; struct ipv6_txoptions *opt; struct sk_buff *pktoptions; struct sk_buff *rxpmtu; struct inet6_cork cork; }; struct ipv6_route_iter { struct seq_net_private p; struct fib6_walker w; loff_t skip; struct fib6_table *tbl; int sernum; }; struct ipv6_rpl_sr_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; __u32 cmpre: 4; __u32 cmpri: 4; __u32 reserved: 4; __u32 pad: 4; __u32 reserved1: 16; union { struct { struct {} __empty_addr; struct in6_addr addr[0]; }; struct { struct {} __empty_data; __u8 data[0]; }; } segments; }; struct ipv6_rt_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; }; struct ipv6_saddr_dst { const struct in6_addr *addr; int ifindex; int scope; int label; unsigned int prefs; }; struct ipv6_saddr_score { int rule; int addr_type; struct inet6_ifaddr *ifa; long unsigned int scorebits[1]; int scopedist; int matchlen; }; struct ipv6_sr_hdr { __u8 nexthdr; __u8 hdrlen; __u8 type; __u8 segments_left; __u8 first_segment; __u8 flags; __u16 tag; struct in6_addr segments[0]; }; struct neigh_table; struct ipv6_stub { int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); int (*ipv6_route_input)(struct sk_buff *); struct fib6_table * (*fib6_get_table)(struct net *, u32); int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); void (*fib6_nh_release)(struct fib6_nh *); void (*fib6_nh_release_dsts)(struct fib6_nh *); void (*fib6_update_sernum)(struct net *, struct fib6_info *); int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); void (*udpv6_encap_enable)(void); void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); void (*xfrm6_local_rxpmtu)(struct sk_buff *, u32); int (*xfrm6_udp_encap_rcv)(struct sock *, struct sk_buff *); struct sk_buff * (*xfrm6_gro_udp_encap_rcv)(struct sock *, struct list_head *, 
struct sk_buff *); int (*xfrm6_rcv_encap)(struct sk_buff *, int, __be32, int); struct neigh_table *nd_tbl; int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); int (*ip6_xmit)(const struct sock *, struct sk_buff *, struct flowi6 *, __u32, struct ipv6_txoptions *, int, u32); }; struct ipv6_txoptions { refcount_t refcnt; int tot_len; __u16 opt_flen; __u16 opt_nflen; struct ipv6_opt_hdr *hopopt; struct ipv6_opt_hdr *dst0opt; struct ipv6_rt_hdr *srcrt; struct ipv6_opt_hdr *dst1opt; struct callback_head rcu; }; struct ipv6_tel_txoption { struct ipv6_txoptions ops; __u8 dst_opt[8]; }; struct ipv6_tlv_tnl_enc_lim { __u8 type; __u8 length; __u8 encap_limit; }; struct ipv6hdr { __u8 priority: 4; __u8 version: 4; __u8 flow_lbl[3]; __be16 payload_len; __u8 nexthdr; __u8 hop_limit; union { struct { struct in6_addr saddr; struct in6_addr daddr; }; struct { struct in6_addr saddr; struct in6_addr daddr; } addrs; }; }; struct ir_raw_event { union { u32 duration; u32 carrier; }; u8 duty_cycle; unsigned int pulse: 1; unsigned int overflow: 1; unsigned int timeout: 1; unsigned int carrier_report: 1; }; struct rc_dev; struct ir_raw_event_ctrl { struct list_head list; struct task_struct *thread; struct { union { struct __kfifo kfifo; struct ir_raw_event *type; const struct ir_raw_event *const_type; char (*rectype)[0]; struct ir_raw_event *ptr; const struct ir_raw_event *ptr_const; }; struct ir_raw_event buf[512]; } kfifo; ktime_t last_event; struct rc_dev *dev; spinlock_t edge_spinlock; struct timer_list edge_handle; struct ir_raw_event prev_ev; struct ir_raw_event this_ev; u32 bpf_sample; struct bpf_prog_array *progs; }; struct ir_raw_handler { struct list_head list; u64 protocols; int (*decode)(struct rc_dev *, struct ir_raw_event); int (*encode)(enum rc_proto, u32, struct ir_raw_event *, unsigned int); u32 carrier; u32 min_timeout; int (*raw_register)(struct rc_dev *); int (*raw_unregister)(struct rc_dev *); }; struct ir_raw_timings_manchester { unsigned int leader_pulse; unsigned int leader_space; unsigned int clock; unsigned int invert: 1; unsigned int trailer_space; }; struct ir_raw_timings_pd { unsigned int header_pulse; unsigned int header_space; unsigned int bit_pulse; unsigned int bit_space[2]; unsigned int trailer_pulse; unsigned int trailer_space; unsigned int msb_first: 1; }; struct ir_raw_timings_pl { unsigned int header_pulse; unsigned int bit_space; unsigned int bit_pulse[2]; unsigned int trailer_space; unsigned int msb_first: 1; }; struct irq_affinity { unsigned int pre_vectors; unsigned int post_vectors; unsigned int nr_sets; unsigned int set_size[4]; void (*calc_sets)(struct irq_affinity *, unsigned int); void *priv; }; struct irq_affinity_desc { struct cpumask mask; unsigned int is_managed: 1; }; struct irq_affinity_devres { unsigned int count; unsigned int irq[0]; }; struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; void (*notify)(struct irq_affinity_notify *, const cpumask_t *); void (*release)(struct kref *); }; struct uv_alloc_info { int limit; int blade; long unsigned int offset; char *name; }; struct msi_desc; struct irq_alloc_info { enum irq_alloc_type type; u32 flags; u32 devid; irq_hw_number_t hwirq; const struct cpumask *mask; struct msi_desc *desc; void *data; union { struct ioapic_alloc_info ioapic; struct uv_alloc_info uv; }; }; typedef struct irq_alloc_info 
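/*
 * Example: stepping over the IPv6 extension-header chain with the
 * ipv6hdr and ipv6_opt_hdr definitions above. For hop-by-hop (0),
 * routing (43) and destination options (60) headers, hdrlen counts
 * 8-byte units excluding the first; AH and the fragment header encode
 * their sizes differently and are left out of this hedged sketch.
 *
 *   static int skip_ip6_ext_example(void *data, void *data_end, __u8 *proto)
 *   {
 *           struct ipv6hdr *ip6 = data;
 *           void *p;
 *           __u8 nh;
 *
 *           if ((void *)(ip6 + 1) > data_end)
 *                   return -1;
 *           nh = ip6->nexthdr;
 *           p = ip6 + 1;
 *           while (nh == 0 || nh == 43 || nh == 60) {
 *                   struct ipv6_opt_hdr *opt = p;
 *
 *                   if ((void *)(opt + 1) > data_end ||
 *                       p + (opt->hdrlen + 1) * 8 > data_end)
 *                           return -1;
 *                   nh = opt->nexthdr;
 *                   p += (opt->hdrlen + 1) * 8;
 *           }
 *           *proto = nh;
 *           return p - data;        // offset of the upper-layer header
 *   }
 */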
msi_alloc_info_t; struct irq_data; struct msi_msg; struct irq_chip { const char *name; unsigned int (*irq_startup)(struct irq_data *); void (*irq_shutdown)(struct irq_data *); void (*irq_enable)(struct irq_data *); void (*irq_disable)(struct irq_data *); void (*irq_ack)(struct irq_data *); void (*irq_mask)(struct irq_data *); void (*irq_mask_ack)(struct irq_data *); void (*irq_unmask)(struct irq_data *); void (*irq_eoi)(struct irq_data *); int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); int (*irq_retrigger)(struct irq_data *); int (*irq_set_type)(struct irq_data *, unsigned int); int (*irq_set_wake)(struct irq_data *, unsigned int); void (*irq_bus_lock)(struct irq_data *); void (*irq_bus_sync_unlock)(struct irq_data *); void (*irq_suspend)(struct irq_data *); void (*irq_resume)(struct irq_data *); void (*irq_pm_shutdown)(struct irq_data *); void (*irq_calc_mask)(struct irq_data *); void (*irq_print_chip)(struct irq_data *, struct seq_file *); int (*irq_request_resources)(struct irq_data *); void (*irq_release_resources)(struct irq_data *); void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); int (*irq_set_vcpu_affinity)(struct irq_data *, void *); void (*ipi_send_single)(struct irq_data *, unsigned int); void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); int (*irq_nmi_setup)(struct irq_data *); void (*irq_nmi_teardown)(struct irq_data *); long unsigned int flags; }; struct irq_chip_regs { long unsigned int enable; long unsigned int disable; long unsigned int mask; long unsigned int ack; long unsigned int eoi; long unsigned int type; }; struct irq_desc; typedef void (*irq_flow_handler_t)(struct irq_desc *); struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; irq_flow_handler_t handler; u32 type; u32 mask_cache_priv; u32 *mask_cache; }; struct irq_chip_generic { raw_spinlock_t lock; void *reg_base; u32 (*reg_readl)(void *); void (*reg_writel)(u32, void *); void (*suspend)(struct irq_chip_generic *); void (*resume)(struct irq_chip_generic *); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; long unsigned int installed; long unsigned int unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[0]; }; struct irq_common_data { unsigned int state_use_accessors; unsigned int node; void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; cpumask_var_t effective_affinity; }; struct irq_data { u32 mask; unsigned int irq; irq_hw_number_t hwirq; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; struct irq_data *parent_data; void *chip_data; }; struct irqstat; struct irqaction; struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; struct irqstat *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; unsigned int wake_depth; unsigned int tot_count; unsigned int irq_count; long unsigned int last_unhandled; unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; const struct cpumask 
*affinity_hint; struct irq_affinity_notify *affinity_notify; cpumask_var_t pending_mask; long unsigned int threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; unsigned int nr_actions; unsigned int no_suspend_depth; unsigned int cond_suspend_depth; unsigned int force_resume_depth; struct proc_dir_entry *dir; struct callback_head rcu; struct kobject kobj; struct mutex request_mutex; int parent_irq; struct module *owner; const char *name; struct hlist_node resend_node; long: 64; long: 64; long: 64; }; typedef struct irq_desc *vector_irq_t[256]; struct irq_desc_devres { unsigned int from; unsigned int cnt; }; struct irq_devres { unsigned int irq; void *dev_id; }; struct irq_domain_chip_generic; struct msi_parent_ops; struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; unsigned int mapcount; struct mutex mutex; struct irq_domain *root; struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; struct device *dev; struct device *pm_dev; struct irq_domain *parent; const struct msi_parent_ops *msi_parent_ops; void (*exit)(struct irq_domain *); irq_hw_number_t hwirq_max; unsigned int revmap_size; struct xarray revmap_tree; struct irq_data *revmap[0]; }; struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; void (*exit)(struct irq_chip_generic *); struct irq_chip_generic *gc[0]; }; struct irq_domain_chip_generic_info { const char *name; irq_flow_handler_t handler; unsigned int irqs_per_chip; unsigned int num_ct; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; int (*init)(struct irq_chip_generic *); void (*exit)(struct irq_chip_generic *); }; struct irq_domain_info { struct fwnode_handle *fwnode; unsigned int domain_flags; unsigned int size; irq_hw_number_t hwirq_max; int direct_max; unsigned int hwirq_base; unsigned int virq_base; enum irq_domain_bus_token bus_token; const char *name_suffix; const struct irq_domain_ops *ops; void *host_data; struct irq_domain *parent; struct irq_domain_chip_generic_info *dgc_info; int (*init)(struct irq_domain *); void (*exit)(struct irq_domain *); }; struct irq_fwspec; struct irq_domain_ops { int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); void (*unmap)(struct irq_domain *, unsigned int); int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); void (*free)(struct irq_domain *, unsigned int, unsigned int); int (*activate)(struct irq_domain *, struct irq_data *, bool); void (*deactivate)(struct irq_domain *, struct irq_data *); int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); }; struct irq_fwspec { struct fwnode_handle *fwnode; int param_count; u32 param[16]; }; struct irq_glue { struct irq_affinity_notify notify; struct cpu_rmap *rmap; u16 index; }; struct irq_info { u8 bus; u8 devfn; struct { u8 link; u16 bitmap; } __attribute__((packed)) irq[4]; u8 slot; u8 rfu; }; struct irq_info___2 { struct hlist_node node; int irq; spinlock_t lock; struct list_head *head; }; struct irq_matrix { unsigned 
int matrix_bits; unsigned int alloc_start; unsigned int alloc_end; unsigned int alloc_size; unsigned int global_available; unsigned int global_reserved; unsigned int systembits_inalloc; unsigned int total_allocated; unsigned int online_maps; struct cpumap *maps; long unsigned int *system_map; long unsigned int scratch_map[0]; }; struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; unsigned char triggering; unsigned char polarity; unsigned char shareable; bool override; }; struct irq_pin_list { struct list_head list; int apic; int pin; }; struct irq_router { char *name; u16 vendor; u16 device; int (*get)(struct pci_dev *, struct pci_dev *, int); int (*set)(struct pci_dev *, struct pci_dev *, int, int); int (*lvl)(struct pci_dev *, struct pci_dev *, int, int); }; struct irq_router_handler { u16 vendor; int (*probe)(struct irq_router *, struct pci_dev *, u16); }; struct irq_routing_table { u32 signature; u16 version; u16 size; u8 rtr_bus; u8 rtr_devfn; u16 exclusive_irqs; u16 rtr_vendor; u16 rtr_device; u32 miniport_data; u8 rfu[11]; u8 checksum; struct irq_info slots[0]; }; struct irq_stack { char stack[16384]; }; typedef irqreturn_t (*irq_handler_t)(int, void *); struct irqaction { irq_handler_t handler; void *dev_id; void *percpu_dev_id; struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; struct irqaction *secondary; unsigned int irq; unsigned int flags; long unsigned int thread_flags; long unsigned int thread_mask; const char *name; struct proc_dir_entry *dir; long: 64; long: 64; long: 64; long: 64; }; struct irqchip_fwid { struct fwnode_handle fwnode; unsigned int type; char *name; phys_addr_t *pa; }; struct irqentry_state { union { bool exit_rcu; bool lockdep; }; }; typedef struct irqentry_state irqentry_state_t; struct irqstat { unsigned int cnt; }; struct irqtrace_events { unsigned int irq_events; long unsigned int hardirq_enable_ip; long unsigned int hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; long unsigned int softirq_disable_ip; long unsigned int softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; }; struct irt_routing_table { u32 signature; u8 size; u8 used; u16 exclusive_irqs; struct irq_info slots[0]; }; struct isoch_data { u32 maxbw; u32 n; u32 y; u32 l; u32 rq; struct agp_3_5_dev *dev; }; struct iter_state { struct seq_net_private p; unsigned int bucket; }; struct iterators_bpf { struct bpf_loader_ctx ctx; struct { struct bpf_map_desc rodata; } maps; struct { struct bpf_prog_desc dump_bpf_map; struct bpf_prog_desc dump_bpf_prog; } progs; struct { int dump_bpf_map_fd; int dump_bpf_prog_fd; } links; }; struct itimerspec64 { struct timespec64 it_interval; struct timespec64 it_value; }; struct kobj_attribute { struct attribute attr; ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); }; struct iw_node_attr { struct kobj_attribute kobj_attr; int nid; }; struct transaction_s; typedef struct transaction_s transaction_t; struct jbd2_inode { transaction_t *i_transaction; transaction_t *i_next_transaction; struct list_head i_list; struct inode *i_vfs_inode; long unsigned int i_flags; loff_t i_dirty_start; loff_t i_dirty_end; }; struct jbd2_journal_block_tail { __be32 t_checksum; }; typedef struct journal_s journal_t; struct jbd2_journal_handle { union { transaction_t *h_transaction; journal_t *h_journal; }; handle_t *h_rsv_handle; int h_total_credits; 
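/*
 * Example: the transaction lifecycle that produces and consumes the
 * jbd2 handle being defined here, as an ext4-style filesystem would
 * drive it. A sketch of the in-kernel API with error handling trimmed.
 *
 *   handle_t *h = jbd2_journal_start(journal, 1);    // reserve 1 credit
 *   if (IS_ERR(h))
 *           return PTR_ERR(h);
 *   err = jbd2_journal_get_write_access(h, bh);      // declare intent
 *   if (!err) {
 *           ...modify the buffer under the handle...
 *           err = jbd2_journal_dirty_metadata(h, bh);
 *   }
 *   jbd2_journal_stop(h);                            // release credits
 */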
int h_revoke_credits; int h_revoke_credits_requested; int h_ref; int h_err; unsigned int h_sync: 1; unsigned int h_jdata: 1; unsigned int h_reserved: 1; unsigned int h_aborted: 1; unsigned int h_type: 8; unsigned int h_line_no: 16; long unsigned int h_start_jiffies; unsigned int h_requested_credits; unsigned int saved_alloc_context; }; struct journal_header_s { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; }; typedef struct journal_header_s journal_header_t; struct jbd2_journal_revoke_header_s { journal_header_t r_header; __be32 r_count; }; typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; struct jbd2_revoke_record_s { struct list_head hash; tid_t sequence; long long unsigned int blocknr; }; struct jbd2_revoke_table_s { int hash_size; int hash_shift; struct list_head *hash_table; }; struct transaction_stats_s; struct jbd2_stats_proc_session { journal_t *journal; struct transaction_stats_s *stats; int start; int max; }; struct jit_context { int cleanup_addr; int tail_call_direct_label; int tail_call_indirect_label; }; struct rand_data; struct shash_desc; struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; struct crypto_shash *tfm; struct shash_desc *sdesc; }; struct join_entry { u32 token; u32 remote_nonce; u32 local_nonce; u8 join_id; u8 local_id; u8 backup; u8 valid; }; struct journal_block_tag3_s { __be32 t_blocknr; __be32 t_flags; __be32 t_blocknr_high; __be32 t_checksum; }; typedef struct journal_block_tag3_s journal_block_tag3_t; struct journal_block_tag_s { __be32 t_blocknr; __be16 t_checksum; __be16 t_flags; __be32 t_blocknr_high; }; typedef struct journal_block_tag_s journal_block_tag_t; struct journal_head { struct buffer_head *b_bh; spinlock_t b_state_lock; int b_jcount; unsigned int b_jlist; unsigned int b_modified; char *b_frozen_data; char *b_committed_data; transaction_t *b_transaction; transaction_t *b_next_transaction; struct journal_head *b_tnext; struct journal_head *b_tprev; transaction_t *b_cp_transaction; struct journal_head *b_cpnext; struct journal_head *b_cpprev; struct jbd2_buffer_trigger_type *b_triggers; struct jbd2_buffer_trigger_type *b_frozen_triggers; }; struct transaction_run_stats_s { long unsigned int rs_wait; long unsigned int rs_request_delay; long unsigned int rs_running; long unsigned int rs_locked; long unsigned int rs_flushing; long unsigned int rs_logging; __u32 rs_handle_count; __u32 rs_blocks; __u32 rs_blocks_logged; }; struct transaction_stats_s { long unsigned int ts_tid; long unsigned int ts_requested; struct transaction_run_stats_s run; }; struct journal_superblock_s; typedef struct journal_superblock_s journal_superblock_t; struct journal_s { long unsigned int j_flags; int j_errno; struct mutex j_abort_mutex; struct buffer_head *j_sb_buffer; journal_superblock_t *j_superblock; rwlock_t j_state_lock; int j_barrier_count; struct mutex j_barrier; transaction_t *j_running_transaction; transaction_t *j_committing_transaction; transaction_t *j_checkpoint_transactions; wait_queue_head_t j_wait_transaction_locked; wait_queue_head_t j_wait_done_commit; wait_queue_head_t j_wait_commit; wait_queue_head_t j_wait_updates; wait_queue_head_t j_wait_reserved; wait_queue_head_t j_fc_wait; struct mutex j_checkpoint_mutex; struct buffer_head *j_chkpt_bhs[64]; struct shrinker *j_shrinker; struct percpu_counter j_checkpoint_jh_count; transaction_t *j_shrink_transaction; long unsigned int j_head; long unsigned int j_tail; long unsigned int j_free; long unsigned int j_first; long unsigned int j_last; 
long unsigned int j_fc_first; long unsigned int j_fc_off; long unsigned int j_fc_last; struct block_device *j_dev; int j_blocksize; long long unsigned int j_blk_offset; char j_devname[56]; struct block_device *j_fs_dev; errseq_t j_fs_dev_wb_err; unsigned int j_total_len; atomic_t j_reserved_credits; spinlock_t j_list_lock; struct inode *j_inode; tid_t j_tail_sequence; tid_t j_transaction_sequence; tid_t j_commit_sequence; tid_t j_commit_request; __u8 j_uuid[16]; struct task_struct *j_task; int j_max_transaction_buffers; int j_revoke_records_per_block; int j_transaction_overhead_buffers; long unsigned int j_commit_interval; struct timer_list j_commit_timer; spinlock_t j_revoke_lock; struct jbd2_revoke_table_s *j_revoke; struct jbd2_revoke_table_s *j_revoke_table[2]; struct buffer_head **j_wbuf; struct buffer_head **j_fc_wbuf; int j_wbufsize; int j_fc_wbufsize; pid_t j_last_sync_writer; u64 j_average_commit_time; u32 j_min_batch_time; u32 j_max_batch_time; void (*j_commit_callback)(journal_t *, transaction_t *); int (*j_submit_inode_data_buffers)(struct jbd2_inode *); int (*j_finish_inode_data_buffers)(struct jbd2_inode *); spinlock_t j_history_lock; struct proc_dir_entry *j_proc_entry; struct transaction_stats_s j_stats; unsigned int j_failed_commit; void *j_private; struct crypto_shash *j_chksum_driver; __u32 j_csum_seed; struct lockdep_map j_trans_commit_map; void (*j_fc_cleanup_callback)(struct journal_s *, int, tid_t); int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); int (*j_bmap)(struct journal_s *, sector_t *); }; struct journal_superblock_s { journal_header_t s_header; __be32 s_blocksize; __be32 s_maxlen; __be32 s_first; __be32 s_sequence; __be32 s_start; __be32 s_errno; __be32 s_feature_compat; __be32 s_feature_incompat; __be32 s_feature_ro_compat; __u8 s_uuid[16]; __be32 s_nr_users; __be32 s_dynsuper; __be32 s_max_transaction; __be32 s_max_trans_data; __u8 s_checksum_type; __u8 s_padding2[3]; __be32 s_num_fc_blks; __be32 s_head; __u32 s_padding[40]; __be32 s_checksum; __u8 s_users[768]; }; struct jump_entry { s32 code; s32 target; long int key; }; struct jump_label_patch { const void *code; int size; }; struct k_itimer; struct k_clock { int (*clock_getres)(const clockid_t, struct timespec64 *); int (*clock_set)(const clockid_t, const struct timespec64 *); int (*clock_get_timespec)(const clockid_t, struct timespec64 *); ktime_t (*clock_get_ktime)(const clockid_t); int (*clock_adj)(const clockid_t, struct __kernel_timex *); int (*timer_create)(struct k_itimer *); int (*nsleep)(const clockid_t, int, const struct timespec64 *); int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); int (*timer_del)(struct k_itimer *); void (*timer_get)(struct k_itimer *, struct itimerspec64 *); void (*timer_rearm)(struct k_itimer *); s64 (*timer_forward)(struct k_itimer *, ktime_t); ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); int (*timer_try_to_cancel)(struct k_itimer *); void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); void (*timer_wait_running)(struct k_itimer *); }; struct signal_struct; struct sigqueue; struct k_itimer { struct hlist_node list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; clockid_t it_clock; timer_t it_id; int it_active; s64 it_overrun; s64 it_overrun_last; int it_requeue_pending; int it_sigev_notify; ktime_t it_interval; struct signal_struct *it_signal; union { struct pid *it_pid; struct task_struct *it_process; }; struct sigqueue *sigq; union { 
struct { struct hrtimer timer; } real; struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; } it; struct callback_head rcu; }; typedef void __signalfn_t(int); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler; long unsigned int sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; }; struct k_sigaction { struct sigaction sa; }; struct kallsym_iter { loff_t pos; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; loff_t pos_bpf_end; long unsigned int value; unsigned int nameoff; char type; char name[512]; char module_name[56]; int exported; int show_value; }; struct kallsyms_data { long unsigned int *addrs; const char **syms; size_t cnt; size_t found; }; struct karatsuba_ctx { struct karatsuba_ctx *next; mpi_ptr_t tspace; mpi_size_t tspace_size; mpi_ptr_t tp; mpi_size_t tp_size; }; struct kaslr_memory_region { long unsigned int *base; long unsigned int *end; long unsigned int size_tb; }; struct kbd_repeat { int delay; int period; }; struct kbd_struct { unsigned char lockstate; unsigned char slockstate; unsigned char ledmode: 1; unsigned char ledflagstate: 4; char: 3; unsigned char default_ledflagstate: 4; unsigned char kbdmode: 3; int: 1; unsigned char modeflags: 5; }; struct kbdiacr { unsigned char diacr; unsigned char base; unsigned char result; }; struct kbdiacrs { unsigned int kb_cnt; struct kbdiacr kbdiacr[256]; }; struct kbdiacruc { unsigned int diacr; unsigned int base; unsigned int result; }; struct kbdiacrsuc { unsigned int kb_cnt; struct kbdiacruc kbdiacruc[256]; }; struct kbentry { unsigned char kb_table; unsigned char kb_index; short unsigned int kb_value; }; struct kbkeycode { unsigned int scancode; unsigned int keycode; }; struct kbsentry { unsigned char kb_func; unsigned char kb_string[512]; }; struct kcore_list { struct list_head list; long unsigned int addr; size_t size; int type; }; struct kern_ipc_perm { spinlock_t lock; bool deleted; int id; key_t key; kuid_t uid; kgid_t gid; kuid_t cuid; kgid_t cgid; umode_t mode; long unsigned int seq; void *security; struct rhash_head khtnode; struct callback_head rcu; refcount_t refcount; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct kernel_clone_args { u64 flags; int *pidfd; int *child_tid; int *parent_tid; const char *name; int exit_signal; u32 kthread: 1; u32 io_thread: 1; u32 user_worker: 1; u32 no_files: 1; long unsigned int stack; long unsigned int stack_size; long unsigned int tls; pid_t *set_tid; size_t set_tid_size; int cgroup; int idle; int (*fn)(void *); void *fn_arg; struct cgroup *cgrp; struct css_set *cset; }; struct kernel_cpustat { u64 cpustat[10]; }; struct kernel_ethtool_ringparam { u32 rx_buf_len; u8 tcp_data_split; u8 tx_push; u8 rx_push; u32 cqe_size; u32 tx_push_buf_len; u32 tx_push_buf_max_len; }; struct kernel_ethtool_ts_info { u32 cmd; u32 so_timestamping; int phc_index; enum hwtstamp_tx_types tx_types; enum hwtstamp_rx_filters rx_filters; }; struct kernel_hwtstamp_config { int flags; int tx_type; int rx_filter; struct ifreq *ifr; bool copied_to_user; enum hwtstamp_source source; }; struct kernel_param_ops; struct kparam_string; struct kparam_array; struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union { void *arg; const struct kparam_string *str; const struct kparam_array *arr; }; }; struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int 
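/*
 * Example: hooking the kernel_param_ops table being defined here to a
 * module parameter via module_param_cb(). The param_set_int() and
 * param_get_int() helpers back plain integer parameters; the parameter
 * name is hypothetical.
 *
 *   static int threshold = 10;
 *
 *   static const struct kernel_param_ops threshold_ops = {
 *           .set = param_set_int,    // parse and store on writes
 *           .get = param_get_int,    // format for sysfs reads
 *   };
 *   module_param_cb(threshold, &threshold_ops, &threshold, 0644);
 */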
(*get)(char *, const struct kernel_param *); void (*free)(void *); }; struct kernel_pkey_params { struct key *key; const char *encoding; const char *hash_algo; char *info; __u32 in_len; union { __u32 out_len; __u32 in2_len; }; enum kernel_pkey_operation op: 8; }; struct kernel_pkey_query { __u32 supported_ops; __u32 key_size; __u16 max_data_size; __u16 max_sig_size; __u16 max_enc_size; __u16 max_dec_size; }; struct kernel_siginfo { struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; }; }; struct kernel_stat { long unsigned int irqs_sum; unsigned int softirqs[10]; }; struct kernel_symbol { int value_offset; int name_offset; int namespace_offset; }; struct kernel_vm86_regs { struct pt_regs pt; short unsigned int es; short unsigned int __esh; short unsigned int ds; short unsigned int __dsh; short unsigned int fs; short unsigned int __fsh; short unsigned int gs; short unsigned int __gsh; }; struct kernfs_open_node; struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; }; struct kernfs_elem_dir { long unsigned int subdirs; struct rb_root children; struct kernfs_root *root; long unsigned int rev; }; struct kernfs_elem_symlink { struct kernfs_node *target_kn; }; struct kernfs_global_locks { struct mutex open_file_mutex[1024]; }; struct simple_xattrs { struct rb_root rb_root; rwlock_t lock; }; struct kernfs_iattrs { kuid_t ia_uid; kgid_t ia_gid; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; struct simple_xattrs xattrs; atomic_t nr_user_xattrs; atomic_t user_xattr_size; }; struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; short unsigned int flags; umode_t mode; union { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; }; u64 id; void *priv; struct kernfs_iattrs *iattr; struct callback_head rcu; }; struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct seq_file *seq_file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped: 1; bool released: 1; const struct vm_operations_struct *vm_ops; }; struct kernfs_open_node { struct callback_head callback_head; atomic_t event; wait_queue_head_t poll; struct list_head files; unsigned int nr_mmapped; unsigned int nr_to_release; }; struct kernfs_ops { int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); loff_t (*llseek)(struct kernfs_open_file *, loff_t, int); }; struct kernfs_syscall_ops; struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct idr ino_idr; u32 last_id_lowbits; u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; struct rw_semaphore kernfs_rwsem; struct rw_semaphore kernfs_iattr_rwsem; struct 
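/*
 * Example: kernfs_node and kernfs_ops above are the machinery beneath
 * sysfs; most producers only see the kobj_attribute wrapper defined
 * earlier in this header. A sketch with hypothetical names.
 *
 *   static ssize_t foo_show(struct kobject *kobj,
 *                           struct kobj_attribute *attr, char *buf)
 *   {
 *           return sysfs_emit(buf, "%d\n", 42);
 *   }
 *   static struct kobj_attribute foo_attr = __ATTR_RO(foo);
 *
 *   // registration: err = sysfs_create_file(kobj, &foo_attr.attr);
 */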
rw_semaphore kernfs_supers_rwsem; struct callback_head rcu; }; struct kernfs_super_info { struct super_block *sb; struct kernfs_root *root; const void *ns; struct list_head node; }; struct kernfs_syscall_ops { int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); }; struct kexec_load_limit { struct mutex mutex; int limit; }; struct kexec_segment { union { void *buf; void *kbuf; }; size_t bufsz; long unsigned int mem; size_t memsz; }; struct key_type; struct key_tag; struct keyring_index_key { long unsigned int hash; union { struct { u16 desc_len; char desc[6]; }; long unsigned int x; }; struct key_type *type; struct key_tag *domain_tag; const char *description; }; union key_payload { void *rcu_data0; void *data[4]; }; struct key_user; struct key_restriction; struct key { refcount_t usage; key_serial_t serial; union { struct list_head graveyard_link; struct rb_node serial_node; }; struct rw_semaphore sem; struct key_user *user; void *security; union { time64_t expiry; time64_t revoked_at; }; time64_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; short unsigned int quotalen; short unsigned int datalen; short int state; long unsigned int flags; union { struct keyring_index_key index_key; struct { long unsigned int hash; long unsigned int len_desc; struct key_type *type; struct key_tag *domain_tag; char *description; }; }; union { union key_payload payload; struct { struct list_head name_link; struct assoc_array keys; }; }; struct key_restriction *restrict_link; }; struct key_match_data { bool (*cmp)(const struct key *, const struct key_match_data *); const void *raw_data; void *preparsed; unsigned int lookup_type; }; struct key_preparsed_payload { const char *orig_description; char *description; union key_payload payload; const void *data; size_t datalen; size_t quotalen; time64_t expiry; }; typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); struct key_restriction { key_restrict_link_func_t check; struct key *key; struct key_type *keytype; }; struct key_security_struct { u32 sid; }; struct key_tag { struct callback_head rcu; refcount_t usage; bool removed; }; typedef int (*request_key_actor_t)(struct key *, void *); struct key_type { const char *name; size_t def_datalen; unsigned int flags; int (*vet_description)(const char *); int (*preparse)(struct key_preparsed_payload *); void (*free_preparse)(struct key_preparsed_payload *); int (*instantiate)(struct key *, struct key_preparsed_payload *); int (*update)(struct key *, struct key_preparsed_payload *); int (*match_preparse)(struct key_match_data *); void (*match_free)(struct key_match_data *); void (*revoke)(struct key *); void (*destroy)(struct key *); void (*describe)(const struct key *, struct seq_file *); long int (*read)(const struct key *, char *, size_t); request_key_actor_t request_key; struct key_restriction * (*lookup_restriction)(const char *); int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); struct list_head link; struct lock_class_key lock_class; }; struct key_user { struct rb_node node; struct mutex cons_lock; 
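/*
 * Example: the shape of a struct key_type registration built from the
 * definition above. The helpers shown belong to the kernel's "user"
 * key type; treat the combination as a hedged sketch rather than a
 * drop-in implementation.
 *
 *   static struct key_type key_type_example = {
 *           .name          = "example",
 *           .preparse      = user_preparse,
 *           .free_preparse = user_free_preparse,
 *           .instantiate   = generic_key_instantiate,
 *           .revoke        = user_revoke,
 *           .destroy       = user_destroy,
 *           .describe      = user_describe,
 *           .read          = user_read,
 *   };
 *
 *   // module init:  err = register_key_type(&key_type_example);
 *   // module exit:  unregister_key_type(&key_type_example);
 */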
spinlock_t lock; refcount_t usage; atomic_t nkeys; atomic_t nikeys; kuid_t uid; int qnkeys; int qnbytes; }; struct key_vector { t_key key; unsigned char pos; unsigned char bits; unsigned char slen; union { struct hlist_head leaf; struct { struct {} __empty_tnode; struct key_vector *tnode[0]; }; }; }; struct keyboard_notifier_param { struct vc_data *vc; int down; int shift; int ledstate; unsigned int value; }; struct keyctl_dh_params { union { __s32 private; __s32 priv; }; __s32 prime; __s32 base; }; struct keyctl_kdf_params { char *hashname; char *otherinfo; __u32 otherinfolen; __u32 __spare[8]; }; struct keyctl_pkey_params { __s32 key_id; __u32 in_len; union { __u32 out_len; __u32 in2_len; }; __u32 __spare[7]; }; struct keyctl_pkey_query { __u32 supported_ops; __u32 key_size; __u16 max_data_size; __u16 max_sig_size; __u16 max_enc_size; __u16 max_dec_size; __u32 __spare[10]; }; struct keyring_read_iterator_context { size_t buflen; size_t count; key_serial_t *buffer; }; struct __key_reference_with_attributes; typedef struct __key_reference_with_attributes *key_ref_t; struct keyring_search_context { struct keyring_index_key index_key; const struct cred *cred; struct key_match_data match_data; unsigned int flags; int (*iterator)(const void *, void *); int skipped_ret; bool possessed; key_ref_t result; time64_t now; }; struct rcu_gp_oldstate { long unsigned int rgos_norm; long unsigned int rgos_exp; }; struct kfree_rcu_cpu; struct kfree_rcu_cpu_work { struct rcu_work rcu_work; struct callback_head *head_free; struct rcu_gp_oldstate head_free_gp_snap; struct list_head bulk_head_free[2]; struct kfree_rcu_cpu *krcp; }; struct kfree_rcu_cpu { struct callback_head *head; long unsigned int head_gp_snap; atomic_t head_count; struct list_head bulk_head[2]; atomic_t bulk_count[2]; struct kfree_rcu_cpu_work krw_arr[2]; raw_spinlock_t lock; struct delayed_work monitor_work; bool initialized; struct delayed_work page_cache_work; atomic_t backoff_page_cache_fill; atomic_t work_in_progress; struct hrtimer hrtimer; struct llist_head bkvcache; int nr_bkv_objs; }; struct mm_slot { struct hlist_node hash; struct list_head mm_node; struct mm_struct *mm; }; struct khugepaged_mm_slot { struct mm_slot slot; }; struct khugepaged_scan { struct list_head mm_head; struct khugepaged_mm_slot *mm_slot; long unsigned int address; }; struct kimage_arch { p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; }; struct kimage { kimage_entry_t head; kimage_entry_t *entry; kimage_entry_t *last_entry; long unsigned int start; struct page *control_code_page; struct page *swap_page; void *vmcoreinfo_data_copy; long unsigned int nr_segments; struct kexec_segment segment[16]; struct list_head control_pages; struct list_head dest_pages; struct list_head unusable_pages; long unsigned int control_page; unsigned int type: 1; unsigned int preserve_context: 1; unsigned int file_mode: 1; unsigned int hotplug_support: 1; struct kimage_arch arch; int hp_action; int elfcorehdr_index; bool elfcorehdr_updated; void *elf_headers; long unsigned int elf_headers_sz; long unsigned int elf_load_addr; }; struct kioctx_cpu; struct kioctx { struct percpu_ref users; atomic_t dead; struct percpu_ref reqs; long unsigned int user_id; struct kioctx_cpu *cpu; unsigned int req_batch; unsigned int max_reqs; unsigned int nr_events; long unsigned int mmap_base; long unsigned int mmap_size; struct folio **ring_folios; long int nr_pages; struct rcu_work free_rwork; struct ctx_rq_wait *rq_wait; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct { atomic_t 
reqs_available; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { spinlock_t ctx_lock; struct list_head active_reqs; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct { struct mutex ring_lock; wait_queue_head_t wait; long: 64; long: 64; long: 64; long: 64; }; struct { unsigned int tail; unsigned int completed_events; spinlock_t completion_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct folio *internal_folios[8]; struct file *aio_ring_file; unsigned int id; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct kioctx_cpu { unsigned int reqs_available; }; struct kioctx_table { struct callback_head rcu; unsigned int nr; struct kioctx *table[0]; }; struct klist_waiter { struct list_head list; struct klist_node *node; struct task_struct *process; int woken; }; struct km_event { union { u32 hard; u32 proto; u32 byid; u32 aevent; u32 type; } data; u32 seq; u32 portid; u32 event; struct net *net; }; struct kmalloc_info_struct { const char *name[4]; unsigned int size; }; struct kmalloced_param { struct list_head list; char val[0]; }; struct kmap_ctrl {}; typedef struct kmem_cache *kmem_buckets[14]; struct kmem_cache_order_objects { unsigned int x; }; struct kmem_cache_cpu; struct kmem_cache_node; struct kmem_cache { struct kmem_cache_cpu *cpu_slab; slab_flags_t flags; long unsigned int min_partial; unsigned int size; unsigned int object_size; struct reciprocal_value reciprocal_size; unsigned int offset; unsigned int cpu_partial; unsigned int cpu_partial_slabs; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); unsigned int inuse; unsigned int align; unsigned int red_left_pad; const char *name; struct list_head list; struct kobject kobj; unsigned int remote_node_defrag_ratio; struct kmem_cache_node *node[64]; }; struct kmem_cache_args { unsigned int align; unsigned int useroffset; unsigned int usersize; unsigned int freeptr_offset; bool use_freeptr_offset; void (*ctor)(void *); }; struct kmem_cache_cpu { union { struct { void **freelist; long unsigned int tid; }; freelist_aba_t freelist_tid; }; struct slab *slab; struct slab *partial; local_lock_t lock; }; struct kmem_cache_node { spinlock_t list_lock; long unsigned int nr_partial; struct list_head partial; atomic_long_t nr_slabs; atomic_long_t total_objects; struct list_head full; }; struct kmem_obj_info { void *kp_ptr; struct slab *kp_slab; void *kp_objp; long unsigned int kp_data_offset; struct kmem_cache *kp_slab_cache; void *kp_ret; void *kp_stack[16]; void *kp_free_stack[16]; }; struct kmsg_dump_detail { enum kmsg_dump_reason reason; const char *description; }; struct kmsg_dump_iter { u64 cur_seq; u64 next_seq; }; struct kmsg_dumper { struct list_head list; void (*dump)(struct kmsg_dumper *, struct kmsg_dump_detail *); enum kmsg_dump_reason max_reason; bool registered; }; struct probe; struct kobj_map { struct probe *probes[255]; struct mutex *lock; }; struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(void); void * (*grab_current_ns)(void); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(void); void (*drop_ns)(void *); }; struct kobj_uevent_env { char *argv[3]; char *envp[64]; int envp_idx; char buf[2048]; int buflen; }; struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; }; struct kparam_string { unsigned int maxlen; char *string; }; 
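/*
 * Example: typical use of the slab allocator around the kmem_cache
 * definition above. A sketch; "struct obj" and the cache name are
 * placeholders, and the cache would normally be created once at init.
 *
 *   static struct kmem_cache *obj_cachep;
 *
 *   obj_cachep = kmem_cache_create("example_objs", sizeof(struct obj),
 *                                  0, SLAB_HWCACHE_ALIGN, NULL);
 *   if (!obj_cachep)
 *           return -ENOMEM;
 *
 *   struct obj *o = kmem_cache_zalloc(obj_cachep, GFP_KERNEL);
 *   ...
 *   kmem_cache_free(obj_cachep, o);
 *   kmem_cache_destroy(obj_cachep);
 */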
struct kpp_request; struct kpp_alg { int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); int (*generate_public_key)(struct kpp_request *); int (*compute_shared_secret)(struct kpp_request *); unsigned int (*max_size)(struct crypto_kpp *); int (*init)(struct crypto_kpp *); void (*exit)(struct crypto_kpp *); struct crypto_alg base; }; struct kpp_instance { void (*free)(struct kpp_instance *); union { struct { char head[48]; struct crypto_instance base; } s; struct kpp_alg alg; }; }; struct kpp_request { struct crypto_async_request base; struct scatterlist *src; struct scatterlist *dst; unsigned int src_len; unsigned int dst_len; void *__ctx[0]; }; struct kpp_secret { short unsigned int type; short unsigned int len; }; struct kprobe_blacklist_entry { struct list_head list; long unsigned int start_addr; long unsigned int end_addr; }; struct prev_kprobe { struct kprobe *kp; long unsigned int status; long unsigned int old_flags; long unsigned int saved_flags; }; struct kprobe_ctlblk { long unsigned int kprobe_status; long unsigned int kprobe_old_flags; long unsigned int kprobe_saved_flags; struct prev_kprobe prev_kprobe; }; struct kprobe_insn_cache { struct mutex mutex; void * (*alloc)(void); void (*free)(void *); const char *sym; struct list_head pages; size_t insn_size; int nr_garbage; }; struct kprobe_insn_page { struct list_head list; kprobe_opcode_t *insns; struct kprobe_insn_cache *cache; int nused; int ngarbage; char slot_used[0]; }; struct kprobe_trace_entry_head { struct trace_entry ent; long unsigned int ip; }; struct kretprobe_instance; typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); struct kretprobe { struct kprobe kp; kretprobe_handler_t handler; kretprobe_handler_t entry_handler; int maxactive; int nmissed; size_t data_size; struct rethook *rh; }; struct kretprobe_blackpoint { const char *name; void *addr; }; struct kretprobe_instance { struct rethook_node node; char data[0]; }; struct kretprobe_trace_entry_head { struct trace_entry ent; long unsigned int func; long unsigned int ret_ip; }; struct kset_uevent_ops; struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; }; struct kset_uevent_ops { int (* const filter)(const struct kobject *); const char * (* const name)(const struct kobject *); int (* const uevent)(const struct kobject *, struct kobj_uevent_env *); }; struct ksignal { struct k_sigaction ka; kernel_siginfo_t info; int sig; }; struct ksm_rmap_item; struct ksm_mm_slot { struct mm_slot slot; struct ksm_rmap_item *rmap_list; }; struct ksm_stable_node; struct ksm_rmap_item { struct ksm_rmap_item *rmap_list; union { struct anon_vma *anon_vma; int nid; }; struct mm_struct *mm; long unsigned int address; unsigned int oldchecksum; rmap_age_t age; rmap_age_t remaining_skips; union { struct rb_node node; struct { struct ksm_stable_node *head; struct hlist_node hlist; }; }; }; struct ksm_scan { struct ksm_mm_slot *mm_slot; long unsigned int address; struct ksm_rmap_item **rmap_list; long unsigned int seqnr; }; struct ksm_stable_node { union { struct rb_node node; struct { struct list_head *head; struct { struct hlist_node hlist_dup; struct list_head list; }; }; }; struct hlist_head hlist; union { long unsigned int kpfn; long unsigned int chain_prune_time; }; int rmap_hlist_len; int nid; }; struct kstat { u32 result_mask; umode_t mode; unsigned int nlink; uint32_t blksize; u64 attributes; u64 attributes_mask; u64 ino; dev_t dev; dev_t rdev; kuid_t uid; kgid_t gid; 
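/*
 * Example: registering the kretprobe defined above to observe a
 * function's return value. kp.symbol_name selects the probed symbol;
 * the one shown is only a placeholder.
 *
 *   static int ret_handler(struct kretprobe_instance *ri,
 *                          struct pt_regs *regs)
 *   {
 *           pr_info("returned %ld\n", regs_return_value(regs));
 *           return 0;
 *   }
 *
 *   static struct kretprobe my_kretprobe = {
 *           .handler        = ret_handler,
 *           .maxactive      = 20,    // concurrent instances to track
 *           .kp.symbol_name = "kernel_clone",
 *   };
 *
 *   // init:  err = register_kretprobe(&my_kretprobe);
 *   // exit:  unregister_kretprobe(&my_kretprobe);
 */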
loff_t size; struct timespec64 atime; struct timespec64 mtime; struct timespec64 ctime; struct timespec64 btime; u64 blocks; u64 mnt_id; u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; u64 subvol; u32 atomic_write_unit_min; u32 atomic_write_unit_max; u32 atomic_write_segments_max; }; struct kstatfs { long int f_type; long int f_bsize; u64 f_blocks; u64 f_bfree; u64 f_bavail; u64 f_files; u64 f_ffree; __kernel_fsid_t f_fsid; long int f_namelen; long int f_frsize; long int f_flags; long int f_spare[4]; }; struct statmount { __u32 size; __u32 mnt_opts; __u64 mask; __u32 sb_dev_major; __u32 sb_dev_minor; __u64 sb_magic; __u32 sb_flags; __u32 fs_type; __u64 mnt_id; __u64 mnt_parent_id; __u32 mnt_id_old; __u32 mnt_parent_id_old; __u64 mnt_attr; __u64 mnt_propagation; __u64 mnt_peer_group; __u64 mnt_master; __u64 propagate_from; __u32 mnt_root; __u32 mnt_point; __u64 mnt_ns_id; __u64 __spare2[49]; char str[0]; }; struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; }; struct kstatmount { struct statmount *buf; size_t bufsize; struct vfsmount *mnt; u64 mask; struct path root; struct statmount sm; struct seq_file seq; }; struct ktermios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; speed_t c_ispeed; speed_t c_ospeed; }; struct kthread { long unsigned int flags; unsigned int cpu; int result; int (*threadfn)(void *); void *data; struct completion parked; struct completion exited; struct cgroup_subsys_state *blkcg_css; char *full_name; }; struct kthread_create_info { char *full_name; int (*threadfn)(void *); void *data; int node; struct task_struct *result; struct completion *done; struct list_head list; }; struct kthread_work; typedef void (*kthread_work_func_t)(struct kthread_work *); struct kthread_worker; struct kthread_work { struct list_head node; kthread_work_func_t func; struct kthread_worker *worker; int canceling; }; struct kthread_delayed_work { struct kthread_work work; struct timer_list timer; }; struct kthread_flush_work { struct kthread_work work; struct completion done; }; struct kthread_worker { unsigned int flags; raw_spinlock_t lock; struct list_head work_list; struct list_head delayed_work_list; struct task_struct *task; struct kthread_work *current_work; }; struct ktime_timestamps { u64 mono; u64 boot; u64 real; }; struct kvfree_rcu_bulk_data { struct list_head list; struct rcu_gp_oldstate gp_snap; long unsigned int nr_records; void *records[0]; }; struct kvm_memslots { u64 generation; atomic_long_t last_used_slot; struct rb_root_cached hva_tree; struct rb_root gfn_tree; struct hlist_head id_hash[128]; int node_idx; }; struct kvm_vm_stat_generic { u64 remote_tlb_flush; u64 remote_tlb_flush_requests; }; struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 mmu_shadow_zapped; u64 mmu_pte_write; u64 mmu_pde_zapped; u64 mmu_flooded; u64 mmu_recycled; u64 mmu_cache_miss; u64 mmu_unsync; union { struct { atomic64_t pages_4k; atomic64_t pages_2m; atomic64_t pages_1g; }; atomic64_t pages[3]; }; u64 nx_lpage_splits; u64 max_mmu_page_hash_collisions; u64 max_mmu_rmap_size; }; struct kvm_pic; struct kvm_ioapic; struct kvm_pit; struct kvm_xen_hvm_config { __u32 flags; __u32 msr; __u64 blob_addr_32; __u64 blob_addr_64; __u8 blob_size_32; __u8 blob_size_64; __u8 pad2[30]; }; struct kvm_mmu_memory_cache { gfp_t gfp_zero; gfp_t gfp_custom; u64 
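/*
 * Example: pushing work onto a dedicated thread with the
 * kthread_worker/kthread_work pair defined above; names are
 * hypothetical.
 *
 *   static void my_work_fn(struct kthread_work *work) { ... }
 *
 *   struct kthread_worker *w = kthread_create_worker(0, "my-worker");
 *   struct kthread_work work;
 *
 *   if (IS_ERR(w))
 *           return PTR_ERR(w);
 *   kthread_init_work(&work, my_work_fn);
 *   kthread_queue_work(w, &work);
 *   kthread_flush_work(&work);       // wait for my_work_fn to finish
 *   kthread_destroy_worker(w);
 */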
init_value; struct kmem_cache *kmem_cache; int capacity; int nobjs; void **objects; }; struct kvm_apic_map; struct kvm_x86_msr_filter; struct kvm_x86_pmu_event_filter; struct kvm_arch { long unsigned int n_used_mmu_pages; long unsigned int n_requested_mmu_pages; long unsigned int n_max_mmu_pages; unsigned int indirect_shadow_pages; u8 mmu_valid_gen; u8 vm_type; bool has_private_mem; bool has_protected_state; bool pre_fault_allowed; struct hlist_head mmu_page_hash[4096]; struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; struct list_head possible_nx_huge_pages; spinlock_t mmu_unsync_pages_lock; u64 shadow_mmio_value; struct iommu_domain *iommu_domain; bool iommu_noncoherent; atomic_t noncoherent_dma_count; atomic_t assigned_device_count; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; atomic_t vapics_in_nmi_mode; struct mutex apic_map_lock; struct kvm_apic_map *apic_map; atomic_t apic_map_dirty; bool apic_access_memslot_enabled; bool apic_access_memslot_inhibited; struct rw_semaphore apicv_update_lock; long unsigned int apicv_inhibit_reasons; gpa_t wall_clock; bool mwait_in_guest; bool hlt_in_guest; bool pause_in_guest; bool cstate_in_guest; long unsigned int irq_sources_bitmap; s64 kvmclock_offset; raw_spinlock_t tsc_write_lock; u64 last_tsc_nsec; u64 last_tsc_write; u32 last_tsc_khz; u64 last_tsc_offset; u64 cur_tsc_nsec; u64 cur_tsc_write; u64 cur_tsc_offset; u64 cur_tsc_generation; int nr_vcpus_matched_tsc; u32 default_tsc_khz; bool user_set_tsc; u64 apic_bus_cycle_ns; seqcount_raw_spinlock_t pvclock_sc; bool use_master_clock; u64 master_kernel_ns; u64 master_cycle_now; struct delayed_work kvmclock_update_work; struct delayed_work kvmclock_sync_work; struct kvm_xen_hvm_config xen_hvm_config; struct hlist_head mask_notifier_list; bool backwards_tsc_observed; bool boot_vcpu_runs_old_kvmclock; u32 bsp_vcpu_id; u64 disabled_quirks; enum kvm_irqchip_mode irqchip_mode; u8 nr_reserved_ioapic_pins; bool disabled_lapic_found; bool x2apic_format; bool x2apic_broadcast_quirk_disabled; bool guest_can_read_msr_platform_info; bool exception_payload_enabled; bool triple_fault_event; bool bus_lock_detection_enabled; bool enable_pmu; u32 notify_window; u32 notify_vmexit_flags; bool exit_on_emulation_error; u32 user_space_msr_mask; struct kvm_x86_msr_filter *msr_filter; u32 hypercall_exit_enabled; bool sgx_provisioning_allowed; struct kvm_x86_pmu_event_filter *pmu_event_filter; struct task_struct *nx_huge_page_recovery_thread; atomic64_t tdp_mmu_pages; struct list_head tdp_mmu_roots; spinlock_t tdp_mmu_pages_lock; bool shadow_root_allocated; u32 max_vcpu_ids; bool disable_nx_huge_pages; struct kvm_mmu_memory_cache split_shadow_page_cache; struct kvm_mmu_memory_cache split_page_header_cache; struct kvm_mmu_memory_cache split_desc_cache; }; struct srcu_data; struct srcu_usage; struct srcu_struct { unsigned int srcu_idx; struct srcu_data *sda; struct lockdep_map dep_map; struct srcu_usage *srcu_sup; }; struct kvm_io_bus; struct kvm_stat_data; struct kvm { rwlock_t mmu_lock; struct mutex slots_lock; struct mutex slots_arch_lock; struct mm_struct *mm; long unsigned int nr_memslot_pages; struct kvm_memslots __memslots[2]; struct kvm_memslots *memslots[1]; struct xarray vcpu_array; atomic_t nr_memslots_dirty_logging; spinlock_t mn_invalidate_lock; long unsigned int mn_active_invalidate_count; struct rcuwait mn_memslots_update_rcuwait; spinlock_t gpc_lock; struct list_head gpc_list; atomic_t online_vcpus; int max_vcpus; int created_vcpus; int last_boosted_vcpu; 
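/* Editorial annotation, not part of the generated dump: vm_list below links
 * this VM into KVM's global list of VMs; the remaining members are VM-wide
 * locks, I/O buses, stats and the arch-specific kvm_arch state defined above. */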
struct list_head vm_list; struct mutex lock; struct kvm_io_bus *buses[4]; struct list_head ioeventfds; struct kvm_vm_stat stat; struct kvm_arch arch; refcount_t users_count; struct mutex irq_lock; struct list_head devices; u64 manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; bool override_halt_poll_ns; unsigned int max_halt_poll_ns; u32 dirty_ring_size; bool dirty_ring_with_bitmap; bool vm_bugged; bool vm_dead; char stats_id[48]; }; struct kvm_lapic; struct kvm_apic_map { struct callback_head rcu; enum kvm_apic_logical_mode logical_mode; u32 max_apic_id; union { struct kvm_lapic *xapic_flat_map[8]; struct kvm_lapic *xapic_cluster_map[64]; }; struct kvm_lapic *phys_map[0]; }; struct kvm_rmap_head; struct kvm_lpage_info; struct kvm_arch_memory_slot { struct kvm_rmap_head *rmap[3]; struct kvm_lpage_info *lpage_info[2]; short unsigned int *gfn_write_track; }; union kvm_mmu_page_role { u32 word; struct { unsigned int level: 4; unsigned int has_4_byte_gpte: 1; unsigned int quadrant: 2; unsigned int direct: 1; unsigned int access: 3; unsigned int invalid: 1; unsigned int efer_nx: 1; unsigned int cr0_wp: 1; unsigned int smep_andnot_wp: 1; unsigned int smap_andnot_wp: 1; unsigned int ad_disabled: 1; unsigned int guest_mode: 1; unsigned int passthrough: 1; char: 5; unsigned int smm: 8; }; }; union kvm_mmu_extended_role { u32 word; struct { unsigned int valid: 1; unsigned int execonly: 1; unsigned int cr4_pse: 1; unsigned int cr4_pke: 1; unsigned int cr4_smap: 1; unsigned int cr4_smep: 1; unsigned int cr4_la57: 1; unsigned int efer_lma: 1; }; }; union kvm_cpu_role { u64 as_u64; struct { union kvm_mmu_page_role base; union kvm_mmu_extended_role ext; }; }; struct kvm_cpuid_entry2 { __u32 function; __u32 index; __u32 flags; __u32 eax; __u32 ebx; __u32 ecx; __u32 edx; __u32 padding[3]; }; struct kvm_debug_exit_arch { __u32 exception; __u32 pad; __u64 pc; __u64 dr6; __u64 dr7; }; struct kvm_dirty_gfn { __u32 flags; __u32 slot; __u64 offset; }; struct kvm_dirty_ring { u32 dirty_index; u32 reset_index; u32 size; u32 soft_limit; struct kvm_dirty_gfn *dirty_gfns; int index; }; struct kvm_dtable { __u64 base; __u16 limit; __u16 padding[3]; }; struct kvm_enc_region { __u64 addr; __u64 size; }; struct kvm_hyperv_exit { __u32 type; __u32 pad1; union { struct { __u32 msr; __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; } synic; struct { __u64 input; __u64 result; __u64 params[2]; } hcall; struct { __u32 msr; __u32 pad2; __u64 control; __u64 status; __u64 send_page; __u64 recv_page; __u64 pending_page; } syndbg; } u; }; struct kvm_hypervisor_cpuid { u32 base; u32 limit; }; struct kvm_io_device; struct kvm_io_range { gpa_t addr; int len; struct kvm_io_device *dev; }; struct kvm_io_bus { int dev_count; int ioeventfd_count; struct kvm_io_range range[0]; }; struct kvm_lpage_info { int disallow_lpage; }; struct kvm_memory_slot { struct hlist_node id_node[2]; struct interval_tree_node hva_node[2]; struct rb_node gfn_node[2]; gfn_t base_gfn; long unsigned int npages; long unsigned int *dirty_bitmap; struct kvm_arch_memory_slot arch; long unsigned int userspace_addr; u32 flags; short int id; u16 as_id; }; struct kvm_mmio_fragment { gpa_t gpa; void *data; unsigned int len; }; struct kvm_page_fault; struct x86_exception; struct kvm_mmu_page; struct kvm_mmu_root_info { gpa_t pgd; hpa_t hpa; }; struct rsvd_bits_validate { u64 rsvd_bits_mask[10]; u64 bad_mt_xwr; }; struct kvm_vcpu; struct 
kvm_mmu { long unsigned int (*get_guest_pgd)(struct kvm_vcpu *); u64 (*get_pdptr)(struct kvm_vcpu *, int); int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *); void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *); gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *); int (*sync_spte)(struct kvm_vcpu *, struct kvm_mmu_page *, int); struct kvm_mmu_root_info root; union kvm_cpu_role cpu_role; union kvm_mmu_page_role root_role; u32 pkru_mask; struct kvm_mmu_root_info prev_roots[3]; u8 permissions[16]; u64 *pae_root; u64 *pml4_root; u64 *pml5_root; struct rsvd_bits_validate shadow_zero_check; struct rsvd_bits_validate guest_rsvd_check; u64 pdptrs[4]; }; struct kvm_mtrr { u64 var[16]; u64 fixed_64k; u64 fixed_16k[2]; u64 fixed_4k[8]; u64 deftype; }; struct kvm_vmx_nested_state_hdr { __u64 vmxon_pa; __u64 vmcs12_pa; struct { __u16 flags; } smm; __u16 pad; __u32 flags; __u64 preemption_timer_deadline; }; struct kvm_svm_nested_state_hdr { __u64 vmcb_pa; }; struct kvm_vmx_nested_state_data { __u8 vmcs12[4096]; __u8 shadow_vmcs12[4096]; }; struct kvm_svm_nested_state_data { __u8 vmcb12[4096]; }; struct kvm_nested_state { __u16 flags; __u16 format; __u32 size; union { struct kvm_vmx_nested_state_hdr vmx; struct kvm_svm_nested_state_hdr svm; __u8 pad[120]; } hdr; union { struct { struct {} __empty_vmx; struct kvm_vmx_nested_state_data vmx[0]; }; struct { struct {} __empty_svm; struct kvm_svm_nested_state_data svm[0]; }; } data; }; struct kvm_pio_request { long unsigned int linear_rip; long unsigned int count; int in; int port; int size; }; struct kvm_pmc { enum pmc_type type; u8 idx; bool is_paused; bool intr; u64 counter; u64 emulated_counter; u64 eventsel; struct perf_event *perf_event; struct kvm_vcpu *vcpu; u64 current_config; }; struct kvm_pmu { u8 version; unsigned int nr_arch_gp_counters; unsigned int nr_arch_fixed_counters; unsigned int available_event_types; u64 fixed_ctr_ctrl; u64 fixed_ctr_ctrl_rsvd; u64 global_ctrl; u64 global_status; u64 counter_bitmask[2]; u64 global_ctrl_rsvd; u64 global_status_rsvd; u64 reserved_bits; u64 raw_event_mask; struct kvm_pmc gp_counters[8]; struct kvm_pmc fixed_counters[3]; union { long unsigned int reprogram_pmi[1]; atomic64_t __reprogram_pmi; }; long unsigned int all_valid_pmc_idx[1]; long unsigned int pmc_in_use[1]; u64 ds_area; u64 pebs_enable; u64 pebs_enable_rsvd; u64 pebs_data_cfg; u64 pebs_data_cfg_rsvd; u64 host_cross_mapped_mask; bool need_cleanup; u8 event_count; }; struct kvm_queued_exception { bool pending; bool injected; bool has_error_code; u8 vector; u32 error_code; long unsigned int payload; bool has_payload; }; struct kvm_queued_interrupt { bool injected; bool soft; u8 nr; }; struct kvm_regs { __u64 rax; __u64 rbx; __u64 rcx; __u64 rdx; __u64 rsi; __u64 rdi; __u64 rsp; __u64 rbp; __u64 r8; __u64 r9; __u64 r10; __u64 r11; __u64 r12; __u64 r13; __u64 r14; __u64 r15; __u64 rip; __u64 rflags; }; struct kvm_rmap_head { long unsigned int val; }; struct kvm_xen_exit { __u32 type; union { struct { __u32 longmode; __u32 cpl; __u64 input; __u64 result; __u64 params[6]; } hcall; } u; }; struct kvm_segment { __u64 base; __u32 limit; __u16 selector; __u8 type; __u8 present; __u8 dpl; __u8 db; __u8 s; __u8 l; __u8 g; __u8 avl; __u8 unusable; __u8 padding; }; struct kvm_sregs { struct kvm_segment cs; struct kvm_segment ds; struct kvm_segment es; struct kvm_segment fs; struct kvm_segment gs; struct kvm_segment ss; struct kvm_segment tr; struct kvm_segment ldt; struct kvm_dtable gdt; struct 
kvm_dtable idt; __u64 cr0; __u64 cr2; __u64 cr3; __u64 cr4; __u64 cr8; __u64 efer; __u64 apic_base; __u64 interrupt_bitmap[4]; }; struct kvm_vcpu_events { struct { __u8 injected; __u8 nr; __u8 has_error_code; __u8 pending; __u32 error_code; } exception; struct { __u8 injected; __u8 nr; __u8 soft; __u8 shadow; } interrupt; struct { __u8 injected; __u8 pending; __u8 masked; __u8 pad; } nmi; __u32 sipi_vector; __u32 flags; struct { __u8 smm; __u8 pending; __u8 smm_inside_nmi; __u8 latched_init; } smi; struct { __u8 pending; } triple_fault; __u8 reserved[26]; __u8 exception_has_payload; __u64 exception_payload; }; struct kvm_sync_regs { struct kvm_regs regs; struct kvm_sregs sregs; struct kvm_vcpu_events events; }; struct kvm_run { __u8 request_interrupt_window; __u8 immediate_exit__unsafe; __u8 padding1[6]; __u32 exit_reason; __u8 ready_for_interrupt_injection; __u8 if_flag; __u16 flags; __u64 cr8; __u64 apic_base; union { struct { __u64 hardware_exit_reason; } hw; struct { __u64 hardware_entry_failure_reason; __u32 cpu; } fail_entry; struct { __u32 exception; __u32 error_code; } ex; struct { __u8 direction; __u8 size; __u16 port; __u32 count; __u64 data_offset; } io; struct { struct kvm_debug_exit_arch arch; } debug; struct { __u64 phys_addr; __u8 data[8]; __u32 len; __u8 is_write; } mmio; struct { __u64 phys_addr; __u8 data[8]; __u32 len; __u8 is_write; } iocsr_io; struct { __u64 nr; __u64 args[6]; __u64 ret; union { __u64 flags; }; } hypercall; struct { __u64 rip; __u32 is_write; __u32 pad; } tpr_access; struct { __u8 icptcode; __u16 ipa; __u32 ipb; } s390_sieic; __u64 s390_reset_flags; struct { __u64 trans_exc_code; __u32 pgm_code; } s390_ucontrol; struct { __u32 dcrn; __u32 data; __u8 is_write; } dcr; struct { __u32 suberror; __u32 ndata; __u64 data[16]; } internal; struct { __u32 suberror; __u32 ndata; __u64 flags; union { struct { __u8 insn_size; __u8 insn_bytes[15]; }; }; } emulation_failure; struct { __u64 gprs[32]; } osi; struct { __u64 nr; __u64 ret; __u64 args[9]; } papr_hcall; struct { __u16 subchannel_id; __u16 subchannel_nr; __u32 io_int_parm; __u32 io_int_word; __u32 ipb; __u8 dequeued; } s390_tsch; struct { __u32 epr; } epr; struct { __u32 type; __u32 ndata; union { __u64 data[16]; }; } system_event; struct { __u64 addr; __u8 ar; __u8 reserved; __u8 fc; __u8 sel1; __u16 sel2; } s390_stsi; struct { __u8 vector; } eoi; struct kvm_hyperv_exit hyperv; struct { __u64 esr_iss; __u64 fault_ipa; } arm_nisv; struct { __u8 error; __u8 pad[7]; __u32 reason; __u32 index; __u64 data; } msr; struct kvm_xen_exit xen; struct { long unsigned int extension_id; long unsigned int function_id; long unsigned int args[6]; long unsigned int ret[2]; } riscv_sbi; struct { long unsigned int csr_num; long unsigned int new_value; long unsigned int write_mask; long unsigned int ret_value; } riscv_csr; struct { __u32 flags; } notify; struct { __u64 flags; __u64 gpa; __u64 size; } memory_fault; char padding[256]; }; __u64 kvm_valid_regs; __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; char padding[2048]; } s; }; struct kvm_stat_data { struct kvm *kvm; const struct _kvm_stats_desc *desc; enum kvm_stat_kind kind; }; struct x86_emulate_ctxt; struct pvclock_vcpu_time_info { u32 version; u32 pad0; u64 tsc_timestamp; u64 system_time; u32 tsc_to_system_mul; s8 tsc_shift; u8 flags; u8 pad[2]; }; struct kvm_vcpu_arch { long unsigned int regs[17]; u32 regs_avail; u32 regs_dirty; long unsigned int cr0; long unsigned int cr0_guest_owned_bits; long unsigned int cr2; long unsigned int cr3; long unsigned int cr4; 
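/* Editorial annotation, not part of the generated dump: the cr0..cr8 members
 * here cache the guest's view of the control registers; the cr0/cr4
 * *_guest_owned_bits masks mark bits the guest can modify without causing a
 * VM exit. */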
long unsigned int cr4_guest_owned_bits; long unsigned int cr4_guest_rsvd_bits; long unsigned int cr8; u32 host_pkru; u32 pkru; u32 hflags; u64 efer; u64 apic_base; struct kvm_lapic *apic; bool load_eoi_exitmap_pending; long unsigned int ioapic_handled_vectors[4]; long unsigned int apic_attention; int32_t apic_arb_prio; int mp_state; u64 ia32_misc_enable_msr; u64 smbase; u64 smi_count; bool at_instruction_boundary; bool tpr_access_reporting; bool xfd_no_write_intercept; u64 ia32_xss; u64 microcode_version; u64 arch_capabilities; u64 perf_capabilities; struct kvm_mmu *mmu; struct kvm_mmu root_mmu; struct kvm_mmu guest_mmu; struct kvm_mmu nested_mmu; struct kvm_mmu *walk_mmu; struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; struct kvm_mmu_memory_cache mmu_shadow_page_cache; struct kvm_mmu_memory_cache mmu_shadowed_info_cache; struct kvm_mmu_memory_cache mmu_page_header_cache; struct fpu_guest guest_fpu; u64 xcr0; u64 guest_supported_xcr0; struct kvm_pio_request pio; void *pio_data; void *sev_pio_data; unsigned int sev_pio_count; u8 event_exit_inst_len; bool exception_from_userspace; struct kvm_queued_exception exception; struct kvm_queued_exception exception_vmexit; struct kvm_queued_interrupt interrupt; int halt_request; int cpuid_nent; struct kvm_cpuid_entry2 *cpuid_entries; struct kvm_hypervisor_cpuid kvm_cpuid; bool is_amd_compatible; struct { long unsigned int enabled[1]; } governed_features; u64 reserved_gpa_bits; int maxphyaddr; struct x86_emulate_ctxt *emulate_ctxt; bool emulate_regs_need_sync_to_vcpu; bool emulate_regs_need_sync_from_vcpu; int (*complete_userspace_io)(struct kvm_vcpu *); gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; struct gfn_to_pfn_cache pv_time; bool pvclock_set_guest_stopped_request; struct { u8 preempted; u64 msr_val; u64 last_steal; struct gfn_to_hva_cache cache; } st; u64 l1_tsc_offset; u64 tsc_offset; u64 last_guest_tsc; u64 last_host_tsc; u64 tsc_offset_adjustment; u64 this_tsc_nsec; u64 this_tsc_write; u64 this_tsc_generation; bool tsc_catchup; bool tsc_always_catchup; s8 virtual_tsc_shift; u32 virtual_tsc_mult; u32 virtual_tsc_khz; s64 ia32_tsc_adjust_msr; u64 msr_ia32_power_ctl; u64 l1_tsc_scaling_ratio; u64 tsc_scaling_ratio; atomic_t nmi_queued; unsigned int nmi_pending; bool nmi_injected; bool smi_pending; u8 handling_intr_from_guest; struct kvm_mtrr mtrr_state; u64 pat; unsigned int switch_db_regs; long unsigned int db[4]; long unsigned int dr6; long unsigned int dr7; long unsigned int eff_db[4]; long unsigned int guest_debug_dr7; u64 msr_platform_info; u64 msr_misc_features_enables; u64 mcg_cap; u64 mcg_status; u64 mcg_ctl; u64 mcg_ext_ctl; u64 *mce_banks; u64 *mci_ctl2_banks; u64 mmio_gva; unsigned int mmio_access; gfn_t mmio_gfn; u64 mmio_gen; struct kvm_pmu pmu; long unsigned int singlestep_rip; cpumask_var_t wbinvd_dirty_mask; long unsigned int last_retry_eip; long unsigned int last_retry_addr; struct { bool halted; gfn_t gfns[64]; struct gfn_to_hva_cache data; u64 msr_en_val; u64 msr_int_val; u16 vec; u32 id; bool send_user_only; u32 host_apf_flags; bool delivery_as_pf_vmexit; bool pageready_pending; } apf; struct { u64 length; u64 status; } osvw; struct { u64 msr_val; struct gfn_to_hva_cache data; } pv_eoi; u64 msr_kvm_poll_control; struct { bool pv_unhalted; } pv; int pending_ioapic_eoi; int pending_external_vector; bool preempted_in_kernel; bool l1tf_flush_l1d; int last_vmentry_cpu; u64 msr_hwcr; struct { u32 features; bool enforce; } pv_cpuid; bool guest_state_protected; bool pdptrs_from_userspace; }; 
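/*
 * Illustrative sketch, not part of the BTF-generated dump: it only shows how
 * the regs[]/regs_avail pair in kvm_vcpu_arch above fit together. regs_avail
 * is a bitmask telling which regs[] slots hold a valid cached copy of a guest
 * general-purpose register. The helper name and the zero fallback are
 * assumptions made for illustration; real KVM refills the cache from hardware
 * instead of returning a default.
 */
static inline long unsigned int example_read_cached_gpr(const struct kvm_vcpu_arch *arch, int reg)
{
	if (reg < 0 || reg >= 17)
		return 0;		/* out of range: regs[] has 17 slots */
	if (!(arch->regs_avail & (1u << reg)))
		return 0;		/* not cached; KVM would re-read it here */
	return arch->regs[reg];	/* cached guest register value */
}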
struct kvm_vcpu_stat_generic { u64 halt_successful_poll; u64 halt_attempted_poll; u64 halt_poll_invalid; u64 halt_wakeup; u64 halt_poll_success_ns; u64 halt_poll_fail_ns; u64 halt_wait_ns; u64 halt_poll_success_hist[32]; u64 halt_poll_fail_hist[32]; u64 halt_wait_hist[32]; u64 blocking; }; struct kvm_vcpu_stat { struct kvm_vcpu_stat_generic generic; u64 pf_taken; u64 pf_fixed; u64 pf_emulate; u64 pf_spurious; u64 pf_fast; u64 pf_mmio_spte_created; u64 pf_guest; u64 tlb_flush; u64 invlpg; u64 exits; u64 io_exits; u64 mmio_exits; u64 signal_exits; u64 irq_window_exits; u64 nmi_window_exits; u64 l1d_flush; u64 halt_exits; u64 request_irq_exits; u64 irq_exits; u64 host_state_reload; u64 fpu_reload; u64 insn_emulation; u64 insn_emulation_fail; u64 hypercalls; u64 irq_injections; u64 nmi_injections; u64 req_event; u64 nested_run; u64 directed_yield_attempted; u64 directed_yield_successful; u64 preemption_reported; u64 preemption_other; u64 guest_mode; u64 notify_window_exits; }; struct kvm_vcpu { struct kvm *kvm; int cpu; int vcpu_id; int vcpu_idx; int ____srcu_idx; int srcu_depth; int mode; u64 requests; long unsigned int guest_debug; struct mutex mutex; struct kvm_run *run; struct rcuwait wait; struct pid *pid; int sigset_active; sigset_t sigset; unsigned int halt_poll_ns; bool valid_wakeup; int mmio_needed; int mmio_read_completed; int mmio_is_write; int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[2]; bool wants_to_run; bool preempted; bool ready; bool scheduled_out; struct kvm_vcpu_arch arch; struct kvm_vcpu_stat stat; char stats_id[48]; struct kvm_dirty_ring dirty_ring; struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; }; struct msr_bitmap_range { u32 flags; u32 nmsrs; u32 base; long unsigned int *bitmap; }; struct kvm_x86_msr_filter { u8 count; bool default_allow: 1; struct msr_bitmap_range ranges[16]; }; struct kvm_x86_nested_ops { void (*leave_nested)(struct kvm_vcpu *); bool (*is_exception_vmexit)(struct kvm_vcpu *, u8, u32); int (*check_events)(struct kvm_vcpu *); bool (*has_events)(struct kvm_vcpu *, bool); void (*triple_fault)(struct kvm_vcpu *); int (*get_state)(struct kvm_vcpu *, struct kvm_nested_state *, unsigned int); int (*set_state)(struct kvm_vcpu *, struct kvm_nested_state *, struct kvm_nested_state *); bool (*get_nested_state_pages)(struct kvm_vcpu *); int (*write_log_dirty)(struct kvm_vcpu *, gpa_t); int (*enable_evmcs)(struct kvm_vcpu *, uint16_t *); uint16_t (*get_evmcs_version)(struct kvm_vcpu *); void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *); }; typedef void cpu_emergency_virt_cb(void); struct x86_instruction_info; struct msr_data; struct kvm_x86_ops { const char *name; int (*check_processor_compatibility)(void); int (*enable_virtualization_cpu)(void); void (*disable_virtualization_cpu)(void); cpu_emergency_virt_cb *emergency_disable_virtualization_cpu; void (*hardware_unsetup)(void); bool (*has_emulated_msr)(struct kvm *, u32); void (*vcpu_after_set_cpuid)(struct kvm_vcpu *); unsigned int vm_size; int (*vm_init)(struct kvm *); void (*vm_destroy)(struct kvm *); int (*vcpu_precreate)(struct kvm *); int (*vcpu_create)(struct kvm_vcpu *); void (*vcpu_free)(struct kvm_vcpu *); void (*vcpu_reset)(struct kvm_vcpu *, bool); void (*prepare_switch_to_guest)(struct kvm_vcpu *); void (*vcpu_load)(struct kvm_vcpu *, int); void (*vcpu_put)(struct kvm_vcpu *); void (*update_exception_bitmap)(struct kvm_vcpu *); int (*get_msr)(struct kvm_vcpu *, struct msr_data *); int (*set_msr)(struct kvm_vcpu *, struct 
msr_data *); u64 (*get_segment_base)(struct kvm_vcpu *, int); void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int); int (*get_cpl)(struct kvm_vcpu *); void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int); void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *); bool (*is_valid_cr0)(struct kvm_vcpu *, long unsigned int); void (*set_cr0)(struct kvm_vcpu *, long unsigned int); void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int); bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int); void (*set_cr4)(struct kvm_vcpu *, long unsigned int); int (*set_efer)(struct kvm_vcpu *, u64); void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *); void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *); void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *); void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *); void (*sync_dirty_debug_regs)(struct kvm_vcpu *); void (*set_dr7)(struct kvm_vcpu *, long unsigned int); void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg); long unsigned int (*get_rflags)(struct kvm_vcpu *); void (*set_rflags)(struct kvm_vcpu *, long unsigned int); bool (*get_if_flag)(struct kvm_vcpu *); void (*flush_tlb_all)(struct kvm_vcpu *); void (*flush_tlb_current)(struct kvm_vcpu *); void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t); void (*flush_tlb_guest)(struct kvm_vcpu *); int (*vcpu_pre_run)(struct kvm_vcpu *); enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *, bool); int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion); int (*skip_emulated_instruction)(struct kvm_vcpu *); void (*update_emulated_instruction)(struct kvm_vcpu *); void (*set_interrupt_shadow)(struct kvm_vcpu *, int); u32 (*get_interrupt_shadow)(struct kvm_vcpu *); void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *); void (*inject_irq)(struct kvm_vcpu *, bool); void (*inject_nmi)(struct kvm_vcpu *); void (*inject_exception)(struct kvm_vcpu *); void (*cancel_injection)(struct kvm_vcpu *); int (*interrupt_allowed)(struct kvm_vcpu *, bool); int (*nmi_allowed)(struct kvm_vcpu *, bool); bool (*get_nmi_mask)(struct kvm_vcpu *); void (*set_nmi_mask)(struct kvm_vcpu *, bool); bool (*is_vnmi_pending)(struct kvm_vcpu *); bool (*set_vnmi_pending)(struct kvm_vcpu *); void (*enable_nmi_window)(struct kvm_vcpu *); void (*enable_irq_window)(struct kvm_vcpu *); void (*update_cr8_intercept)(struct kvm_vcpu *, int, int); const bool x2apic_icr_is_split; const long unsigned int required_apicv_inhibits; bool allow_apicv_in_x2apic_without_x2apic_virtualization; void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *); void (*hwapic_irr_update)(struct kvm_vcpu *, int); void (*hwapic_isr_update)(int); void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *); void (*set_virtual_apic_mode)(struct kvm_vcpu *); void (*set_apic_access_page_addr)(struct kvm_vcpu *); void (*deliver_interrupt)(struct kvm_lapic *, int, int, int); int (*sync_pir_to_irr)(struct kvm_vcpu *); int (*set_tss_addr)(struct kvm *, unsigned int); int (*set_identity_map_addr)(struct kvm *, u64); u8 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool); void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int); bool (*has_wbinvd_exit)(void); u64 (*get_l2_tsc_offset)(struct kvm_vcpu *); u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *); void (*write_tsc_offset)(struct kvm_vcpu *); void (*write_tsc_multiplier)(struct kvm_vcpu *); void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *); int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *); void 
(*handle_exit_irqoff)(struct kvm_vcpu *); int cpu_dirty_log_size; void (*update_cpu_dirty_logging)(struct kvm_vcpu *); const struct kvm_x86_nested_ops *nested_ops; void (*vcpu_blocking)(struct kvm_vcpu *); void (*vcpu_unblocking)(struct kvm_vcpu *); int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool); void (*pi_start_assignment)(struct kvm *); void (*apicv_pre_state_restore)(struct kvm_vcpu *); void (*apicv_post_state_restore)(struct kvm_vcpu *); bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *); int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *); void (*cancel_hv_timer)(struct kvm_vcpu *); void (*setup_mce)(struct kvm_vcpu *); int (*dev_get_attr)(u32, u64, u64 *); int (*mem_enc_ioctl)(struct kvm *, void *); int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *); int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *); int (*vm_copy_enc_context_from)(struct kvm *, unsigned int); int (*vm_move_enc_context_from)(struct kvm *, unsigned int); void (*guest_memory_reclaimed)(struct kvm *); int (*get_feature_msr)(u32, u64 *); int (*check_emulate_instruction)(struct kvm_vcpu *, int, void *, int); bool (*apic_init_signal_blocked)(struct kvm_vcpu *); int (*enable_l2_tlb_flush)(struct kvm_vcpu *); void (*migrate_timers)(struct kvm_vcpu *); void (*msr_filter_changed)(struct kvm_vcpu *); int (*complete_emulated_msr)(struct kvm_vcpu *, int); void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8); long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *); gva_t (*get_untagged_addr)(struct kvm_vcpu *, gva_t, unsigned int); void * (*alloc_apic_backing_page)(struct kvm_vcpu *); int (*gmem_prepare)(struct kvm *, kvm_pfn_t, gfn_t, int); void (*gmem_invalidate)(kvm_pfn_t, kvm_pfn_t); int (*private_max_mapping_level)(struct kvm *, kvm_pfn_t); }; struct kvm_x86_pmu_event_filter { __u32 action; __u32 nevents; __u32 fixed_counter_bitmap; __u32 flags; __u32 nr_includes; __u32 nr_excludes; __u64 *includes; __u64 *excludes; __u64 events[0]; }; struct kyber_cpu_latency { atomic_t buckets[48]; }; struct kyber_ctx_queue { spinlock_t lock; struct list_head rq_list[4]; }; struct sbq_wait { struct sbitmap_queue *sbq; struct wait_queue_entry wait; }; struct kyber_hctx_data { spinlock_t lock; struct list_head rqs[4]; unsigned int cur_domain; unsigned int batching; struct kyber_ctx_queue *kcqs; struct sbitmap kcq_map[4]; struct sbq_wait domain_wait[4]; struct sbq_wait_state *domain_ws[4]; atomic_t wait_index[4]; }; struct kyber_queue_data { struct request_queue *q; dev_t dev; struct sbitmap_queue domain_tokens[4]; unsigned int async_depth; struct kyber_cpu_latency *cpu_latency; struct timer_list timer; unsigned int latency_buckets[48]; long unsigned int latency_timeout[3]; int domain_p99[3]; u64 latency_targets[3]; }; struct kye_tablet_info { __u32 product; __s32 x_logical_maximum; __s32 y_logical_maximum; __s32 pressure_logical_maximum; __s32 x_physical_maximum; __s32 y_physical_maximum; __s8 unit_exponent; __s8 unit; bool has_mouse; unsigned int control_rsize; const __u8 *control_rdesc; }; union l1_cache { struct { unsigned int line_size: 8; unsigned int lines_per_tag: 8; unsigned int assoc: 8; unsigned int size_in_kb: 8; }; unsigned int val; }; union l2_cache { struct { unsigned int line_size: 8; unsigned int lines_per_tag: 4; unsigned int assoc: 4; unsigned int size_in_kb: 16; }; unsigned int val; }; union l3_cache { struct { unsigned int line_size: 8; unsigned int lines_per_tag: 4; unsigned int assoc: 4; unsigned int res: 2; unsigned int size_encoded: 
14; }; unsigned int val; }; typedef int (*lookup_by_table_id_t)(struct net *, u32); struct l3mdev_handler { lookup_by_table_id_t dev_lookup; }; struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16); struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16); struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *); }; struct lacpdu_header { struct ethhdr hdr; struct lacpdu lacpdu; }; struct ladder_device_state { struct { u32 promotion_count; u32 demotion_count; u64 promotion_time_ns; u64 demotion_time_ns; } threshold; struct { int promotion_count; int demotion_count; } stats; }; struct ladder_device { struct ladder_device_state states[10]; }; struct latch_tree_ops { bool (*less)(struct latch_tree_node *, struct latch_tree_node *); int (*comp)(void *, struct latch_tree_node *); }; struct latch_tree_root { seqcount_latch_t seq; struct rb_root tree[2]; }; struct latched_seq { seqcount_latch_t latch; u64 val[2]; }; struct sched_domain; struct lb_env { struct sched_domain *sd; struct rq *src_rq; int src_cpu; int dst_cpu; struct rq *dst_rq; struct cpumask *dst_grpmask; int new_dst_cpu; enum cpu_idle_type idle; long int imbalance; struct cpumask *cpus; unsigned int flags; unsigned int loop; unsigned int loop_break; unsigned int loop_max; enum fbq_type fbq_type; enum migration_type migration_type; struct list_head tasks; }; struct ld_semaphore { atomic_long_t count; raw_spinlock_t wait_lock; unsigned int wait_readers; struct list_head read_wait; struct list_head write_wait; struct lockdep_map dep_map; }; struct ldsem_waiter { struct list_head list; struct task_struct *task; }; struct ldt_struct { struct desc_struct *entries; unsigned int nr_entries; int slot; }; struct ldttss_desc { u16 limit0; u16 base0; u16 base1: 8; u16 type: 5; u16 dpl: 2; u16 p: 1; u16 limit1: 4; u16 zero0: 3; u16 g: 1; u16 base2: 8; u32 base3; u32 zero1; }; typedef struct ldttss_desc ldt_desc; typedef struct ldttss_desc tss_desc; struct learning_pkt { u8 mac_dst[6]; u8 mac_src[6]; __be16 type; u8 padding[46]; }; struct lease_manager_operations { bool (*lm_break)(struct file_lease *); int (*lm_change)(struct file_lease *, int, struct list_head *); void (*lm_setup)(struct file_lease *, void **); bool (*lm_breaker_owns_lease)(struct file_lease *); }; struct led_trigger {}; struct legacy_fs_context { char *legacy_data; size_t data_size; enum legacy_fs_param param_type; }; struct legacy_pic { int nr_legacy_irqs; struct irq_chip *chip; void (*mask)(unsigned int); void (*unmask)(unsigned int); void (*mask_all)(void); void (*restore_mask)(void); void (*init)(int); int (*probe)(void); int (*irq_pending)(unsigned int); void (*make_irq)(unsigned int); }; struct level_datum { struct mls_level *level; unsigned char isalias; }; struct lifebook_data { struct input_dev *dev2; char phys[32]; }; struct limit_names { const char *name; const char *unit; }; struct linger { int l_onoff; int l_linger; }; struct link_mode_info { int speed; u8 lanes; u8 duplex; }; struct linked_reg { u8 frameno; union { u8 spi; u8 regno; }; bool is_reg; }; struct linked_regs { int cnt; struct linked_reg entries[6]; }; struct linkinfo_reply_data { struct ethnl_reply_data base; struct ethtool_link_ksettings ksettings; struct ethtool_link_settings *lsettings; }; struct linkmodes_reply_data { struct ethnl_reply_data base; struct ethtool_link_ksettings ksettings; struct ethtool_link_settings *lsettings; bool 
peer_empty; }; struct linkstate_reply_data { struct ethnl_reply_data base; int link; int sqi; int sqi_max; struct ethtool_link_ext_stats link_stats; bool link_ext_state_provided; struct ethtool_link_ext_state_info ethtool_link_ext_state_info; }; struct linux_binprm; struct linux_binfmt { struct list_head lh; struct module *module; int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); int (*core_dump)(struct coredump_params *); long unsigned int min_coredump; }; struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; }; struct linux_binprm { struct vm_area_struct *vma; long unsigned int vma_pages; long unsigned int argmin; struct mm_struct *mm; long unsigned int p; unsigned int have_execfd: 1; unsigned int execfd_creds: 1; unsigned int secureexec: 1; unsigned int point_of_no_return: 1; struct file *executable; struct file *interpreter; struct file *file; struct cred *cred; int unsafe; unsigned int per_clear; int argc; int envc; const char *filename; const char *interp; const char *fdpath; unsigned int interp_flags; int execfd; long unsigned int loader; long unsigned int exec; struct rlimit rlim_stack; char buf[256]; }; struct linux_binprm__safe_trusted { struct file *file; }; struct linux_dirent { long unsigned int d_ino; long unsigned int d_off; short unsigned int d_reclen; char d_name[0]; }; struct linux_dirent64 { u64 d_ino; s64 d_off; short unsigned int d_reclen; unsigned char d_type; char d_name[0]; }; struct linux_efi_initrd { long unsigned int base; long unsigned int size; }; struct linux_efi_memreserve { int size; atomic_t count; phys_addr_t next; struct { phys_addr_t base; phys_addr_t size; } entry[0]; }; struct linux_efi_random_seed { u32 size; u8 bits[0]; }; struct linux_efi_tpm_eventlog { u32 size; u32 final_events_preboot_size; u8 version; u8 log[0]; }; struct linux_logo { int type; unsigned int width; unsigned int height; unsigned int clutsize; const unsigned char *clut; const unsigned char *data; }; struct linux_mib { long unsigned int mibs[132]; }; struct linux_tls_mib { long unsigned int mibs[13]; }; struct lirc_scancode { __u64 timestamp; __u16 flags; __u16 rc_proto; __u32 keycode; __u64 scancode; }; struct lirc_fh { struct list_head list; struct rc_dev *rc; int carrier_low; struct { union { struct __kfifo kfifo; unsigned int *type; const unsigned int *const_type; char (*rectype)[0]; unsigned int *ptr; const unsigned int *ptr_const; }; unsigned int buf[0]; } rawir; struct { union { struct __kfifo kfifo; struct lirc_scancode *type; const struct lirc_scancode *const_type; char (*rectype)[0]; struct lirc_scancode *ptr; const struct lirc_scancode *ptr_const; }; struct lirc_scancode buf[0]; } scancodes; wait_queue_head_t wait_poll; u8 send_mode; u8 rec_mode; }; struct list_lru_node; struct list_lru { struct list_lru_node *node; struct list_head list; int shrinker_id; bool memcg_aware; struct xarray xa; }; struct list_lru_one { struct list_head list; long int nr_items; }; struct list_lru_memcg { struct callback_head rcu; struct list_lru_one node[0]; }; struct list_lru_memcg_table { struct list_lru_memcg *mlru; struct mem_cgroup *memcg; }; struct list_lru_node { spinlock_t lock; struct list_lru_one lru; long int nr_items; long: 64; long: 64; long: 64; long: 64; }; struct listener { struct list_head list; pid_t pid; char valid; }; struct listener_list { struct rw_semaphore sem; struct list_head list; }; struct listeners { struct callback_head rcu; long unsigned int masks[0]; }; struct load_info { const char *name; struct module *mod; Elf64_Ehdr 
*hdr; long unsigned int len; Elf64_Shdr *sechdrs; char *secstrings; char *strtab; long unsigned int symoffs; long unsigned int stroffs; long unsigned int init_typeoffs; long unsigned int core_typeoffs; bool sig_ok; long unsigned int mod_kallsyms_init_off; struct { unsigned int sym; unsigned int str; unsigned int mod; unsigned int vers; unsigned int info; unsigned int pcpu; } index; }; struct location; struct loc_track { long unsigned int max; long unsigned int count; struct location *loc; loff_t idx; }; struct local_ports { u32 range; bool warned; }; struct location { depot_stack_handle_t handle; long unsigned int count; long unsigned int addr; long unsigned int waste; long long int sum_time; long int min_time; long int max_time; long int min_pid; long int max_pid; long unsigned int cpus[2]; nodemask_t nodes; }; struct lock_chain { unsigned int irq_context: 2; unsigned int depth: 6; unsigned int base: 24; struct hlist_node entry; u64 chain_key; }; typedef int (*lock_cmp_fn)(const struct lockdep_map *, const struct lockdep_map *); typedef void (*lock_print_fn)(const struct lockdep_map *); struct lock_trace; struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct list_head locks_after; struct list_head locks_before; const struct lockdep_subclass_key *key; lock_cmp_fn cmp_fn; lock_print_fn print_fn; unsigned int subclass; unsigned int dep_gen_id; long unsigned int usage_mask; const struct lock_trace *usage_traces[10]; const char *name; int name_version; u8 wait_type_inner; u8 wait_type_outer; u8 lock_type; }; struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; const struct lock_trace *trace; u16 distance; u8 dep; u8 only_xr; struct lock_list *parent; }; struct lock_manager_operations { void *lm_mod_owner; fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_lock_expirable)(struct file_lock *); void (*lm_expire_lock)(void); }; struct lock_trace { struct hlist_node hash_entry; u32 hash; u32 nr_entries; long unsigned int entries[0]; }; struct locks_iterator { int li_cpu; loff_t li_pos; }; struct logic_pio_host_ops { u32 (*in)(void *, long unsigned int, size_t); void (*out)(void *, long unsigned int, u32, size_t); u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); }; struct logic_pio_hwaddr { struct list_head list; struct fwnode_handle *fwnode; resource_size_t hw_start; resource_size_t io_start; resource_size_t size; long unsigned int flags; void *hostdata; const struct logic_pio_host_ops *ops; }; struct logo_data { int depth; int needs_directpalette; int needs_truepalette; int needs_cmapreset; const struct linux_logo *logo; }; struct lookup_args { int offset; const struct in6_addr *addr; }; struct loop_cmd { struct list_head list_entry; bool use_aio; atomic_t ref; long int ret; struct kiocb iocb; struct bio_vec *bvec; struct cgroup_subsys_state *blkcg_css; struct cgroup_subsys_state *memcg_css; }; struct loop_info64 { __u64 lo_device; __u64 lo_inode; __u64 lo_rdevice; __u64 lo_offset; __u64 lo_sizelimit; __u32 lo_number; __u32 lo_encrypt_type; __u32 lo_encrypt_key_size; __u32 lo_flags; __u8 lo_file_name[64]; __u8 lo_crypt_name[64]; __u8 lo_encrypt_key[32]; __u64 lo_init[2]; }; struct loop_config { __u32 fd; __u32 block_size; struct loop_info64 info; __u64 __reserved[8]; }; struct loop_device { int lo_number; loff_t 
lo_offset; loff_t lo_sizelimit; int lo_flags; char lo_file_name[64]; struct file *lo_backing_file; struct block_device *lo_device; gfp_t old_gfp_mask; spinlock_t lo_lock; int lo_state; spinlock_t lo_work_lock; struct workqueue_struct *workqueue; struct work_struct rootcg_work; struct list_head rootcg_cmd_list; struct list_head idle_worker_list; struct rb_root worker_tree; struct timer_list timer; bool use_dio; bool sysfs_inited; struct request_queue *lo_queue; struct blk_mq_tag_set tag_set; struct gendisk *lo_disk; struct mutex lo_mutex; bool idr_visible; }; struct loop_info { int lo_number; __kernel_old_dev_t lo_device; long unsigned int lo_inode; __kernel_old_dev_t lo_rdevice; int lo_offset; int lo_encrypt_type; int lo_encrypt_key_size; int lo_flags; char lo_name[64]; unsigned char lo_encrypt_key[32]; long unsigned int lo_init[2]; char reserved[4]; }; struct loop_worker { struct rb_node rb_node; struct work_struct work; struct list_head cmd_list; struct list_head idle_list; struct loop_device *lo; struct cgroup_subsys_state *blkcg_css; long unsigned int last_ran_at; }; struct loopback_dev { struct rc_dev *dev; u32 txmask; u32 txcarrier; u32 txduty; bool idle; bool wideband; bool carrierreport; u32 rxcarriermin; u32 rxcarriermax; }; union lower_chunk { union lower_chunk *next; long unsigned int data[256]; }; struct lpi_constraints { acpi_handle handle; int min_dstate; }; struct lpi_device_constraint { int uid; int min_dstate; int function_states; }; struct lpi_device_constraint_amd { char *name; int enabled; int function_states; int min_dstate; }; struct lpi_device_info { char *name; int enabled; union acpi_object *package; }; struct lpit_residency_info { struct acpi_generic_address gaddr; u64 frequency; void *iomem_addr; }; struct lpm_trie_node; struct lpm_trie { struct bpf_map map; struct lpm_trie_node *root; size_t n_entries; size_t max_prefixlen; size_t data_size; spinlock_t lock; }; struct lpm_trie_node { struct callback_head rcu; struct lpm_trie_node *child[2]; u32 prefixlen; u32 flags; u8 data[0]; }; struct lpss8250_board; struct lpss8250 { struct dw8250_port_data data; struct lpss8250_board *board; struct dw_dma_chip dma_chip; struct dw_dma_slave dma_param; u8 dma_maxburst; }; struct lpss8250_board { long unsigned int freq; unsigned int base_baud; int (*setup)(struct lpss8250 *, struct uart_port *); void (*exit)(struct lpss8250 *); }; struct zswap_lruvec_state {}; struct pglist_data; struct lruvec { struct list_head lists[5]; spinlock_t lru_lock; long unsigned int anon_cost; long unsigned int file_cost; atomic_long_t nonresident_age; long unsigned int refaults[2]; long unsigned int flags; struct pglist_data *pgdat; struct zswap_lruvec_state zswap_lruvec_state; }; struct lruvec_stats { long int state[31]; long int state_local[31]; long int state_pending[31]; }; struct lruvec_stats_percpu { long int state[31]; long int state_prev[31]; }; struct skcipher_alg_common { unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; unsigned int statesize; struct crypto_alg base; }; struct lskcipher_alg { int (*setkey)(struct crypto_lskcipher *, const u8 *, unsigned int); int (*encrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); int (*decrypt)(struct crypto_lskcipher *, const u8 *, u8 *, unsigned int, u8 *, u32); int (*init)(struct crypto_lskcipher *); void (*exit)(struct crypto_lskcipher *); struct skcipher_alg_common co; }; struct lskcipher_instance { void (*free)(struct lskcipher_instance *); union { struct { char head[64]; 
struct crypto_instance base; } s; struct lskcipher_alg alg; }; }; struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_ib; int lbs_inode; int lbs_sock; int lbs_superblock; int lbs_ipc; int lbs_key; int lbs_msg_msg; int lbs_perf_event; int lbs_task; int lbs_xattr_count; int lbs_tun_dev; int lbs_bdev; }; struct lsm_ctx { __u64 id; __u64 flags; __u64 len; __u64 ctx_len; __u8 ctx[0]; }; struct lsm_ibendport_audit { const char *dev_name; u8 port; }; struct lsm_ibpkey_audit { u64 subnet_prefix; u16 pkey; }; struct lsm_id { const char *name; u64 id; }; struct lsm_info { const char *name; enum lsm_order order; long unsigned int flags; int *enabled; int (*init)(void); struct lsm_blob_sizes *blobs; }; struct lsm_ioctlop_audit { struct path path; u16 cmd; }; struct lsm_network_audit { int netif; const struct sock *sk; u16 family; __be16 dport; __be16 sport; union { struct { __be32 daddr; __be32 saddr; } v4; struct { struct in6_addr daddr; struct in6_addr saddr; } v6; } fam; }; struct security_hook_list; struct static_key_false; struct lsm_static_call { struct static_call_key *key; void *trampoline; struct security_hook_list *hl; struct static_key_false *active; }; struct lsm_static_calls_table { struct lsm_static_call binder_set_context_mgr[4]; struct lsm_static_call binder_transaction[4]; struct lsm_static_call binder_transfer_binder[4]; struct lsm_static_call binder_transfer_file[4]; struct lsm_static_call ptrace_access_check[4]; struct lsm_static_call ptrace_traceme[4]; struct lsm_static_call capget[4]; struct lsm_static_call capset[4]; struct lsm_static_call capable[4]; struct lsm_static_call quotactl[4]; struct lsm_static_call quota_on[4]; struct lsm_static_call syslog[4]; struct lsm_static_call settime[4]; struct lsm_static_call vm_enough_memory[4]; struct lsm_static_call bprm_creds_for_exec[4]; struct lsm_static_call bprm_creds_from_file[4]; struct lsm_static_call bprm_check_security[4]; struct lsm_static_call bprm_committing_creds[4]; struct lsm_static_call bprm_committed_creds[4]; struct lsm_static_call fs_context_submount[4]; struct lsm_static_call fs_context_dup[4]; struct lsm_static_call fs_context_parse_param[4]; struct lsm_static_call sb_alloc_security[4]; struct lsm_static_call sb_delete[4]; struct lsm_static_call sb_free_security[4]; struct lsm_static_call sb_free_mnt_opts[4]; struct lsm_static_call sb_eat_lsm_opts[4]; struct lsm_static_call sb_mnt_opts_compat[4]; struct lsm_static_call sb_remount[4]; struct lsm_static_call sb_kern_mount[4]; struct lsm_static_call sb_show_options[4]; struct lsm_static_call sb_statfs[4]; struct lsm_static_call sb_mount[4]; struct lsm_static_call sb_umount[4]; struct lsm_static_call sb_pivotroot[4]; struct lsm_static_call sb_set_mnt_opts[4]; struct lsm_static_call sb_clone_mnt_opts[4]; struct lsm_static_call move_mount[4]; struct lsm_static_call dentry_init_security[4]; struct lsm_static_call dentry_create_files_as[4]; struct lsm_static_call path_unlink[4]; struct lsm_static_call path_mkdir[4]; struct lsm_static_call path_rmdir[4]; struct lsm_static_call path_mknod[4]; struct lsm_static_call path_post_mknod[4]; struct lsm_static_call path_truncate[4]; struct lsm_static_call path_symlink[4]; struct lsm_static_call path_link[4]; struct lsm_static_call path_rename[4]; struct lsm_static_call path_chmod[4]; struct lsm_static_call path_chown[4]; struct lsm_static_call path_chroot[4]; struct lsm_static_call path_notify[4]; struct lsm_static_call inode_alloc_security[4]; struct lsm_static_call inode_free_security[4]; struct lsm_static_call 
inode_free_security_rcu[4]; struct lsm_static_call inode_init_security[4]; struct lsm_static_call inode_init_security_anon[4]; struct lsm_static_call inode_create[4]; struct lsm_static_call inode_post_create_tmpfile[4]; struct lsm_static_call inode_link[4]; struct lsm_static_call inode_unlink[4]; struct lsm_static_call inode_symlink[4]; struct lsm_static_call inode_mkdir[4]; struct lsm_static_call inode_rmdir[4]; struct lsm_static_call inode_mknod[4]; struct lsm_static_call inode_rename[4]; struct lsm_static_call inode_readlink[4]; struct lsm_static_call inode_follow_link[4]; struct lsm_static_call inode_permission[4]; struct lsm_static_call inode_setattr[4]; struct lsm_static_call inode_post_setattr[4]; struct lsm_static_call inode_getattr[4]; struct lsm_static_call inode_xattr_skipcap[4]; struct lsm_static_call inode_setxattr[4]; struct lsm_static_call inode_post_setxattr[4]; struct lsm_static_call inode_getxattr[4]; struct lsm_static_call inode_listxattr[4]; struct lsm_static_call inode_removexattr[4]; struct lsm_static_call inode_post_removexattr[4]; struct lsm_static_call inode_set_acl[4]; struct lsm_static_call inode_post_set_acl[4]; struct lsm_static_call inode_get_acl[4]; struct lsm_static_call inode_remove_acl[4]; struct lsm_static_call inode_post_remove_acl[4]; struct lsm_static_call inode_need_killpriv[4]; struct lsm_static_call inode_killpriv[4]; struct lsm_static_call inode_getsecurity[4]; struct lsm_static_call inode_setsecurity[4]; struct lsm_static_call inode_listsecurity[4]; struct lsm_static_call inode_getsecid[4]; struct lsm_static_call inode_copy_up[4]; struct lsm_static_call inode_copy_up_xattr[4]; struct lsm_static_call inode_setintegrity[4]; struct lsm_static_call kernfs_init_security[4]; struct lsm_static_call file_permission[4]; struct lsm_static_call file_alloc_security[4]; struct lsm_static_call file_release[4]; struct lsm_static_call file_free_security[4]; struct lsm_static_call file_ioctl[4]; struct lsm_static_call file_ioctl_compat[4]; struct lsm_static_call mmap_addr[4]; struct lsm_static_call mmap_file[4]; struct lsm_static_call file_mprotect[4]; struct lsm_static_call file_lock[4]; struct lsm_static_call file_fcntl[4]; struct lsm_static_call file_set_fowner[4]; struct lsm_static_call file_send_sigiotask[4]; struct lsm_static_call file_receive[4]; struct lsm_static_call file_open[4]; struct lsm_static_call file_post_open[4]; struct lsm_static_call file_truncate[4]; struct lsm_static_call task_alloc[4]; struct lsm_static_call task_free[4]; struct lsm_static_call cred_alloc_blank[4]; struct lsm_static_call cred_free[4]; struct lsm_static_call cred_prepare[4]; struct lsm_static_call cred_transfer[4]; struct lsm_static_call cred_getsecid[4]; struct lsm_static_call kernel_act_as[4]; struct lsm_static_call kernel_create_files_as[4]; struct lsm_static_call kernel_module_request[4]; struct lsm_static_call kernel_load_data[4]; struct lsm_static_call kernel_post_load_data[4]; struct lsm_static_call kernel_read_file[4]; struct lsm_static_call kernel_post_read_file[4]; struct lsm_static_call task_fix_setuid[4]; struct lsm_static_call task_fix_setgid[4]; struct lsm_static_call task_fix_setgroups[4]; struct lsm_static_call task_setpgid[4]; struct lsm_static_call task_getpgid[4]; struct lsm_static_call task_getsid[4]; struct lsm_static_call current_getsecid_subj[4]; struct lsm_static_call task_getsecid_obj[4]; struct lsm_static_call task_setnice[4]; struct lsm_static_call task_setioprio[4]; struct lsm_static_call task_getioprio[4]; struct lsm_static_call task_prlimit[4]; 
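/* Editorial annotation, not part of the generated dump: every LSM hook in
 * this table owns a fixed array of 4 static-call slots, one per security
 * module this kernel build can stack on the hook (the constant behind the
 * [4] is assumed to be MAX_LSM_COUNT; BTF erases the macro name). */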
struct lsm_static_call task_setrlimit[4]; struct lsm_static_call task_setscheduler[4]; struct lsm_static_call task_getscheduler[4]; struct lsm_static_call task_movememory[4]; struct lsm_static_call task_kill[4]; struct lsm_static_call task_prctl[4]; struct lsm_static_call task_to_inode[4]; struct lsm_static_call userns_create[4]; struct lsm_static_call ipc_permission[4]; struct lsm_static_call ipc_getsecid[4]; struct lsm_static_call msg_msg_alloc_security[4]; struct lsm_static_call msg_msg_free_security[4]; struct lsm_static_call msg_queue_alloc_security[4]; struct lsm_static_call msg_queue_free_security[4]; struct lsm_static_call msg_queue_associate[4]; struct lsm_static_call msg_queue_msgctl[4]; struct lsm_static_call msg_queue_msgsnd[4]; struct lsm_static_call msg_queue_msgrcv[4]; struct lsm_static_call shm_alloc_security[4]; struct lsm_static_call shm_free_security[4]; struct lsm_static_call shm_associate[4]; struct lsm_static_call shm_shmctl[4]; struct lsm_static_call shm_shmat[4]; struct lsm_static_call sem_alloc_security[4]; struct lsm_static_call sem_free_security[4]; struct lsm_static_call sem_associate[4]; struct lsm_static_call sem_semctl[4]; struct lsm_static_call sem_semop[4]; struct lsm_static_call netlink_send[4]; struct lsm_static_call d_instantiate[4]; struct lsm_static_call getselfattr[4]; struct lsm_static_call setselfattr[4]; struct lsm_static_call getprocattr[4]; struct lsm_static_call setprocattr[4]; struct lsm_static_call ismaclabel[4]; struct lsm_static_call secid_to_secctx[4]; struct lsm_static_call secctx_to_secid[4]; struct lsm_static_call release_secctx[4]; struct lsm_static_call inode_invalidate_secctx[4]; struct lsm_static_call inode_notifysecctx[4]; struct lsm_static_call inode_setsecctx[4]; struct lsm_static_call inode_getsecctx[4]; struct lsm_static_call unix_stream_connect[4]; struct lsm_static_call unix_may_send[4]; struct lsm_static_call socket_create[4]; struct lsm_static_call socket_post_create[4]; struct lsm_static_call socket_socketpair[4]; struct lsm_static_call socket_bind[4]; struct lsm_static_call socket_connect[4]; struct lsm_static_call socket_listen[4]; struct lsm_static_call socket_accept[4]; struct lsm_static_call socket_sendmsg[4]; struct lsm_static_call socket_recvmsg[4]; struct lsm_static_call socket_getsockname[4]; struct lsm_static_call socket_getpeername[4]; struct lsm_static_call socket_getsockopt[4]; struct lsm_static_call socket_setsockopt[4]; struct lsm_static_call socket_shutdown[4]; struct lsm_static_call socket_sock_rcv_skb[4]; struct lsm_static_call socket_getpeersec_stream[4]; struct lsm_static_call socket_getpeersec_dgram[4]; struct lsm_static_call sk_alloc_security[4]; struct lsm_static_call sk_free_security[4]; struct lsm_static_call sk_clone_security[4]; struct lsm_static_call sk_getsecid[4]; struct lsm_static_call sock_graft[4]; struct lsm_static_call inet_conn_request[4]; struct lsm_static_call inet_csk_clone[4]; struct lsm_static_call inet_conn_established[4]; struct lsm_static_call secmark_relabel_packet[4]; struct lsm_static_call secmark_refcount_inc[4]; struct lsm_static_call secmark_refcount_dec[4]; struct lsm_static_call req_classify_flow[4]; struct lsm_static_call tun_dev_alloc_security[4]; struct lsm_static_call tun_dev_create[4]; struct lsm_static_call tun_dev_attach_queue[4]; struct lsm_static_call tun_dev_attach[4]; struct lsm_static_call tun_dev_open[4]; struct lsm_static_call sctp_assoc_request[4]; struct lsm_static_call sctp_bind_connect[4]; struct lsm_static_call sctp_sk_clone[4]; struct lsm_static_call 
sctp_assoc_established[4]; struct lsm_static_call mptcp_add_subflow[4]; struct lsm_static_call key_alloc[4]; struct lsm_static_call key_permission[4]; struct lsm_static_call key_getsecurity[4]; struct lsm_static_call key_post_create_or_update[4]; struct lsm_static_call audit_rule_init[4]; struct lsm_static_call audit_rule_known[4]; struct lsm_static_call audit_rule_match[4]; struct lsm_static_call audit_rule_free[4]; struct lsm_static_call bpf[4]; struct lsm_static_call bpf_map[4]; struct lsm_static_call bpf_prog[4]; struct lsm_static_call bpf_map_create[4]; struct lsm_static_call bpf_map_free[4]; struct lsm_static_call bpf_prog_load[4]; struct lsm_static_call bpf_prog_free[4]; struct lsm_static_call bpf_token_create[4]; struct lsm_static_call bpf_token_free[4]; struct lsm_static_call bpf_token_cmd[4]; struct lsm_static_call bpf_token_capable[4]; struct lsm_static_call locked_down[4]; struct lsm_static_call perf_event_open[4]; struct lsm_static_call perf_event_alloc[4]; struct lsm_static_call perf_event_read[4]; struct lsm_static_call perf_event_write[4]; struct lsm_static_call uring_override_creds[4]; struct lsm_static_call uring_sqpoll[4]; struct lsm_static_call uring_cmd[4]; struct lsm_static_call initramfs_populated[4]; struct lsm_static_call bdev_alloc_security[4]; struct lsm_static_call bdev_free_security[4]; struct lsm_static_call bdev_setintegrity[4]; }; struct lwq { spinlock_t lock; struct llist_node *ready; struct llist_head new; }; struct lwtunnel_encap_ops { int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); void (*destroy_state)(struct lwtunnel_state *); int (*output)(struct net *, struct sock *, struct sk_buff *); int (*input)(struct sk_buff *); int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); int (*get_encap_size)(struct lwtunnel_state *); int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); int (*xmit)(struct sk_buff *); struct module *owner; }; struct lwtunnel_state { __u16 type; __u16 flags; __u16 headroom; atomic_t refcnt; int (*orig_output)(struct net *, struct sock *, struct sk_buff *); int (*orig_input)(struct sk_buff *); struct callback_head rcu; __u8 data[0]; }; struct lzma2_dec { enum lzma2_seq sequence; enum lzma2_seq next_sequence; uint32_t uncompressed; uint32_t compressed; bool need_dict_reset; bool need_props; }; struct lzma_len_dec { uint16_t choice; uint16_t choice2; uint16_t low[128]; uint16_t mid[128]; uint16_t high[256]; }; struct lzma_dec { uint32_t rep0; uint32_t rep1; uint32_t rep2; uint32_t rep3; enum lzma_state state; uint32_t len; uint32_t lc; uint32_t literal_pos_mask; uint32_t pos_mask; uint16_t is_match[192]; uint16_t is_rep[12]; uint16_t is_rep0[12]; uint16_t is_rep1[12]; uint16_t is_rep2[12]; uint16_t is_rep0_long[192]; uint16_t dist_slot[256]; uint16_t dist_special[114]; uint16_t dist_align[16]; struct lzma_len_dec match_len_dec; struct lzma_len_dec rep_len_dec; uint16_t literal[12288]; }; struct lzma_header { uint8_t pos; uint32_t dict_size; uint64_t dst_size; } __attribute__((packed)); struct ma_topiary { struct maple_enode *head; struct maple_enode *tail; struct maple_tree *mtree; }; struct maple_node; struct ma_wr_state { struct ma_state *mas; struct maple_node *node; long unsigned int r_min; long unsigned int r_max; enum maple_type type; unsigned char offset_end; long unsigned int *pivots; long unsigned int end_piv; void **slots; void *entry; void *content; }; struct mac_driver_desc { __be16 signature; __be16 block_size; __be32 
block_count; }; struct mac_partition { __be16 signature; __be16 res1; __be32 map_count; __be32 start_block; __be32 block_count; char name[32]; char type[32]; __be32 data_start; __be32 data_count; __be32 status; __be32 boot_start; __be32 boot_size; __be32 boot_load; __be32 boot_load2; __be32 boot_entry; __be32 boot_entry2; __be32 boot_cksum; char processor[16]; }; struct machine_ops { void (*restart)(char *); void (*halt)(void); void (*power_off)(void); void (*shutdown)(void); void (*crash_shutdown)(struct pt_regs *); void (*emergency_restart)(void); }; struct macsec_info { sci_t sci; }; struct mmu_gather; struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; }; struct mafield { const char *prefix; int field; }; struct map_attribute { struct attribute attr; ssize_t (*show)(struct efi_runtime_map_entry *, char *); }; struct map_files_info { long unsigned int start; long unsigned int end; fmode_t mode; }; struct map_info { struct map_info *next; struct mm_struct *mm; long unsigned int vaddr; }; struct map_iter { void *key; bool done; }; struct map_range { long unsigned int start; long unsigned int end; unsigned int page_size_mask; }; struct maple_alloc { long unsigned int total; unsigned char node_count; unsigned int request_count; struct maple_alloc *slot[30]; }; struct maple_pnode; struct maple_metadata { unsigned char end; unsigned char gap; }; struct maple_arange_64 { struct maple_pnode *parent; long unsigned int pivot[9]; void *slot[10]; long unsigned int gap[10]; struct maple_metadata meta; }; struct maple_big_node { struct maple_pnode *parent; long unsigned int pivot[33]; union { struct maple_enode *slot[34]; struct { long unsigned int padding[21]; long unsigned int gap[21]; }; }; unsigned char b_end; enum maple_type type; }; struct maple_range_64 { struct maple_pnode *parent; long unsigned int pivot[15]; union { void *slot[16]; struct { void *pad[15]; struct maple_metadata meta; }; }; }; struct maple_node { union { struct { struct maple_pnode *parent; void *slot[31]; }; struct { void *pad; struct callback_head rcu; struct maple_enode *piv_parent; unsigned char parent_slot; enum maple_type type; unsigned char slot_len; unsigned int ma_flags; }; struct maple_range_64 mr64; struct maple_arange_64 ma64; struct maple_alloc alloc; }; }; struct maple_subtree_state { struct ma_state *orig_l; struct ma_state *orig_r; struct ma_state *l; struct ma_state *m; struct ma_state *r; struct ma_topiary *free; struct ma_topiary *destroy; struct maple_big_node *bn; }; struct maple_topiary { struct maple_pnode *parent; struct maple_enode *next; }; struct match_token { int token; const char *pattern; }; struct math_emu_info { long int ___orig_eip; struct pt_regs *regs; }; struct mb_cache { struct hlist_bl_head *c_hash; int c_bucket_bits; long unsigned int c_max_entries; spinlock_t c_list_lock; struct list_head c_list; long unsigned int c_entry_count; struct shrinker *c_shrink; struct work_struct c_shrink_work; }; struct mb_cache_entry { struct list_head e_list; struct hlist_bl_node e_hash_list; atomic_t e_refcnt; u32 e_key; long unsigned int e_flags; u64 e_value; }; struct mbox_controller; struct mbox_client; struct mbox_chan { struct mbox_controller *mbox; unsigned int txdone_method; struct mbox_client *cl; struct completion tx_complete; void *active_req; unsigned int msg_count; unsigned int msg_free; void *msg_data[20]; spinlock_t lock; void *con_priv; }; struct mbox_chan_ops { int (*send_data)(struct mbox_chan *, void *); int (*flush)(struct mbox_chan *, long unsigned int); int 
(*startup)(struct mbox_chan *); void (*shutdown)(struct mbox_chan *); bool (*last_tx_done)(struct mbox_chan *); bool (*peek_data)(struct mbox_chan *); }; struct mbox_client { struct device *dev; bool tx_block; long unsigned int tx_tout; bool knows_txdone; void (*rx_callback)(struct mbox_client *, void *); void (*tx_prepare)(struct mbox_client *, void *); void (*tx_done)(struct mbox_client *, void *, int); }; struct mbox_controller { struct device *dev; const struct mbox_chan_ops *ops; struct mbox_chan *chans; int num_chans; bool txdone_irq; bool txdone_poll; unsigned int txpoll_period; struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); struct hrtimer poll_hrt; spinlock_t poll_hrt_lock; struct list_head node; }; struct rtc_time; struct mc146818_get_time_callback_param { struct rtc_time *time; unsigned char ctrl; unsigned char century; }; struct mca_config { __u64 lmce_disabled: 1; __u64 disabled: 1; __u64 ser: 1; __u64 recovery: 1; __u64 bios_cmci_threshold: 1; __u64 initialized: 1; __u64 __reserved: 58; bool dont_log_ce; bool cmci_disabled; bool ignore_ce; bool print_all; int monarch_timeout; int panic_timeout; u32 rip_msr; s8 bootlog; }; struct storm_bank { u64 history; u64 timestamp; bool in_storm_mode; bool poll_only; }; struct mca_storm_desc { struct storm_bank banks[64]; u8 stormy_bank_count; bool poll_mode; }; struct mce { __u64 status; __u64 misc; __u64 addr; __u64 mcgstatus; __u64 ip; __u64 tsc; __u64 time; __u8 cpuvendor; __u8 inject_flags; __u8 severity; __u8 pad; __u32 cpuid; __u8 cs; __u8 bank; __u8 cpu; __u8 finished; __u32 extcpu; __u32 socketid; __u32 apicid; __u64 mcgcap; __u64 synd; __u64 ipid; __u64 ppin; __u32 microcode; __u64 kflags; }; struct mce_bank { u64 ctl; __u64 init: 1; __u64 lsb_in_status: 1; __u64 __reserved_1: 62; }; struct mce_bank_dev { struct device_attribute attr; char attrname[16]; u8 bank; }; struct mce_evt_llist { struct llist_node llnode; struct mce mce; }; struct mce_vendor_flags { __u64 overflow_recov: 1; __u64 succor: 1; __u64 smca: 1; __u64 zen_ifu_quirk: 1; __u64 amd_threshold: 1; __u64 p5: 1; __u64 winchip: 1; __u64 snb_ifu_quirk: 1; __u64 skx_repmov_quirk: 1; __u64 __reserved_0: 55; }; struct mcs_spinlock { struct mcs_spinlock *next; int locked; int count; }; struct md5_state { u32 hash[4]; u32 block[16]; u64 byte_count; }; struct mdio_bus_stats { u64_stats_t transfers; u64_stats_t errors; u64_stats_t writes; u64_stats_t reads; struct u64_stats_sync syncp; }; struct reset_control; struct mii_bus; struct mdio_device { struct device dev; struct mii_bus *bus; char modalias[32]; int (*bus_match)(struct device *, const struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; int reset_state; struct gpio_desc *reset_gpio; struct reset_control *reset_ctrl; unsigned int reset_assert_delay; unsigned int reset_deassert_delay; }; struct mdio_driver_common { struct device_driver driver; int flags; }; struct mei_bus_message { u8 hbm_cmd; u8 data[0]; }; struct mei_fw_status { int count; u32 status[6]; }; struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(const struct pci_dev *); const char *kind; size_t dma_size[3]; u32 fw_ver_supported: 1; u32 hw_trc_supported: 1; }; struct mei_dma_data { u8 buffer_id; void *vaddr; dma_addr_t daddr; size_t size; }; struct mei_device; struct mei_me_client; struct mei_cl_device; struct mei_cl { struct list_head link; struct mei_device *dev; enum file_state state; wait_queue_head_t 
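/*
 * struct mbox_client, struct mbox_chan and struct mbox_controller above
 * form the kernel mailbox framework: a controller driver provides the
 * channels, a consumer fills in an mbox_client and requests one. A
 * hedged sketch of the client side, using <linux/mailbox_client.h>:
 *
 *   #include <linux/mailbox_client.h>
 *
 *   static void demo_rx(struct mbox_client *cl, void *msg)
 *   {
 *           dev_info(cl->dev, "mailbox rx\n");
 *   }
 *
 *   static int demo_send(struct device *dev, void *msg)
 *   {
 *           struct mbox_client cl = {
 *                   .dev         = dev,
 *                   .rx_callback = demo_rx,
 *                   .tx_block    = true,   // mbox_send_message() waits...
 *                   .tx_tout     = 500,    // ...up to 500 ms for tx done
 *           };
 *           struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *
 *           if (IS_ERR(chan))
 *                   return PTR_ERR(chan);
 *           mbox_send_message(chan, msg);
 *           mbox_free_channel(chan);
 *           return 0;
 *   }
 */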
tx_wait; wait_queue_head_t rx_wait; wait_queue_head_t wait; wait_queue_head_t ev_wait; struct fasync_struct *ev_async; int status; struct mei_me_client *me_cl; const struct file *fp; u8 host_client_id; struct list_head vtag_map; u8 tx_flow_ctrl_creds; u8 rx_flow_ctrl_creds; u8 timer_count; u8 notify_en; u8 notify_ev; u8 tx_cb_queued; enum mei_file_transaction_states writing_state; struct list_head rd_pending; spinlock_t rd_completed_lock; struct list_head rd_completed; struct mei_dma_data dma; u8 dma_mapped; struct mei_cl_device *cldev; }; struct mei_msg_data { size_t size; unsigned char *data; }; struct mei_ext_hdr; struct mei_cl_cb { struct list_head list; struct mei_cl *cl; enum mei_cb_file_ops fop_type; struct mei_msg_data buf; size_t buf_idx; u8 vtag; const struct file *fp; int status; u32 internal: 1; u32 blocking: 1; struct mei_ext_hdr *ext_hdr; }; typedef void (*mei_cldev_cb_t)(struct mei_cl_device *); struct mei_cl_device { struct list_head bus_list; struct mei_device *bus; struct device dev; struct mei_me_client *me_cl; struct mei_cl *cl; char name[32]; struct work_struct rx_work; mei_cldev_cb_t rx_cb; struct work_struct notif_work; mei_cldev_cb_t notif_cb; unsigned int do_match: 1; unsigned int is_added: 1; void *priv_data; }; struct mei_cl_device_id { char name[32]; uuid_le uuid; __u8 version; kernel_ulong_t driver_info; }; struct mei_cl_driver { struct device_driver driver; const char *name; const struct mei_cl_device_id *id_table; int (*probe)(struct mei_cl_device *, const struct mei_cl_device_id *); void (*remove)(struct mei_cl_device *); }; struct mei_cl_vtag { struct list_head list; const struct file *fp; u8 vtag; u8 pending_read: 1; }; struct mei_client { __u32 max_msg_length; __u8 protocol_version; __u8 reserved[3]; }; struct mei_connect_client_data { union { uuid_le in_client_uuid; struct mei_client out_client_properties; }; }; struct mei_connect_client_vtag { uuid_le in_client_uuid; __u8 vtag; __u8 reserved[3]; }; struct mei_connect_client_data_vtag { union { struct mei_connect_client_vtag connect; struct mei_client out_client_properties; }; }; struct mei_dev_timeouts { long unsigned int hw_ready; int connect; long unsigned int cl_connect; int client_init; long unsigned int pgi; unsigned int d0i3; long unsigned int hbm; long unsigned int mkhi_recv; }; struct mei_dma_dscr { void *vaddr; dma_addr_t daddr; size_t size; }; struct mei_fw_version { u8 platform; u8 major; u16 minor; u16 buildno; u16 hotfix; }; struct mei_hw_ops; struct mei_device { struct device *dev; struct cdev cdev; int minor; struct list_head write_list; struct list_head write_waiting_list; struct list_head ctrl_wr_list; struct list_head ctrl_rd_list; u8 tx_queue_limit; struct list_head file_list; long int open_handle_count; struct mutex device_lock; struct delayed_work timer_work; bool recvd_hw_ready; wait_queue_head_t wait_hw_ready; wait_queue_head_t wait_pg; wait_queue_head_t wait_hbm_start; long unsigned int reset_count; enum mei_dev_state dev_state; enum mei_hbm_state hbm_state; enum mei_dev_pxp_mode pxp_mode; u16 init_clients_timer; enum mei_pg_event pg_event; struct dev_pm_domain pg_domain; unsigned char rd_msg_buf[512]; u32 rd_msg_hdr[512]; int rd_msg_hdr_count; bool hbuf_is_ready; struct mei_dma_dscr dr_dscr[3]; struct hbm_version version; unsigned int hbm_f_pg_supported: 1; unsigned int hbm_f_dc_supported: 1; unsigned int hbm_f_dot_supported: 1; unsigned int hbm_f_ev_supported: 1; unsigned int hbm_f_fa_supported: 1; unsigned int hbm_f_ie_supported: 1; unsigned int hbm_f_os_supported: 1; unsigned 
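/*
 * struct mei_cl_device / struct mei_cl_driver above belong to the MEI
 * client bus: host-side drivers bind to management-engine firmware
 * clients by UUID. A hedged outline of such a driver; "demo" and its
 * UUID are placeholders, not a real firmware client:
 *
 *   #include <linux/mei_cl_bus.h>
 *   #include <linux/module.h>
 *
 *   #define DEMO_UUID UUID_LE(0x12345678, 0x1234, 0x1234, \
 *                   0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34)
 *
 *   static int demo_probe(struct mei_cl_device *cldev,
 *                         const struct mei_cl_device_id *id)
 *   {
 *           return mei_cldev_enable(cldev);   // connect to the FW client
 *   }
 *
 *   static void demo_remove(struct mei_cl_device *cldev)
 *   {
 *           mei_cldev_disable(cldev);
 *   }
 *
 *   static const struct mei_cl_device_id demo_tbl[] = {
 *           { .name = "demo", .uuid = DEMO_UUID, .version = MEI_CL_VERSION_ANY },
 *           { }
 *   };
 *   MODULE_DEVICE_TABLE(mei, demo_tbl);
 *
 *   static struct mei_cl_driver demo_drv = {
 *           .id_table = demo_tbl,
 *           .name     = "demo",
 *           .probe    = demo_probe,
 *           .remove   = demo_remove,
 *   };
 *   module_mei_cl_driver(demo_drv);
 */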
int hbm_f_dr_supported: 1; unsigned int hbm_f_vt_supported: 1; unsigned int hbm_f_cap_supported: 1; unsigned int hbm_f_cd_supported: 1; unsigned int hbm_f_gsc_supported: 1; struct mei_fw_version fw_ver[3]; unsigned int fw_f_fw_ver_supported: 1; unsigned int fw_ver_received: 1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; long unsigned int me_clients_map[4]; long unsigned int host_clients_map[4]; bool allow_fixed_address; bool override_fixed_address; struct mei_dev_timeouts timeouts; struct work_struct reset_work; struct work_struct bus_rescan_work; struct list_head device_list; struct mutex cl_bus_lock; const char *kind; struct dentry *dbgfs_dir; struct mei_fw_status saved_fw_status; enum mei_dev_state saved_dev_state; bool saved_fw_status_flag; enum mei_dev_reset_to_pxp gsc_reset_to_pxp; const struct mei_hw_ops *ops; char hw[0]; }; struct mei_ext_hdr { u8 type; u8 length; }; struct mei_ext_hdr_gsc_f2h { struct mei_ext_hdr hdr; u8 client_id; u8 reserved; u32 fence_id; u32 written; }; struct mei_gsc_sgl { u32 low; u32 high; u32 length; }; struct mei_ext_hdr_gsc_h2f { struct mei_ext_hdr hdr; u8 client_id; u8 addr_type; u32 fence_id; u8 input_address_count; u8 output_address_count; u8 reserved[2]; struct mei_gsc_sgl sgl[0]; }; struct mei_ext_hdr_vtag { struct mei_ext_hdr hdr; u8 vtag; u8 reserved; }; struct mei_ext_meta_hdr { u8 count; u8 size; u8 reserved[2]; u8 hdrs[0]; }; struct mei_fixup { const uuid_le uuid; void (*hook)(struct mei_cl_device *); }; struct mei_hbm_cl_cmd { u8 hbm_cmd; u8 me_addr; u8 host_addr; u8 data; }; struct mei_hw_ops { bool (*host_is_ready)(struct mei_device *); bool (*hw_is_ready)(struct mei_device *); int (*hw_reset)(struct mei_device *, bool); int (*hw_start)(struct mei_device *); int (*hw_config)(struct mei_device *); int (*fw_status)(struct mei_device *, struct mei_fw_status *); int (*trc_status)(struct mei_device *, u32 *); enum mei_pg_state (*pg_state)(struct mei_device *); bool (*pg_in_transition)(struct mei_device *); bool (*pg_is_enabled)(struct mei_device *); void (*intr_clear)(struct mei_device *); void (*intr_enable)(struct mei_device *); void (*intr_disable)(struct mei_device *); void (*synchronize_irq)(struct mei_device *); int (*hbuf_free_slots)(struct mei_device *); bool (*hbuf_is_ready)(struct mei_device *); u32 (*hbuf_depth)(const struct mei_device *); int (*write)(struct mei_device *, const void *, size_t, const void *, size_t); int (*rdbuf_full_slots)(struct mei_device *); u32 (*read_hdr)(const struct mei_device *); int (*read)(struct mei_device *, unsigned char *, long unsigned int); }; struct mei_me_client { struct list_head list; struct kref refcnt; struct mei_client_properties props; u8 client_id; u8 tx_flow_ctrl_creds; u8 connect_count; u8 bus_added; }; struct mei_me_hw { const struct mei_cfg *cfg; void *mem_addr; int irq; enum mei_pg_state pg_state; bool d0i3_supported; u8 hbuf_depth; int (*read_fws)(const struct mei_device *, int, u32 *); struct task_struct *polling_thread; wait_queue_head_t wait_active; bool is_active; }; struct mei_msg_hdr { u32 me_addr: 8; u32 host_addr: 8; u32 length: 9; u32 reserved: 3; u32 extended: 1; u32 dma_ring: 1; u32 internal: 1; u32 msg_complete: 1; u32 extension[0]; }; struct mei_nfc_cmd { u8 command; u8 status; u16 req_id; u32 reserved; u16 data_size; u8 sub_command; u8 data[0]; } __attribute__((packed)); struct mei_nfc_if_version { u8 radio_version_sw[3]; u8 reserved[3]; u8 radio_version_hw[3]; u8 i2c_addr; u8 fw_ivn; u8 vendor_id; u8 radio_type; }; struct mei_nfc_reply { u8 command; 
u8 status; u16 req_id; u32 reserved; u16 data_size; u8 sub_command; u8 reply_status; u8 data[0]; }; struct mei_os_ver { __le16 build; __le16 reserved1; u8 os_type; u8 major; u8 minor; u8 reserved2; }; struct mem_cgroup_id { int id; refcount_t ref; }; struct vmpressure { long unsigned int scanned; long unsigned int reclaimed; long unsigned int tree_scanned; long unsigned int tree_reclaimed; spinlock_t sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; }; struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; long unsigned int period_time; long unsigned int dirty_limit_tstamp; long unsigned int dirty_limit; }; struct wb_completion { atomic_t cnt; wait_queue_head_t *waitq; }; struct memcg_cgwb_frn { u64 bdi_id; int memcg_id; u64 at; struct wb_completion done; }; struct memcg_vmstats; struct memcg_vmstats_percpu; struct mem_cgroup_per_node; struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; long: 64; long: 64; long: 64; struct page_counter memory; union { struct page_counter swap; struct page_counter memsw; }; struct list_head memory_peaks; struct list_head swap_peaks; spinlock_t peaks_lock; struct work_struct high_work; struct vmpressure vmpressure; bool oom_group; int swappiness; struct cgroup_file events_file; struct cgroup_file events_local_file; struct cgroup_file swap_events_file; struct memcg_vmstats *vmstats; atomic_long_t memory_events[9]; atomic_long_t memory_events_local[9]; long unsigned int socket_pressure; int kmemcg_id; struct obj_cgroup *objcg; struct obj_cgroup *orig_objcg; struct list_head objcg_list; struct memcg_vmstats_percpu *vmstats_percpu; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct memcg_cgwb_frn cgwb_frn[4]; struct deferred_split deferred_split_queue; struct mem_cgroup_per_node *nodeinfo[0]; long: 64; long: 64; }; struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; atomic_t generation; }; struct shrinker_info; struct mem_cgroup_per_node { struct mem_cgroup *memcg; struct lruvec_stats_percpu *lruvec_stats_percpu; struct lruvec_stats *lruvec_stats; struct shrinker_info *shrinker_info; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad1_; struct lruvec lruvec; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad2_; long unsigned int lru_zone_size[20]; struct mem_cgroup_reclaim_iter iter; long: 64; long: 64; }; typedef struct pglist_data pg_data_t; struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; int generation; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; long unsigned int dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; }; struct mem_section_usage; struct mem_section { long unsigned int section_mem_map; struct mem_section_usage *usage; }; struct mem_section_usage { struct callback_head rcu; long unsigned int subsection_map[1]; long unsigned int pageblock_flags[0]; }; struct mem_size_stats { long unsigned int resident; long unsigned int shared_clean; long unsigned int shared_dirty; long unsigned int private_clean; long unsigned int private_dirty; long unsigned int referenced; long unsigned int anonymous; long unsigned int lazyfree; long unsigned int anonymous_thp; long unsigned int shmem_thp; long unsigned int file_thp; long unsigned int swap; long unsigned int shared_hugetlb; long unsigned int 
private_hugetlb; long unsigned int ksm; u64 pss; u64 pss_anon; u64 pss_file; u64 pss_shmem; u64 pss_dirty; u64 pss_locked; u64 swap_pss; }; struct memblock_region; struct memblock_type { long unsigned int cnt; long unsigned int max; phys_addr_t total_size; struct memblock_region *regions; char *name; }; struct memblock { bool bottom_up; phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; }; struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; int nid; }; struct membuf { void *p; size_t left; }; struct memcg_stock_pcp { local_lock_t stock_lock; struct mem_cgroup *cached; unsigned int nr_pages; struct obj_cgroup *cached_objcg; struct pglist_data *cached_pgdat; unsigned int nr_bytes; int nr_slab_reclaimable_b; int nr_slab_unreclaimable_b; struct work_struct work; long unsigned int flags; }; struct memcg_vmstats { long int state[38]; long unsigned int events[20]; long int state_local[38]; long unsigned int events_local[20]; long int state_pending[38]; long unsigned int events_pending[20]; atomic64_t stats_updates; }; struct memcg_vmstats_percpu { unsigned int stats_updates; struct memcg_vmstats_percpu *parent; struct memcg_vmstats *vmstats; long int state[38]; long unsigned int events[20]; long int state_prev[38]; long unsigned int events_prev[20]; long: 64; }; struct memdev { const char *name; const struct file_operations *fops; fmode_t fmode; umode_t mode; }; struct memmap_attribute { struct attribute attr; ssize_t (*show)(struct firmware_map_entry *, char *); }; struct memory_dev_type { struct list_head tier_sibling; struct list_head list; int adistance; nodemask_t nodes; struct kref kref; }; struct memory_failure_entry { long unsigned int pfn; int flags; }; struct memory_failure_cpu { struct { union { struct __kfifo kfifo; struct memory_failure_entry *type; const struct memory_failure_entry *const_type; char (*rectype)[0]; struct memory_failure_entry *ptr; const struct memory_failure_entry *ptr_const; }; struct memory_failure_entry buf[16]; } fifo; raw_spinlock_t lock; struct work_struct work; }; struct memory_failure_stats { long unsigned int total; long unsigned int ignored; long unsigned int failed; long unsigned int delayed; long unsigned int recovered; }; struct memory_notify { long unsigned int altmap_start_pfn; long unsigned int altmap_nr_pages; long unsigned int start_pfn; long unsigned int nr_pages; int status_change_nid_normal; int status_change_nid; }; struct memory_stat { const char *name; unsigned int idx; }; struct memory_tier { struct list_head list; struct list_head memory_types; int adistance_start; struct device dev; nodemask_t lower_tier_mask; }; struct mempolicy { atomic_t refcnt; short unsigned int mode; short unsigned int flags; nodemask_t nodes; int home_node; union { nodemask_t cpuset_mems_allowed; nodemask_t user_nodemask; } w; }; struct mempolicy_operations { int (*create)(struct mempolicy *, const nodemask_t *); void (*rebind)(struct mempolicy *, const nodemask_t *); }; struct memtype { u64 start; u64 end; u64 subtree_max_end; enum page_cache_mode type; struct rb_node rb; }; struct menu_device { int needs_update; int tick_wakeup; u64 next_timer_ns; unsigned int bucket; unsigned int correction_factor[12]; unsigned int intervals[8]; int interval_ptr; }; struct xfrm_md_info { u32 if_id; int link; struct dst_entry *dst_orig; }; struct metadata_dst { struct dst_entry dst; enum metadata_type type; union { struct ip_tunnel_info tun_info; struct hw_port_info port_info; struct macsec_info macsec_info; 
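/*
 * struct memblock / memblock_type / memblock_region above implement the
 * early boot allocator that later hands its memory to the buddy
 * allocator. A sketch of typical early-init usage from
 * <linux/memblock.h>; only valid before the page allocator is up:
 *
 *   #include <linux/memblock.h>
 *   #include <linux/sizes.h>
 *
 *   void __init demo_early_setup(phys_addr_t fw_base, phys_addr_t fw_size)
 *   {
 *           void *buf;
 *
 *           // Keep firmware-owned RAM out of the free lists.
 *           memblock_reserve(fw_base, fw_size);
 *
 *           // Boot-time allocation: returns zeroed memory or NULL.
 *           buf = memblock_alloc(SZ_64K, SMP_CACHE_BYTES);
 *           if (!buf)
 *                   pr_warn("demo: early allocation failed\n");
 *   }
 */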
struct xfrm_md_info xfrm_info; } u; }; struct mr_mfc { struct rhlist_head mnode; short unsigned int mfc_parent; int mfc_flags; union { struct { long unsigned int expires; struct sk_buff_head unresolved; } unres; struct { long unsigned int last_assert; int minvif; int maxvif; long unsigned int bytes; long unsigned int pkt; long unsigned int wrong_if; long unsigned int lastuse; unsigned char ttls[32]; refcount_t refcount; } res; } mfc_un; struct list_head list; struct callback_head rcu; void (*free)(struct callback_head *); }; struct mfc_cache_cmp_arg { __be32 mfc_mcastgrp; __be32 mfc_origin; }; struct mfc_cache { struct mr_mfc _c; union { struct { __be32 mfc_mcastgrp; __be32 mfc_origin; }; struct mfc_cache_cmp_arg cmparg; }; }; struct mfc_entry_notifier_info { struct fib_notifier_info info; struct mr_mfc *mfc; u32 tb_id; }; struct mfcctl { struct in_addr mfcc_origin; struct in_addr mfcc_mcastgrp; vifi_t mfcc_parent; unsigned char mfcc_ttls[32]; unsigned int mfcc_pkt_cnt; unsigned int mfcc_byte_cnt; unsigned int mfcc_wrong_if; int mfcc_expire; }; struct microcode_header_amd { u32 data_code; u32 patch_id; u16 mc_patch_data_id; u8 mc_patch_data_len; u8 init_flag; u32 mc_patch_data_checksum; u32 nb_dev_id; u32 sb_dev_id; u16 processor_rev_id; u8 nb_rev_id; u8 sb_rev_id; u8 bios_api_rev; u8 reserved1[3]; u32 match_reg[8]; }; struct microcode_amd { struct microcode_header_amd hdr; unsigned int mpb[0]; }; struct microcode_header_intel { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int metasize; unsigned int min_req_ver; unsigned int reserved; }; struct microcode_intel { struct microcode_header_intel hdr; unsigned int bits[0]; }; struct microcode_ops { enum ucode_state (*request_microcode_fw)(int, struct device *); void (*microcode_fini_cpu)(int); enum ucode_state (*apply_microcode)(int); int (*collect_cpu_info)(int, struct cpu_signature *); void (*finalize_late_load)(int); unsigned int nmi_safe: 1; unsigned int use_nmi: 1; }; struct mid8250_board; struct mid8250 { int line; int dma_index; struct pci_dev *dma_dev; struct uart_8250_dma dma; struct mid8250_board *board; struct hsu_dma_chip dma_chip; }; struct mid8250_board { long unsigned int freq; unsigned int base_baud; unsigned int bar; int (*setup)(struct mid8250 *, struct uart_port *); void (*exit)(struct mid8250 *); }; struct migrate_pages_stats { int nr_succeeded; int nr_failed_pages; int nr_thp_succeeded; int nr_thp_failed; int nr_thp_split; int nr_split; }; struct migrate_struct { ext4_lblk_t first_block; ext4_lblk_t last_block; ext4_lblk_t curr_block; ext4_fsblk_t first_pblock; ext4_fsblk_t last_pblock; }; struct set_affinity_pending; struct migration_arg { struct task_struct *task; int dest_cpu; struct set_affinity_pending *pending; }; struct migration_mpol { struct mempolicy *pol; long unsigned int ilx; }; struct migration_swap_arg { struct task_struct *src_task; struct task_struct *dst_task; int src_cpu; int dst_cpu; }; struct migration_target_control { int nid; nodemask_t *nmask; gfp_t gfp_mask; enum migrate_reason reason; }; struct phy_package_shared; struct mii_bus { struct module *owner; const char *name; char id[61]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16); int (*read_c45)(struct mii_bus *, int, int, int); int (*write_c45)(struct mii_bus *, int, int, int, u16); int (*reset)(struct mii_bus *); struct mdio_bus_stats stats[32]; struct 
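/*
 * struct miscdevice above is the simplest way to expose a character
 * device node: misc_register() picks the minor and creates /dev/<name>.
 * A minimal sketch ("demo" is a placeholder name):
 *
 *   #include <linux/miscdevice.h>
 *   #include <linux/fs.h>
 *   #include <linux/module.h>
 *
 *   static const struct file_operations demo_fops = {
 *           .owner = THIS_MODULE,
 *   };
 *
 *   static struct miscdevice demo_misc = {
 *           .minor = MISC_DYNAMIC_MINOR,   // any free minor
 *           .name  = "demo",               // becomes /dev/demo
 *           .fops  = &demo_fops,
 *   };
 *
 *   static int __init demo_init(void)
 *   {
 *           return misc_register(&demo_misc);
 *   }
 *   static void __exit demo_exit(void)
 *   {
 *           misc_deregister(&demo_misc);
 *   }
 *   module_init(demo_init);
 *   module_exit(demo_exit);
 *   MODULE_LICENSE("GPL");
 */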
mutex mdio_lock; struct device *parent; enum { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4, } state; struct device dev; struct mdio_device *mdio_map[32]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32]; int reset_delay_us; int reset_post_delay_us; struct gpio_desc *reset_gpiod; struct mutex shared_lock; struct phy_package_shared *shared[32]; }; struct mii_ioctl_data { __u16 phy_id; __u16 reg_num; __u16 val_in; __u16 val_out; }; struct mii_timestamper { bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); int (*hwtstamp)(struct mii_timestamper *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); void (*link_state)(struct mii_timestamper *, struct phy_device *); int (*ts_info)(struct mii_timestamper *, struct kernel_ethtool_ts_info *); struct device *device; }; struct min_heap_callbacks { bool (*less)(const void *, const void *, void *); void (*swp)(void *, void *, void *); }; struct min_heap_char { int nr; int size; char *data; char preallocated[0]; }; typedef struct min_heap_char min_heap_char; struct min_max_quirk { const char * const *pnp_ids; struct { u32 min; u32 max; } board_id; u32 x_min; u32 x_max; u32 y_min; u32 y_max; }; struct minix_super_block { __u16 s_ninodes; __u16 s_nzones; __u16 s_imap_blocks; __u16 s_zmap_blocks; __u16 s_firstdatazone; __u16 s_log_zone_size; __u32 s_max_size; __u16 s_magic; __u16 s_state; __u32 s_zones; }; struct mip6_report_rate_limiter { spinlock_t lock; ktime_t stamp; int iif; struct in6_addr src; struct in6_addr dst; }; struct miscdevice { int minor; const char *name; const struct file_operations *fops; struct list_head list; struct device *parent; struct device *this_device; const struct attribute_group **groups; const char *nodename; umode_t mode; }; struct mkhi_fw_ver_block { u16 minor; u8 major; u8 platform; u16 buildno; u16 hotfix; }; struct mkhi_fw_ver { struct mkhi_fw_ver_block ver[3]; }; struct mkhi_rule_id { __le16 rule_type; u8 feature_id; u8 reserved; }; struct mkhi_fwcaps { struct mkhi_rule_id id; u8 len; u8 data[0]; } __attribute__((packed)); struct mkhi_msg_hdr { u8 group_id; u8 command; u8 reserved; u8 result; }; struct mkhi_gfx_mem_ready { struct mkhi_msg_hdr hdr; u32 flags; }; struct mkhi_msg { struct mkhi_msg_hdr hdr; u8 data[0]; }; struct ml_effect_state { struct ff_effect *effect; long unsigned int flags; int count; long unsigned int play_at; long unsigned int stop_at; long unsigned int adj_at; }; struct ml_device { void *private; struct ml_effect_state states[16]; int gain; struct timer_list timer; struct input_dev *dev; int (*play_effect)(struct input_dev *, void *, struct ff_effect *); }; struct mld2_grec { __u8 grec_type; __u8 grec_auxwords; __be16 grec_nsrcs; struct in6_addr grec_mca; struct in6_addr grec_src[0]; }; struct mld2_query { struct icmp6hdr mld2q_hdr; struct in6_addr mld2q_mca; __u8 mld2q_qrv: 3; __u8 mld2q_suppress: 1; __u8 mld2q_resv2: 4; __u8 mld2q_qqic; __be16 mld2q_nsrcs; struct in6_addr mld2q_srcs[0]; }; struct mld2_report { struct icmp6hdr mld2r_hdr; struct mld2_grec mld2r_grec[0]; }; struct mld_msg { struct icmp6hdr mld_hdr; struct in6_addr mld_mca; }; struct mlock_fbatch { local_lock_t lock; struct folio_batch fbatch; }; struct mm_cid { u64 time; int cid; }; struct mm_reply_data { struct ethnl_reply_data base; struct ethtool_mm_state state; struct ethtool_mm_stats stats; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area; }; struct mm_struct { struct 
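/*
 * struct mii_bus above models an MDIO bus, with one struct mdio_device
 * per address. Consumers normally go through the mdiobus accessors
 * (which take mdio_lock) rather than calling bus->read()/write()
 * directly. A hedged sketch reading a PHY's ID registers:
 *
 *   #include <linux/mdio.h>
 *   #include <linux/mii.h>
 *
 *   static int demo_read_phy_id(struct mii_bus *bus, int addr)
 *   {
 *           int hi, lo;
 *
 *           hi = mdiobus_read(bus, addr, MII_PHYSID1);
 *           lo = mdiobus_read(bus, addr, MII_PHYSID2);
 *           if (hi < 0 || lo < 0)
 *                   return -EIO;
 *           return (hi << 16) | lo;   // 32-bit PHY identifier
 *   }
 */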
{ struct { atomic_t mm_count; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct maple_tree mm_mt; long unsigned int mmap_base; long unsigned int mmap_legacy_base; long unsigned int task_size; pgd_t *pgd; atomic_t membarrier_state; atomic_t mm_users; struct mm_cid *pcpu_cid; long unsigned int mm_cid_next_scan; atomic_long_t pgtables_bytes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_lock; struct list_head mmlist; int mm_lock_seq; long unsigned int hiwater_rss; long unsigned int hiwater_vm; long unsigned int total_vm; long unsigned int locked_vm; atomic64_t pinned_vm; long unsigned int data_vm; long unsigned int exec_vm; long unsigned int stack_vm; long unsigned int def_flags; seqcount_t write_protect_seq; spinlock_t arg_lock; long unsigned int start_code; long unsigned int end_code; long unsigned int start_data; long unsigned int end_data; long unsigned int start_brk; long unsigned int brk; long unsigned int start_stack; long unsigned int arg_start; long unsigned int arg_end; long unsigned int env_start; long unsigned int env_end; long unsigned int saved_auxv[50]; struct percpu_counter rss_stat[4]; struct linux_binfmt *binfmt; mm_context_t context; long unsigned int flags; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; long unsigned int numa_next_scan; long unsigned int numa_scan_offset; int numa_scan_seq; atomic_t tlb_flush_pending; atomic_t tlb_flush_batched; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; long unsigned int ksm_merging_pages; long unsigned int ksm_rmap_items; atomic_long_t ksm_zero_pages; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; long unsigned int cpu_bitmap[0]; }; struct mm_struct__safe_rcu_or_null { struct file *exe_file; }; struct mm_walk_ops; struct mm_walk { const struct mm_walk_ops *ops; struct mm_struct *mm; pgd_t *pgd; struct vm_area_struct *vma; enum page_walk_action action; bool no_vma; void *private; }; struct mm_walk_ops { int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); void (*post_vma)(struct mm_walk *); enum page_walk_lock walk_lock; }; struct mmap_unlock_irq_work { struct irq_work irq_work; struct mm_struct *mm; }; struct mminit_pfnnid_cache { long unsigned int last_start; long unsigned int last_end; int last_nid; }; struct mmp_struct { __le32 mmp_magic; __le32 mmp_seq; __le64 mmp_time; char mmp_nodename[64]; char mmp_bdevname[32]; __le16 mmp_check_interval; __le16 mmp_pad1; __le32 mmp_pad2[226]; __le32 mmp_checksum; }; struct mmpin { struct user_struct *user; unsigned int num_pg; }; struct user_msghdr { void *msg_name; int msg_namelen; struct iovec *msg_iov; __kernel_size_t msg_iovlen; void *msg_control; __kernel_size_t msg_controllen; unsigned int 
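/*
 * struct mm_walk / struct mm_walk_ops above drive the generic
 * page-table walker. A sketch that counts present PTEs in a range,
 * assuming the API from <linux/pagewalk.h> on a recent kernel (the
 * walk_lock field above implies one new enough to have PGWALK_RDLOCK):
 *
 *   #include <linux/pagewalk.h>
 *
 *   static int demo_pte(pte_t *pte, unsigned long addr,
 *                       unsigned long next, struct mm_walk *walk)
 *   {
 *           unsigned long *count = walk->private;
 *
 *           if (pte_present(ptep_get(pte)))
 *                   (*count)++;
 *           return 0;   // non-zero would abort the walk
 *   }
 *
 *   static const struct mm_walk_ops demo_ops = {
 *           .pte_entry = demo_pte,
 *           .walk_lock = PGWALK_RDLOCK,
 *   };
 *
 *   static unsigned long demo_count(struct mm_struct *mm,
 *                                   unsigned long start, unsigned long end)
 *   {
 *           unsigned long count = 0;
 *
 *           mmap_read_lock(mm);   // the walker asserts the lock is held
 *           walk_page_range(mm, start, end, &demo_ops, &count);
 *           mmap_read_unlock(mm);
 *           return count;
 *   }
 */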
msg_flags; }; struct mmsghdr { struct user_msghdr msg_hdr; unsigned int msg_len; }; struct encoded_page; struct mmu_gather_batch { struct mmu_gather_batch *next; unsigned int nr; unsigned int max; struct encoded_page *encoded_pages[0]; }; struct mmu_gather { struct mm_struct *mm; long unsigned int start; long unsigned int end; unsigned int fullmm: 1; unsigned int need_flush_all: 1; unsigned int freed_tables: 1; unsigned int delayed_rmap: 1; unsigned int cleared_ptes: 1; unsigned int cleared_pmds: 1; unsigned int cleared_puds: 1; unsigned int cleared_p4ds: 1; unsigned int vma_exec: 1; unsigned int vma_huge: 1; unsigned int vma_pfn: 1; unsigned int batch_count; struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[8]; }; struct mmu_notifier_range { long unsigned int start; long unsigned int end; }; struct mnt_id_req { __u32 size; __u32 spare; __u64 mnt_id; __u64 param; __u64 mnt_ns_id; }; struct uid_gid_extent { u32 first; u32 lower_first; u32 count; }; struct uid_gid_map { union { struct { struct uid_gid_extent extent[5]; u32 nr_extents; }; struct { struct uid_gid_extent *forward; struct uid_gid_extent *reverse; }; }; }; struct mnt_idmap { struct uid_gid_map uid_map; struct uid_gid_map gid_map; refcount_t count; }; struct mount; struct mnt_namespace { struct ns_common ns; struct mount *root; struct rb_root mounts; struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; wait_queue_head_t poll; u64 event; unsigned int nr_mounts; unsigned int pending_mounts; struct rb_node mnt_ns_tree_node; refcount_t passive; }; struct mnt_ns_info { __u32 size; __u32 nr_mounts; __u64 mnt_ns_id; }; struct mnt_pcp { int mnt_count; int mnt_writers; }; struct orc_entry; struct mod_arch_specific { unsigned int num_orcs; int *orc_unwind_ip; struct orc_entry *orc_unwind; }; struct mod_initfree { struct llist_node node; void *init_text; void *init_data; void *init_rodata; }; struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; char *typetab; }; struct mod_tree_node { struct module *mod; struct latch_tree_node node; }; struct mod_tree_root { struct latch_tree_root root; long unsigned int addr_min; long unsigned int addr_max; }; struct module_param_attrs; struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; }; struct module_memory { void *base; unsigned int size; struct mod_tree_node mtn; }; struct module_attribute; struct module_sect_attrs; struct module_notes_attrs; struct trace_event_call; struct trace_eval_map; struct static_call_site; struct module { enum module_state state; struct list_head list; char name[56]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; bool using_gplonly_symbols; bool sig_ok; bool async_probe_requested; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(void); long: 64; long: 64; struct module_memory mem[7]; struct mod_arch_specific arch; long unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs 
*notes_attrs; char *args; void *percpu; unsigned int percpu_size; void *noinstr_text_start; unsigned int noinstr_text_size; unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; unsigned int num_srcu_structs; struct srcu_struct **srcu_struct_ptrs; unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; unsigned int btf_data_size; unsigned int btf_base_data_size; void *btf_data; void *btf_base_data; struct jump_entry *jump_entries; unsigned int num_jump_entries; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_eval_map **trace_evals; unsigned int num_trace_evals; unsigned int num_ftrace_callsites; long unsigned int *ftrace_callsites; void *kprobes_text_start; unsigned int kprobes_text_size; long unsigned int *kprobe_blacklist; unsigned int num_kprobe_blacklist; int num_static_call_sites; struct static_call_site *static_call_sites; struct list_head source_list; struct list_head target_list; void (*exit)(void); atomic_t refcnt; struct error_injection_entry *ei_funcs; unsigned int num_ei_funcs; }; struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); }; struct module_notes_attrs { struct kobject *dir; unsigned int notes; struct bin_attribute attrs[0]; }; struct param_attribute { struct module_attribute mattr; const struct kernel_param *param; }; struct module_param_attrs { unsigned int num; struct attribute_group grp; struct param_attribute attrs[0]; }; struct module_reply_data { struct ethnl_reply_data base; struct ethtool_module_power_mode_params power; }; struct module_sect_attr { struct bin_attribute battr; long unsigned int address; }; struct module_sect_attrs { struct attribute_group grp; unsigned int nsections; struct module_sect_attr attrs[0]; }; struct module_signature { u8 algo; u8 hash; u8 id_type; u8 signer_len; u8 key_id_len; u8 __pad[3]; __be32 sig_len; }; struct module_string { struct list_head next; struct module *module; char *str; }; struct module_use { struct list_head source_list; struct list_head target_list; struct module *source; struct module *target; }; struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; }; struct modules_array { struct module **mods; int mods_cnt; int mods_cap; }; struct modversion_info { long unsigned int crc; char name[56]; }; struct vfsmount { struct dentry *mnt_root; struct super_block *mnt_sb; int mnt_flags; struct mnt_idmap *mnt_idmap; }; struct mountpoint; struct mount { struct hlist_node mnt_hash; struct mount *mnt_parent; struct dentry *mnt_mountpoint; struct vfsmount mnt; union { struct callback_head mnt_rcu; struct llist_node mnt_llist; }; struct mnt_pcp *mnt_pcp; struct list_head mnt_mounts; struct list_head mnt_child; struct list_head mnt_instance; const char *mnt_devname; union { struct rb_node mnt_node; struct list_head mnt_list; }; struct list_head mnt_expire; struct list_head mnt_share; struct list_head mnt_slave_list; struct list_head mnt_slave; struct mount *mnt_master; struct mnt_namespace *mnt_ns; struct mountpoint *mnt_mp; union { struct hlist_node mnt_mp_list; struct hlist_node mnt_umount; }; struct list_head mnt_umounting; struct fsnotify_mark_connector 
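/*
 * struct module above is what THIS_MODULE points at; its refcnt plus
 * the source_list/target_list pairs implement inter-module dependency
 * tracking. A sketch of manual refcounting, e.g. around a callback
 * registration (demo_* names are illustrative):
 *
 *   #include <linux/module.h>
 *
 *   int demo_register(struct module *owner)
 *   {
 *           if (!try_module_get(owner))   // fails while owner is unloading
 *                   return -ENODEV;
 *           // ... publish callbacks whose code lives in `owner` ...
 *           return 0;
 *   }
 *
 *   void demo_unregister(struct module *owner)
 *   {
 *           module_put(owner);            // drops module::refcnt
 *   }
 */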
*mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; int mnt_id; u64 mnt_id_unique; int mnt_group_id; int mnt_expiry_mark; struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; }; struct mount_attr { __u64 attr_set; __u64 attr_clr; __u64 propagation; __u64 userns_fd; }; struct mount_kattr { unsigned int attr_set; unsigned int attr_clr; unsigned int propagation; unsigned int lookup_flags; bool recurse; struct user_namespace *mnt_userns; struct mnt_idmap *mnt_idmap; }; struct mount_opts { int token; int mount_opt; int flags; }; struct mountpoint { struct hlist_node m_hash; struct dentry *m_dentry; struct hlist_head m_list; int m_count; }; struct movable_operations { bool (*isolate_page)(struct page *, isolate_mode_t); int (*migrate_page)(struct page *, struct page *, enum migrate_mode); void (*putback_page)(struct page *); }; struct move_extent { __u32 reserved; __u32 donor_fd; __u64 orig_start; __u64 donor_start; __u64 len; __u64 moved_len; }; struct mp_chip_data { struct list_head irq_2_pin; struct IO_APIC_route_entry entry; bool is_level; bool active_low; bool isa_irq; u32 count; }; struct mpage_da_data { struct inode *inode; struct writeback_control *wbc; unsigned int can_map: 1; long unsigned int first_page; long unsigned int next_page; long unsigned int last_page; struct ext4_map_blocks map; struct ext4_io_submit io_submit; unsigned int do_map: 1; unsigned int scanned_until_end: 1; unsigned int journalled_more_data: 1; }; typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); struct mpage_data { struct bio *bio; sector_t last_block_in_bio; get_block_t *get_block; }; struct mpage_readpage_args { struct bio *bio; struct folio *folio; unsigned int nr_pages; bool is_readahead; sector_t last_block_in_bio; struct buffer_head map_bh; long unsigned int first_logical_block; get_block_t *get_block; }; struct mpc_bus { unsigned char type; unsigned char busid; unsigned char bustype[6]; }; struct mpc_cpu { unsigned char type; unsigned char apicid; unsigned char apicver; unsigned char cpuflag; unsigned int cpufeature; unsigned int featureflag; unsigned int reserved[2]; }; struct mpc_intsrc { unsigned char type; unsigned char irqtype; short unsigned int irqflag; unsigned char srcbus; unsigned char srcbusirq; unsigned char dstapic; unsigned char dstirq; }; struct mpc_lintsrc { unsigned char type; unsigned char irqtype; short unsigned int irqflag; unsigned char srcbusid; unsigned char srcbusirq; unsigned char destapic; unsigned char destapiclint; }; struct mpc_table { char signature[4]; short unsigned int length; char spec; char checksum; char oem[8]; char productid[12]; unsigned int oemptr; short unsigned int oemsize; short unsigned int oemcount; unsigned int lapic; unsigned int reserved; }; struct mpf_intel { char signature[4]; unsigned int physptr; unsigned char length; unsigned char specification; unsigned char checksum; unsigned char feature1; unsigned char feature2; unsigned char feature3; unsigned char feature4; unsigned char feature5; }; struct mpls_pcpu_stats; struct mpls_dev { int input_enabled; struct net_device *dev; struct mpls_pcpu_stats *stats; struct ctl_table_header *sysctl; struct callback_head rcu; }; struct mpls_entry_decoded { u32 label; u8 ttl; u8 tc; u8 bos; }; struct mpls_iptunnel_encap { u8 labels; u8 ttl_propagate; u8 default_ttl; u8 reserved1; u32 label[0]; }; struct mpls_label { __be32 entry; }; struct mpls_link_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 
tx_dropped; __u64 rx_noroute; }; struct mpls_nh { struct net_device *nh_dev; unsigned int nh_flags; u8 nh_labels; u8 nh_via_alen; u8 nh_via_table; u8 nh_reserved1; u32 nh_label[0]; }; struct mpls_pcpu_stats { struct mpls_link_stats stats; struct u64_stats_sync syncp; }; struct mpls_route { struct callback_head rt_rcu; u8 rt_protocol; u8 rt_payload_type; u8 rt_max_alen; u8 rt_ttl_propagate; u8 rt_nhn; u8 rt_nhn_alive; u8 rt_nh_size; u8 rt_via_offset; u8 rt_reserved1; struct mpls_nh rt_nh[0]; }; struct mpls_route_config { u32 rc_protocol; u32 rc_ifindex; u8 rc_via_table; u8 rc_via_alen; u8 rc_via[32]; u32 rc_label; u8 rc_ttl_propagate; u8 rc_output_labels; u32 rc_output_label[30]; u32 rc_nlflags; enum mpls_payload_type rc_payload_type; struct nl_info rc_nlinfo; struct rtnexthop *rc_mp; int rc_mp_len; }; struct mpls_shim_hdr { __be32 label_stack_entry; }; struct mptcp_addr_info { u8 id; sa_family_t family; __be16 port; union { struct in_addr addr; struct in6_addr addr6; }; }; struct mptcp_data_frag { struct list_head list; u64 data_seq; u16 data_len; u16 offset; u16 overhead; u16 already_sent; struct page *page; }; struct mptcp_delegated_action { struct napi_struct napi; struct list_head head; }; struct mptcp_diag_ctx { long int s_slot; long int s_num; unsigned int l_slot; unsigned int l_num; }; struct mptcp_ext { union { u64 data_ack; u32 data_ack32; }; u64 data_seq; u32 subflow_seq; u16 data_len; __sum16 csum; u8 use_map: 1; u8 dsn64: 1; u8 data_fin: 1; u8 use_ack: 1; u8 ack64: 1; u8 mpc_map: 1; u8 frozen: 1; u8 reset_transient: 1; u8 reset_reason: 4; u8 csum_reqd: 1; u8 infinite_map: 1; }; struct mptcp_info { __u8 mptcpi_subflows; __u8 mptcpi_add_addr_signal; __u8 mptcpi_add_addr_accepted; __u8 mptcpi_subflows_max; __u8 mptcpi_add_addr_signal_max; __u8 mptcpi_add_addr_accepted_max; __u32 mptcpi_flags; __u32 mptcpi_token; __u64 mptcpi_write_seq; __u64 mptcpi_snd_una; __u64 mptcpi_rcv_nxt; __u8 mptcpi_local_addr_used; __u8 mptcpi_local_addr_max; __u8 mptcpi_csum_enabled; __u32 mptcpi_retransmits; __u64 mptcpi_bytes_retrans; __u64 mptcpi_bytes_sent; __u64 mptcpi_bytes_received; __u64 mptcpi_bytes_acked; __u8 mptcpi_subflows_total; __u8 reserved[3]; __u32 mptcpi_last_data_sent; __u32 mptcpi_last_data_recv; __u32 mptcpi_last_ack_recv; }; struct mptcp_full_info { __u32 size_tcpinfo_kernel; __u32 size_tcpinfo_user; __u32 size_sfinfo_kernel; __u32 size_sfinfo_user; __u32 num_subflows; __u32 size_arrays_user; __u64 subflow_info; __u64 tcp_info; struct mptcp_info mptcp_info; }; struct mptcp_mib { long unsigned int mibs[68]; }; struct mptcp_rm_list { u8 ids[8]; u8 nr; }; struct mptcp_options_received { u64 sndr_key; u64 rcvr_key; u64 data_ack; u64 data_seq; u32 subflow_seq; u16 data_len; __sum16 csum; u16 suboptions; u32 token; u32 nonce; u16 use_map: 1; u16 dsn64: 1; u16 data_fin: 1; u16 use_ack: 1; u16 ack64: 1; u16 mpc_map: 1; u16 reset_reason: 4; u16 reset_transient: 1; u16 echo: 1; u16 backup: 1; u16 deny_join_id0: 1; u16 __unused: 2; u8 join_id; u64 thmac; u8 hmac[20]; struct mptcp_addr_info addr; struct mptcp_rm_list rm_list; u64 ahmac; u64 fail_seq; }; struct mptcp_out_options { u16 suboptions; struct mptcp_rm_list rm_list; u8 join_id; u8 backup; u8 reset_reason: 4; u8 reset_transient: 1; u8 csum_reqd: 1; u8 allow_join_id0: 1; union { struct { u64 sndr_key; u64 rcvr_key; u64 data_seq; u32 subflow_seq; u16 data_len; __sum16 csum; }; struct { struct mptcp_addr_info addr; u64 ahmac; }; struct { struct mptcp_ext ext_copy; u64 fail_seq; }; struct { u32 nonce; u32 token; u64 thmac; u8 hmac[20]; }; 
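/*
 * struct mpls_label / struct mpls_shim_hdr above hold one 32-bit MPLS
 * label stack entry, laid out as label(20) | TC(3) | S(1) | TTL(8);
 * struct mpls_entry_decoded is the unpacked form. A userspace decode
 * sketch using the UAPI masks from <linux/mpls.h>:
 *
 *   #include <linux/mpls.h>
 *   #include <arpa/inet.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   static void demo_decode(const struct mpls_label *hdr)
 *   {
 *           uint32_t e = ntohl(hdr->entry);
 *
 *           printf("label=%u tc=%u bos=%u ttl=%u\n",
 *                  (e & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
 *                  (e & MPLS_LS_TC_MASK)    >> MPLS_LS_TC_SHIFT,
 *                  (e & MPLS_LS_S_MASK)     >> MPLS_LS_S_SHIFT,
 *                  (e & MPLS_LS_TTL_MASK)   >> MPLS_LS_TTL_SHIFT);
 *   }
 */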
}; }; struct mptcp_pernet { struct ctl_table_header *ctl_table_hdr; unsigned int add_addr_timeout; unsigned int blackhole_timeout; unsigned int close_timeout; unsigned int stale_loss_cnt; atomic_t active_disable_times; long unsigned int active_disable_stamp; u8 mptcp_enabled; u8 checksum_enabled; u8 allow_join_initial_addr_port; u8 pm_type; char scheduler[16]; }; struct mptcp_sock; struct mptcp_pm_add_entry { struct list_head list; struct mptcp_addr_info addr; u8 retrans_times; struct timer_list add_timer; struct mptcp_sock *sock; }; struct mptcp_pm_addr_entry { struct list_head list; struct mptcp_addr_info addr; u8 flags; int ifindex; struct socket *lsk; }; struct mptcp_pm_data { struct mptcp_addr_info local; struct mptcp_addr_info remote; struct list_head anno_list; struct list_head userspace_pm_local_addr_list; spinlock_t lock; u8 addr_signal; bool server_side; bool work_pending; bool accept_addr; bool accept_subflow; bool remote_deny_join_id0; u8 add_addr_signaled; u8 add_addr_accepted; u8 local_addr_used; u8 pm_type; u8 subflows; u8 status; long unsigned int id_avail_bitmap[4]; struct mptcp_rm_list rm_list_tx; struct mptcp_rm_list rm_list_rx; }; struct mptcp_pm_local { struct mptcp_addr_info addr; u8 flags; int ifindex; }; struct mptcp_subflow_context; struct mptcp_sched_data { bool reinject; u8 subflows; struct mptcp_subflow_context *contexts[8]; }; struct mptcp_sched_ops { int (*get_subflow)(struct mptcp_sock *, struct mptcp_sched_data *); char name[16]; struct module *owner; struct list_head list; void (*init)(struct mptcp_sock *); void (*release)(struct mptcp_sock *); }; struct mptcp_sendmsg_info { int mss_now; int size_goal; u16 limit; u16 sent; unsigned int flags; bool data_lock_held; }; struct mptcp_skb_cb { u64 map_seq; u64 end_seq; u32 offset; u8 has_rxtstamp: 1; }; struct mptcp_sock { struct inet_connection_sock sk; u64 local_key; u64 remote_key; u64 write_seq; u64 bytes_sent; u64 snd_nxt; u64 bytes_received; u64 ack_seq; atomic64_t rcv_wnd_sent; u64 rcv_data_fin_seq; u64 bytes_retrans; u64 bytes_consumed; int rmem_fwd_alloc; int snd_burst; int old_wspace; u64 recovery_snd_nxt; u64 bytes_acked; u64 snd_una; u64 wnd_end; u32 last_data_sent; u32 last_data_recv; u32 last_ack_recv; long unsigned int timer_ival; u32 token; int rmem_released; long unsigned int flags; long unsigned int cb_flags; bool recovery; bool can_ack; bool fully_established; bool rcv_data_fin; bool snd_data_fin_enable; bool rcv_fastclose; bool use_64bit_ack; bool csum_enabled; bool allow_infinite_fallback; u8 pending_state; u8 mpc_endpoint_id; u8 recvmsg_inq: 1; u8 cork: 1; u8 nodelay: 1; u8 fastopening: 1; u8 in_accept_queue: 1; u8 free_first: 1; u8 rcvspace_init: 1; u32 notsent_lowat; int keepalive_cnt; int keepalive_idle; int keepalive_intvl; struct work_struct work; struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; struct sk_buff_head receive_queue; struct list_head conn_list; struct list_head rtx_queue; struct mptcp_data_frag *first_pending; struct list_head join_list; struct sock *first; struct mptcp_pm_data pm; struct mptcp_sched_ops *sched; struct { u32 space; u32 copied; u64 time; u64 rtt_us; } rcvq_space; u8 scaling_ratio; u32 subflow_id; u32 setsockopt_seq; char ca_name[16]; }; struct sockaddr_in { __kernel_sa_family_t sin_family; __be16 sin_port; struct in_addr sin_addr; unsigned char __pad[8]; }; struct mptcp_subflow_addrs { union { __kernel_sa_family_t sa_family; struct sockaddr sa_local; struct sockaddr_in sin_local; struct sockaddr_in6 sin6_local; struct 
__kernel_sockaddr_storage ss_local; }; union { struct sockaddr sa_remote; struct sockaddr_in sin_remote; struct sockaddr_in6 sin6_remote; struct __kernel_sockaddr_storage ss_remote; }; }; struct mptcp_subflow_context { struct list_head node; union { struct { long unsigned int avg_pacing_rate; u64 local_key; u64 remote_key; u64 idsn; u64 map_seq; u32 snd_isn; u32 token; u32 rel_write_seq; u32 map_subflow_seq; u32 ssn_offset; u32 map_data_len; __wsum map_data_csum; u32 map_csum_len; u32 request_mptcp: 1; u32 request_join: 1; u32 request_bkup: 1; u32 mp_capable: 1; u32 mp_join: 1; u32 fully_established: 1; u32 pm_notified: 1; u32 conn_finished: 1; u32 map_valid: 1; u32 map_csum_reqd: 1; u32 map_data_fin: 1; u32 mpc_map: 1; u32 backup: 1; u32 send_mp_prio: 1; u32 send_mp_fail: 1; u32 send_fastclose: 1; u32 send_infinite_map: 1; u32 remote_key_valid: 1; u32 disposable: 1; u32 stale: 1; u32 valid_csum_seen: 1; u32 is_mptfo: 1; u32 close_event_done: 1; u32 mpc_drop: 1; u32 __unused: 8; bool data_avail; bool scheduled; u32 remote_nonce; u64 thmac; u32 local_nonce; u32 remote_token; union { u8 hmac[20]; u64 iasn; }; s16 local_id; u8 remote_id; u8 reset_seen: 1; u8 reset_transient: 1; u8 reset_reason: 4; u8 stale_count; u32 subflow_id; long int delegated_status; long unsigned int fail_tout; }; struct { long unsigned int avg_pacing_rate; u64 local_key; u64 remote_key; u64 idsn; u64 map_seq; u32 snd_isn; u32 token; u32 rel_write_seq; u32 map_subflow_seq; u32 ssn_offset; u32 map_data_len; __wsum map_data_csum; u32 map_csum_len; u32 request_mptcp: 1; u32 request_join: 1; u32 request_bkup: 1; u32 mp_capable: 1; u32 mp_join: 1; u32 fully_established: 1; u32 pm_notified: 1; u32 conn_finished: 1; u32 map_valid: 1; u32 map_csum_reqd: 1; u32 map_data_fin: 1; u32 mpc_map: 1; u32 backup: 1; u32 send_mp_prio: 1; u32 send_mp_fail: 1; u32 send_fastclose: 1; u32 send_infinite_map: 1; u32 remote_key_valid: 1; u32 disposable: 1; u32 stale: 1; u32 valid_csum_seen: 1; u32 is_mptfo: 1; u32 close_event_done: 1; u32 mpc_drop: 1; u32 __unused: 8; bool data_avail; bool scheduled; u32 remote_nonce; u64 thmac; u32 local_nonce; u32 remote_token; union { u8 hmac[20]; u64 iasn; }; s16 local_id; u8 remote_id; u8 reset_seen: 1; u8 reset_transient: 1; u8 reset_reason: 4; u8 stale_count; u32 subflow_id; long int delegated_status; long unsigned int fail_tout; } reset; }; struct list_head delegated_node; u32 setsockopt_seq; u32 stale_rcv_tstamp; int cached_sndbuf; struct sock *tcp_sock; struct sock *conn; const struct inet_connection_sock_af_ops *icsk_af_ops; void (*tcp_state_change)(struct sock *); void (*tcp_error_report)(struct sock *); struct callback_head rcu; }; struct mptcp_subflow_data { __u32 size_subflow_data; __u32 num_subflows; __u32 size_kernel; __u32 size_user; }; struct mptcp_subflow_info { __u32 id; struct mptcp_subflow_addrs addrs; }; struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; u64 snt_synack; bool tfo_listener; bool is_mptcp; bool req_usec_ts; bool drop_req; u32 txhash; u32 rcv_isn; u32 snt_isn; u32 ts_off; u32 last_oow_ack_time; u32 rcv_nxt; u8 syn_tos; }; struct mptcp_subflow_request_sock { struct tcp_request_sock sk; u16 mp_capable: 1; u16 mp_join: 1; u16 backup: 1; u16 request_bkup: 1; u16 csum_reqd: 1; u16 allow_join_id0: 1; u8 local_id; u8 remote_id; u64 local_key; u64 idsn; u32 token; u32 ssn_offset; u64 thmac; u32 local_nonce; u32 remote_nonce; struct mptcp_sock *msk; struct hlist_nulls_node token_node; }; struct 
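/*
 * The mptcp_* structures above are the kernel side of Multipath TCP.
 * From userspace, MPTCP is opt-in per socket via IPPROTO_MPTCP (kernels
 * >= 5.6 built with CONFIG_MPTCP); everything after socket creation is
 * the ordinary sockets API. A minimal sketch:
 *
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *
 *   #ifndef IPPROTO_MPTCP
 *   #define IPPROTO_MPTCP 262   // older libc headers may not define it
 *   #endif
 *
 *   int demo_mptcp_socket(void)
 *   {
 *           // Fails with errno == EPROTONOSUPPORT when MPTCP is off.
 *           return socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *   }
 */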
mq_inflight { struct block_device *part; unsigned int inflight[2]; }; struct mq_sched { struct Qdisc **qdiscs; }; struct mqueue_fs_context { struct ipc_namespace *ipc_ns; bool newns; }; struct sigevent { sigval_t sigev_value; int sigev_signo; int sigev_notify; union { int _pad[12]; int _tid; struct { void (*_function)(sigval_t); void *_attribute; } _sigev_thread; } _sigev_un; }; struct posix_msg_tree_node; struct mqueue_inode_info { spinlock_t lock; struct inode vfs_inode; wait_queue_head_t wait_q; struct rb_root msg_tree; struct rb_node *msg_tree_rightmost; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; struct pid *notify_owner; u32 notify_self_exec_id; struct user_namespace *notify_user_ns; struct ucounts *ucounts; struct sock *notify_sock; struct sk_buff *notify_cookie; struct ext_wait_queue e_wait_q[2]; long unsigned int qsize; }; struct mr_table; struct mr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; spinlock_t *lock; }; struct mr_table_ops { const struct rhashtable_params *rht_params; void *cmparg_any; }; struct vif_device { struct net_device *dev; netdevice_tracker dev_tracker; long unsigned int bytes_in; long unsigned int bytes_out; long unsigned int pkt_in; long unsigned int pkt_out; long unsigned int rate_limit; unsigned char threshold; short unsigned int flags; int link; struct netdev_phys_item_id dev_parent_id; __be32 local; __be32 remote; }; struct rhltable { struct rhashtable ht; }; struct mr_table { struct list_head list; possible_net_t net; struct mr_table_ops ops; u32 id; struct sock *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct vif_device vif_table[32]; struct rhltable mfc_hash; struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; struct mr_vif_iter { struct seq_net_private p; struct mr_table *mrt; int ct; }; struct ms_data { long unsigned int quirks; struct hid_device *hdev; struct work_struct ff_worker; __u8 strong; __u8 weak; void *output_report_dmabuf; }; struct msdos_partition { u8 boot_ind; u8 head; u8 sector; u8 cyl; u8 sys_ind; u8 end_head; u8 end_sector; u8 end_cyl; __le32 start_sect; __le32 nr_sects; }; struct msg_msgseg; struct msg_msg { struct list_head m_list; long int m_type; size_t m_ts; struct msg_msgseg *next; void *security; }; struct msg_msgseg { struct msg_msgseg *next; }; struct msg_queue { struct kern_ipc_perm q_perm; time64_t q_stime; time64_t q_rtime; time64_t q_ctime; long unsigned int q_cbytes; long unsigned int q_qnum; long unsigned int q_qbytes; struct pid *q_lspid; struct pid *q_lrpid; struct list_head q_messages; struct list_head q_receivers; struct list_head q_senders; long: 64; long: 64; }; struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; int r_mode; long int r_msgtype; long int r_maxsize; struct msg_msg *r_msg; }; struct msg_security_struct { u32 sid; }; struct msg_sender { struct list_head list; struct task_struct *tsk; size_t msgsz; }; struct msgbuf { __kernel_long_t mtype; char mtext[1]; }; struct msginfo { int msgpool; int msgmap; int msgmax; int msgmnb; int msgmni; int msgssz; int msgtql; short unsigned int msgseg; }; struct msi_ctrl { unsigned int domid; unsigned int first; unsigned int last; unsigned int nirqs; }; struct x86_msi_addr_lo { union { struct { u32 reserved_0: 2; u32 dest_mode_logical: 1; u32 redirect_hint: 1; u32 reserved_1: 1; u32 virt_destid_8_14: 7; u32 
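/*
 * struct sigevent and struct mqueue_inode_info above back POSIX message
 * queues; the notify_* fields implement mq_notify(). A userspace sketch
 * (link with -lrt) asking for a thread callback when the queue goes
 * from empty to non-empty:
 *
 *   #include <mqueue.h>
 *   #include <signal.h>
 *   #include <stdio.h>
 *
 *   static void demo_notified(union sigval sv)
 *   {
 *           (void)sv;
 *           puts("message arrived");
 *   }
 *
 *   int demo_watch(mqd_t q)
 *   {
 *           struct sigevent sev = {
 *                   .sigev_notify          = SIGEV_THREAD,
 *                   .sigev_notify_function = demo_notified,
 *           };
 *           // One-shot: must be re-armed after each notification.
 *           return mq_notify(q, &sev);
 *   }
 */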
destid_0_7: 8; u32 base_address: 12; }; struct { u32 dmar_reserved_0: 2; u32 dmar_index_15: 1; u32 dmar_subhandle_valid: 1; u32 dmar_format: 1; u32 dmar_index_0_14: 15; u32 dmar_base_address: 12; }; }; }; typedef struct x86_msi_addr_lo arch_msi_msg_addr_lo_t; struct x86_msi_addr_hi { u32 reserved: 8; u32 destid_8_31: 24; }; typedef struct x86_msi_addr_hi arch_msi_msg_addr_hi_t; struct x86_msi_data { union { struct { u32 vector: 8; u32 delivery_mode: 3; u32 dest_mode_logical: 1; u32 reserved: 2; u32 active_low: 1; u32 is_level: 1; }; u32 dmar_subhandle; }; }; typedef struct x86_msi_data arch_msi_msg_data_t; struct msi_msg { union { u32 address_lo; arch_msi_msg_addr_lo_t arch_addr_lo; }; union { u32 address_hi; arch_msi_msg_addr_hi_t arch_addr_hi; }; union { u32 data; arch_msi_msg_data_t arch_data; }; }; struct pci_msi_desc { union { u32 msi_mask; u32 msix_ctrl; }; struct { u8 is_msix: 1; u8 multiple: 3; u8 multi_cap: 3; u8 can_mask: 1; u8 is_64: 1; u8 is_virtual: 1; unsigned int default_irq; } msi_attrib; union { u8 mask_pos; void *mask_base; }; }; union msi_domain_cookie { u64 value; void *ptr; void *iobase; }; union msi_instance_cookie { u64 value; void *ptr; }; struct msi_desc_data { union msi_domain_cookie dcookie; union msi_instance_cookie icookie; }; struct msi_desc { unsigned int irq; unsigned int nvec_used; struct device *dev; struct msi_msg msg; struct irq_affinity_desc *affinity; const void *iommu_cookie; struct device_attribute *sysfs_attrs; void (*write_msi_msg)(struct msi_desc *, void *); void *write_msi_msg_data; u16 msi_index; union { struct pci_msi_desc pci; struct msi_desc_data data; }; }; struct msi_dev_domain { struct xarray store; struct irq_domain *domain; }; struct msi_device_data { long unsigned int properties; struct mutex mutex; struct msi_dev_domain __domains[1]; long unsigned int __iter_idx; }; struct msi_domain_ops; struct msi_domain_info { u32 flags; enum irq_domain_bus_token bus_token; unsigned int hwsize; struct msi_domain_ops *ops; struct irq_chip *chip; void *chip_data; irq_flow_handler_t handler; void *handler_data; const char *handler_name; void *data; }; struct msi_domain_ops { irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); void (*prepare_desc)(struct irq_domain *, msi_alloc_info_t *, struct msi_desc *); void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); void (*domain_free_irqs)(struct irq_domain *, struct device *); void (*msi_post_free)(struct irq_domain *, struct device *); int (*msi_translate)(struct irq_domain *, struct irq_fwspec *, irq_hw_number_t *, unsigned int *); }; struct msi_domain_template { char name[48]; struct irq_chip chip; struct msi_domain_ops ops; struct msi_domain_info info; }; struct msi_map { int index; int virq; }; struct msi_parent_ops { u32 supported_flags; u32 required_flags; u32 bus_select_token; u32 bus_select_mask; const char *prefix; bool (*init_dev_msi_info)(struct device *, struct irq_domain *, struct irq_domain *, struct msi_domain_info *); }; struct msix_entry { u32 vector; u16 entry; }; struct msqid64_ds { struct ipc64_perm msg_perm; long int msg_stime; long int msg_rtime; long int msg_ctime; long unsigned int msg_cbytes; long unsigned int 
msg_qnum; long unsigned int msg_qbytes; __kernel_pid_t msg_lspid; __kernel_pid_t msg_lrpid; long unsigned int __unused4; long unsigned int __unused5; }; struct msg; struct msqid_ds { struct ipc_perm msg_perm; struct msg *msg_first; struct msg *msg_last; __kernel_old_time_t msg_stime; __kernel_old_time_t msg_rtime; __kernel_old_time_t msg_ctime; long unsigned int msg_lcbytes; long unsigned int msg_lqbytes; short unsigned int msg_cbytes; short unsigned int msg_qnum; short unsigned int msg_qbytes; __kernel_ipc_pid_t msg_lspid; __kernel_ipc_pid_t msg_lrpid; }; struct msr { union { struct { u32 l; u32 h; }; u64 q; }; }; struct msr_data { bool host_initiated; u32 index; u64 data; }; struct msr_enumeration { u32 msr_no; u32 feature; }; struct msr_info { u32 msr_no; struct msr reg; struct msr *msrs; int err; }; struct msr_info_completion { struct msr_info msr; struct completion done; }; struct msr_regs_info { u32 *regs; int err; }; struct mthp_stat { long unsigned int stats[130]; }; struct mtrr_cleanup_result { long unsigned int gran_sizek; long unsigned int chunk_sizek; long unsigned int lose_cover_sizek; unsigned int num_reg; int bad; }; struct mtrr_gentry { __u64 base; __u32 size; __u32 regnum; __u32 type; __u32 _pad; }; struct mtrr_ops { u32 var_regs; void (*set)(unsigned int, long unsigned int, long unsigned int, mtrr_type); void (*get)(unsigned int, long unsigned int *, long unsigned int *, mtrr_type *); int (*get_free_region)(long unsigned int, long unsigned int, int); int (*validate_add_page)(long unsigned int, long unsigned int, unsigned int); int (*have_wrcomb)(void); }; struct mtrr_sentry { __u64 base; __u32 size; __u32 type; }; struct mtrr_var_range { __u32 base_lo; __u32 base_hi; __u32 mask_lo; __u32 mask_hi; }; struct mtrr_state_type { struct mtrr_var_range var_ranges[256]; mtrr_type fixed_ranges[88]; unsigned char enabled; bool have_fixed; mtrr_type def_type; }; struct multi_stop_data { cpu_stop_fn_t fn; void *data; unsigned int num_threads; const struct cpumask *active_cpus; enum multi_stop_state state; atomic_t thread_ack; }; struct multi_symbols_sort { const char **funcs; u64 *cookies; }; struct multiprocess_signals { sigset_t signal; struct hlist_node node; }; typedef struct mutex *class_mutex_t; typedef class_mutex_t class_mutex_intr_t; struct mutex_waiter { struct list_head list; struct task_struct *task; struct ww_acquire_ctx *ww_ctx; void *magic; }; struct mwait_cpu_dead { unsigned int control; unsigned int status; }; struct n_tty_data { size_t read_head; size_t commit_head; size_t canon_head; size_t echo_head; size_t echo_commit; size_t echo_mark; long unsigned int char_map[4]; long unsigned int overrun_time; unsigned int num_overrun; bool no_room; unsigned char lnext: 1; unsigned char erasing: 1; unsigned char raw: 1; unsigned char real_raw: 1; unsigned char icanon: 1; unsigned char push: 1; u8 read_buf[4096]; long unsigned int read_flags[64]; u8 echo_buf[4096]; size_t read_tail; size_t line_start; size_t lookahead_count; unsigned int column; unsigned int canon_column; size_t echo_tail; struct mutex atomic_read_lock; struct mutex output_lock; }; struct name_snapshot { struct qstr name; unsigned char inline_name[40]; }; struct saved { struct path link; struct delayed_call done; const char *name; unsigned int seq; }; struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; unsigned int flags; unsigned int state; unsigned int seq; unsigned int next_seq; unsigned int m_seq; unsigned int r_seq; int last_type; unsigned int depth; int 
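/*
 * struct msr above is the l/h vs. 64-bit union the x86 MSR helpers work
 * with. A sketch of the fault-tolerant read interface from <asm/msr.h>
 * (x86-only; the _safe variant traps #GP instead of oopsing when the
 * MSR is not implemented):
 *
 *   #include <asm/msr.h>
 *
 *   static u64 demo_read_platform_info(void)
 *   {
 *           u64 val = 0;
 *
 *           if (rdmsrl_safe(MSR_PLATFORM_INFO, &val))
 *                   return 0;   // MSR absent on this CPU
 *           return val;
 *   }
 */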
total_link_count; struct saved *stack; struct saved internal[2]; struct filename *name; struct nameidata *saved; unsigned int root_seq; int dfd; vfsuid_t dir_vfsuid; umode_t dir_mode; }; struct page_frag_cache { void *va; __u16 offset; __u16 size; unsigned int pagecnt_bias; bool pfmemalloc; }; struct page_frag_1k { void *va; u16 offset; bool pfmemalloc; }; struct napi_alloc_cache { local_lock_t bh_lock; struct page_frag_cache page; struct page_frag_1k page_small; unsigned int skb_count; void *skb_cache[64]; }; struct napi_gro_cb { union { struct { void *frag0; unsigned int frag0_len; }; struct { struct sk_buff *last; long unsigned int age; }; }; int data_offset; u16 flush; u16 count; u16 proto; u16 pad; union { struct { u16 gro_remcsum_start; u8 same_flow: 1; u8 encap_mark: 1; u8 csum_valid: 1; u8 csum_cnt: 3; u8 free: 2; u8 is_ipv6: 1; u8 is_fou: 1; u8 ip_fixedid: 1; u8 recursion_counter: 4; u8 is_flist: 1; }; struct { u16 gro_remcsum_start; u8 same_flow: 1; u8 encap_mark: 1; u8 csum_valid: 1; u8 csum_cnt: 3; u8 free: 2; u8 is_ipv6: 1; u8 is_fou: 1; u8 ip_fixedid: 1; u8 recursion_counter: 4; u8 is_flist: 1; } zeroed; }; __wsum csum; union { struct { u16 network_offset; u16 inner_network_offset; }; u16 network_offsets[2]; }; }; struct nat_keepalive { struct net *net; u16 family; xfrm_address_t saddr; xfrm_address_t daddr; __be16 encap_sport; __be16 encap_dport; __u32 smark; }; struct nat_keepalive_work_ctx { time64_t next_run; time64_t now; }; struct nf_nat_hooks_net { struct nf_hook_ops *nat_hook_ops; unsigned int users; }; struct nat_net { struct nf_nat_hooks_net nat_proto_net[11]; }; struct nbcon_state { union { unsigned int atom; struct { unsigned int prio: 2; unsigned int req_prio: 2; unsigned int unsafe: 1; unsigned int unsafe_takeover: 1; unsigned int cpu: 24; }; }; }; struct nbcon_write_context { struct nbcon_context ctxt; char *outbuf; unsigned int len; bool unsafe_takeover; }; struct nd_msg { struct icmp6hdr icmph; struct in6_addr target; __u8 opt[0]; }; struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; }; struct nda_cacheinfo { __u32 ndm_confirmed; __u32 ndm_used; __u32 ndm_updated; __u32 ndm_refcnt; }; struct ndisc_options; struct prefix_info; struct ndisc_ops { int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); }; struct ndisc_options { struct nd_opt_hdr *nd_opt_array[15]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; }; struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; }; struct ndt_config { __u16 ndtc_key_len; __u16 ndtc_entry_size; __u32 ndtc_entries; __u32 ndtc_last_flush; __u32 ndtc_last_rand; __u32 ndtc_hash_rnd; __u32 ndtc_hash_mask; __u32 ndtc_hash_chain_gc; __u32 ndtc_proxy_qlen; }; struct ndt_stats { __u64 ndts_allocs; __u64 ndts_destroys; __u64 ndts_hash_grows; __u64 ndts_res_failed; __u64 ndts_lookups; __u64 ndts_hits; __u64 ndts_rcv_probes_mcast; __u64 ndts_rcv_probes_ucast; __u64 ndts_periodic_gc_runs; __u64 
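/*
 * Example (sketch): struct ndmsg is the rtnetlink header for neighbour
 * (ARP/NDP) messages; a dump request is just an nlmsghdr followed by an
 * ndmsg selecting the address family. request_neigh_dump() is a
 * hypothetical helper name:
 *
 *   #include <sys/socket.h>
 *   #include <linux/netlink.h>
 *   #include <linux/rtnetlink.h>
 *
 *   static int request_neigh_dump(void)
 *   {
 *       struct {
 *           struct nlmsghdr nlh;
 *           struct ndmsg ndm;
 *       } req = {
 *           .nlh = {
 *               .nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg)),
 *               .nlmsg_type  = RTM_GETNEIGH,
 *               .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *           },
 *           .ndm = { .ndm_family = AF_INET },
 *       };
 *       int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *       if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
 *           return -1;
 *       // replies arrive as RTM_NEWNEIGH messages: ndmsg + NDA_*
 *       // attributes, with timing details in struct nda_cacheinfo
 *       return fd;
 *   }
 */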
ndts_forced_gc_runs; __u64 ndts_table_fulls; }; struct ndtmsg { __u8 ndtm_family; __u8 ndtm_pad1; __u16 ndtm_pad2; }; struct nduseroptmsg { unsigned char nduseropt_family; unsigned char nduseropt_pad1; short unsigned int nduseropt_opts_len; int nduseropt_ifindex; __u8 nduseropt_icmp_type; __u8 nduseropt_icmp_code; short unsigned int nduseropt_pad2; unsigned int nduseropt_pad3; }; struct neigh_dump_filter { int master_idx; int dev_idx; }; struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4]; struct callback_head rcu; }; struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); }; struct neigh_parms { possible_net_t net; struct net_device *dev; netdevice_tracker dev_tracker; struct list_head list; int (*neigh_setup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; refcount_t refcnt; struct callback_head callback_head; int reachable_time; u32 qlen; int data[14]; long unsigned int data_state[1]; }; struct neigh_seq_state { struct seq_net_private p; struct neigh_table *tbl; struct neigh_hash_table *nht; void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); unsigned int bucket; unsigned int flags; }; struct neigh_statistics { long unsigned int allocs; long unsigned int destroys; long unsigned int hash_grows; long unsigned int res_failed; long unsigned int lookups; long unsigned int hits; long unsigned int rcv_probes_mcast; long unsigned int rcv_probes_ucast; long unsigned int periodic_gc_runs; long unsigned int forced_gc_runs; long unsigned int unres_discards; long unsigned int table_fulls; }; struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table neigh_vars[21]; }; struct pneigh_entry; struct neigh_table { int family; unsigned int entry_size; unsigned int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); int (*is_multicast)(const void *); bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; long unsigned int last_flush; struct delayed_work gc_work; struct delayed_work managed_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; atomic_t gc_entries; struct list_head gc_list; struct list_head managed_list; rwlock_t lock; long unsigned int last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; }; struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; long unsigned int confirmed; long unsigned int updated; rwlock_t lock; refcount_t refcnt; unsigned int arp_queue_len_bytes; struct sk_buff_head arp_queue; struct timer_list timer; long unsigned int used; atomic_t probes; u8 nud_state; u8 type; u8 dead; u8 protocol; u32 flags; seqlock_t ha_lock; unsigned char ha[32]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct list_head gc_list; struct list_head managed_list; struct 
callback_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; u8 primary_key[0]; }; struct neighbour_cb { long unsigned int sched_next; unsigned int flags; }; union nested_table { union nested_table *table; struct rhash_lock_head *bucket; }; struct ref_tracker_dir {}; struct raw_notifier_head { struct notifier_block *head; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; int sysctl_optmem_max; u8 sysctl_txrehash; struct prot_inuse *prot_inuse; struct cpumask *rps_default_mask; }; struct tcp_mib; struct udp_mib; struct netns_mib { struct ipstats_mib *ip_statistics; struct ipstats_mib *ipv6_statistics; struct tcp_mib *tcp_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udp_stats_in6; struct linux_tls_mib *tls_statistics; struct mptcp_mib *mptcp_statistics; struct udp_mib *udplite_statistics; struct udp_mib *udplite_stats_in6; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct proc_dir_entry *proc_net_devsnmp6; }; struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; }; struct unix_table { spinlock_t *locks; struct hlist_head *buckets; }; struct netns_unix { struct unix_table table; int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; }; struct netns_nexthop { struct rb_root rb_root; struct hlist_head *devhash; unsigned int seq; u32 last_id_allocated; struct blocking_notifier_head notifier_chain; }; struct ping_group_range { seqlock_t lock; kgid_t range[2]; }; struct sysctl_fib_multipath_hash_seed { u32 user_seed; u32 mp_seed; }; struct netns_ipv4 { __u8 __cacheline_group_begin__netns_ipv4_read_tx[0]; u8 sysctl_tcp_early_retrans; u8 sysctl_tcp_tso_win_divisor; u8 sysctl_tcp_tso_rtt_log; u8 sysctl_tcp_autocorking; int sysctl_tcp_min_snd_mss; unsigned int sysctl_tcp_notsent_lowat; int sysctl_tcp_limit_output_bytes; int sysctl_tcp_min_rtt_wlen; int sysctl_tcp_wmem[3]; u8 sysctl_ip_fwd_use_pmtu; __u8 __cacheline_group_end__netns_ipv4_read_tx[0]; __u8 __cacheline_group_begin__netns_ipv4_read_txrx[0]; u8 sysctl_tcp_moderate_rcvbuf; __u8 __cacheline_group_end__netns_ipv4_read_txrx[0]; __u8 __cacheline_group_begin__netns_ipv4_read_rx[0]; u8 sysctl_ip_early_demux; u8 sysctl_tcp_early_demux; int sysctl_tcp_reordering; int sysctl_tcp_rmem[3]; __u8 __cacheline_group_end__netns_ipv4_read_rx[0]; long: 64; struct inet_timewait_death_row tcp_death_row; struct udp_table *udp_table; struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct ip_ra_chain *ra_chain; struct mutex ra_mutex; struct fib_rules_ops *rules_ops; struct fib_table *fib_main; struct fib_table *fib_default; unsigned int fib_rules_require_fldissect; bool fib_has_custom_rules; bool fib_has_custom_local_routes; bool fib_offload_disabled; u8 sysctl_tcp_shrink_window; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct fqdir *fqdir; u8 sysctl_icmp_echo_ignore_all; u8 sysctl_icmp_echo_enable_probe; u8 sysctl_icmp_echo_ignore_broadcasts; u8 sysctl_icmp_ignore_bogus_error_responses; u8 sysctl_icmp_errors_use_inbound_ifaddr; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_msgs_per_sec; int sysctl_icmp_msgs_burst; atomic_t 
icmp_global_credit; u32 icmp_global_stamp; u32 ip_rt_min_pmtu; int ip_rt_mtu_expires; int ip_rt_min_advmss; struct local_ports ip_local_ports; u8 sysctl_tcp_ecn; u8 sysctl_tcp_ecn_fallback; u8 sysctl_ip_default_ttl; u8 sysctl_ip_no_pmtu_disc; u8 sysctl_ip_fwd_update_priority; u8 sysctl_ip_nonlocal_bind; u8 sysctl_ip_autobind_reuse; u8 sysctl_ip_dynaddr; u8 sysctl_raw_l3mdev_accept; u8 sysctl_udp_early_demux; u8 sysctl_nexthop_compat_mode; u8 sysctl_fwmark_reflect; u8 sysctl_tcp_fwmark_accept; u8 sysctl_tcp_l3mdev_accept; u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_intvl; u8 sysctl_tcp_keepalive_probes; u8 sysctl_tcp_syn_retries; u8 sysctl_tcp_synack_retries; u8 sysctl_tcp_syncookies; u8 sysctl_tcp_migrate_req; u8 sysctl_tcp_comp_sack_nr; u8 sysctl_tcp_backlog_ack_defer; u8 sysctl_tcp_pingpong_thresh; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; u8 sysctl_tcp_orphan_retries; u8 sysctl_tcp_tw_reuse; int sysctl_tcp_fin_timeout; u8 sysctl_tcp_sack; u8 sysctl_tcp_window_scaling; u8 sysctl_tcp_timestamps; int sysctl_tcp_rto_min_us; u8 sysctl_tcp_recovery; u8 sysctl_tcp_thin_linear_timeouts; u8 sysctl_tcp_slow_start_after_idle; u8 sysctl_tcp_retrans_collapse; u8 sysctl_tcp_stdurg; u8 sysctl_tcp_rfc1337; u8 sysctl_tcp_abort_on_overflow; u8 sysctl_tcp_fack; int sysctl_tcp_max_reordering; int sysctl_tcp_adv_win_scale; u8 sysctl_tcp_dsack; u8 sysctl_tcp_app_win; u8 sysctl_tcp_frto; u8 sysctl_tcp_nometrics_save; u8 sysctl_tcp_no_ssthresh_metrics_save; u8 sysctl_tcp_workaround_signed_windows; int sysctl_tcp_challenge_ack_limit; u8 sysctl_tcp_min_tso_segs; u8 sysctl_tcp_reflect_tos; int sysctl_tcp_invalid_ratelimit; int sysctl_tcp_pacing_ss_ratio; int sysctl_tcp_pacing_ca_ratio; unsigned int sysctl_tcp_child_ehash_entries; long unsigned int sysctl_tcp_comp_sack_delay_ns; long unsigned int sysctl_tcp_comp_sack_slack_ns; int sysctl_max_syn_backlog; int sysctl_tcp_fastopen; const struct tcp_congestion_ops *tcp_congestion_control; struct tcp_fastopen_context *tcp_fastopen_ctx; unsigned int sysctl_tcp_fastopen_blackhole_timeout; atomic_t tfo_active_disable_times; long unsigned int tfo_active_disable_stamp; u32 tcp_challenge_timestamp; u32 tcp_challenge_count; u8 sysctl_tcp_plb_enabled; u8 sysctl_tcp_plb_idle_rehash_rounds; u8 sysctl_tcp_plb_rehash_rounds; u8 sysctl_tcp_plb_suspend_rto_sec; int sysctl_tcp_plb_cong_thresh; int sysctl_udp_wmem_min; int sysctl_udp_rmem_min; u8 sysctl_fib_notify_on_flag_change; u8 sysctl_tcp_syn_linear_timeouts; u8 sysctl_udp_l3mdev_accept; u8 sysctl_igmp_llm_reports; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned int sysctl_udp_child_hash_entries; long unsigned int *sysctl_local_reserved_ports; int sysctl_ip_prot_sock; struct mr_table *mrt; struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed; u32 sysctl_fib_multipath_hash_fields; u8 sysctl_fib_multipath_use_neigh; u8 sysctl_fib_multipath_hash_policy; struct fib_notifier_ops *notifier_ops; unsigned int fib_seq; struct fib_notifier_ops *ipmr_notifier_ops; unsigned int ipmr_seq; atomic_t rt_genid; siphash_key_t ip_id_key; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header 
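/*
 * Example (sketch): every sysctl_* field in netns_ipv4 above is exposed,
 * per network namespace, under /proc/sys/net/ipv4/, so reading one from
 * userspace reads the field of the caller's own netns:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       char buf[32];
 *       FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");
 *
 *       if (!f)
 *           return 1;
 *       if (fgets(buf, sizeof(buf), f))
 *           printf("sysctl_tcp_syncookies = %s", buf);
 *       fclose(f);
 *       return 0;
 *   }
 */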
*frags_hdr; struct ctl_table_header *xfrm6_hdr; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; u32 multipath_hash_fields; u8 multipath_hash_policy; u8 bindv6only; u8 flowlabel_consistency; u8 auto_flowlabels; int icmpv6_time; u8 icmpv6_echo_ignore_all; u8 icmpv6_echo_ignore_multicast; u8 icmpv6_echo_ignore_anycast; long unsigned int icmpv6_ratemask[4]; long unsigned int *icmpv6_ratemask_ptr; u8 anycast_src_echo_reply; u8 ip_nonlocal_bind; u8 fwmark_reflect; u8 flowlabel_state_ranges; int idgen_retries; int idgen_delay; int flowlabel_reflect; int max_dst_opts_cnt; int max_hbh_opts_cnt; int max_dst_opts_len; int max_hbh_opts_len; int seg6_flowlabel; u32 ioam6_id; u64 ioam6_id_wide; u8 skip_notify_on_dev_down; u8 fib_notify_on_flag_change; u8 icmpv6_error_anycast_as_unicast; }; struct rt6_statistics; struct seg6_pernet_data; struct netns_ipv6 { struct dst_ops ip6_dst_ops; struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct fqdir *fqdir; struct fib6_info *fib6_null_entry; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; atomic_t ip6_rt_gc_expire; long unsigned int ip6_rt_last_gc; unsigned char flowlabel_has_excl; bool fib6_has_custom_rules; unsigned int fib6_rules_require_fldissect; unsigned int fib6_routes_require_src; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct hlist_head *inet6_addr_lst; spinlock_t addrconf_hash_lock; struct delayed_work addr_chk_work; atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; struct fib_notifier_ops *notifier_ops; struct fib_notifier_ops *ip6mr_notifier_ops; unsigned int ipmr_seq; struct { struct hlist_head head; spinlock_t lock; u32 seq; } ip6addrlbl_table; struct ioam6_pernet_data *ioam6_data; long: 64; long: 64; }; struct nf_logger; struct nf_hook_entries; struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger *nf_loggers[11]; struct ctl_table_header *nf_log_dir_header; struct ctl_table_header *nf_lwtnl_dir_header; struct nf_hook_entries *hooks_ipv4[5]; struct nf_hook_entries *hooks_ipv6[5]; unsigned int defrag_ipv4_users; unsigned int defrag_ipv6_users; }; struct nf_ct_event_notifier; struct nf_generic_net { unsigned int timeout; }; struct nf_tcp_net { unsigned int timeouts[14]; u8 tcp_loose; u8 tcp_be_liberal; u8 tcp_max_retrans; u8 tcp_ignore_invalid_rst; unsigned int offload_timeout; }; struct nf_udp_net { unsigned int timeouts[2]; unsigned int offload_timeout; }; struct nf_icmp_net { unsigned int timeout; }; struct nf_dccp_net { u8 dccp_loose; unsigned int dccp_timeout[10]; }; struct nf_sctp_net { unsigned int timeouts[10]; }; struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct nf_dccp_net dccp; struct nf_sctp_net sctp; }; struct netns_ct { u8 sysctl_log_invalid; u8 sysctl_events; u8 sysctl_acct; u8 sysctl_tstamp; u8 sysctl_checksum; struct ip_conntrack_stat *stat; struct 
nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_ip_net nf_ct_proto; }; struct netns_nftables { u8 gencursor; }; struct nf_flow_table_stat; struct netns_ft { struct nf_flow_table_stat *stat; }; struct netns_bpf { struct bpf_prog_array *run_array[2]; struct bpf_prog *progs[2]; struct list_head links[2]; }; struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; }; struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; }; struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; struct hlist_head *state_byseq; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; unsigned int idx_generator; struct hlist_head policy_inexact[3]; struct xfrm_policy_hash policy_bydst[3]; unsigned int policy_count[6]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct list_head inexact_bins; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; u8 policy_default[3]; struct ctl_table_header *sysctl_hdr; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; seqcount_spinlock_t xfrm_state_hash_generation; seqcount_spinlock_t xfrm_policy_hash_generation; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct delayed_work nat_keepalive_work; long: 64; long: 64; long: 64; }; struct netns_mpls { int ip_ttl_propagate; int default_ttl; size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; }; struct netns_xdp { struct mutex lock; struct hlist_head list; }; struct uevent_sock; struct net_generic; struct net { refcount_t passive; spinlock_t rules_mod_lock; unsigned int dev_base_seq; u32 ifindex; spinlock_t nsid_lock; atomic_t fnhe_genid; struct list_head list; struct list_head exit_list; struct llist_node cleanup_list; struct key_tag *key_domain; struct user_namespace *user_ns; struct ucounts *ucounts; struct idr netns_ids; struct ns_common ns; struct ref_tracker_dir refcnt_tracker; struct ref_tracker_dir notrefcnt_tracker; struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct uevent_sock *uevent_sock; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; struct xarray dev_by_index; struct raw_notifier_head netdev_chain; u32 hash_mix; struct net_device *loopback_dev; struct list_head rules_ops; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_nexthop nexthop; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_nf nf; struct netns_ct ct; struct netns_nftables nft; struct netns_ft ft; struct net_generic *gen; struct netns_bpf bpf; long: 64; long: 64; long: 64; struct netns_xfrm xfrm; u64 net_cookie; struct netns_mpls mpls; struct netns_xdp xdp; struct sock *diag_nlsk; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct netdev_tc_txq { u16 count; u16 offset; }; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); struct net_device_stats { union { long unsigned int rx_packets; 
atomic_long_t __rx_packets; }; union { long unsigned int tx_packets; atomic_long_t __tx_packets; }; union { long unsigned int rx_bytes; atomic_long_t __rx_bytes; }; union { long unsigned int tx_bytes; atomic_long_t __tx_bytes; }; union { long unsigned int rx_errors; atomic_long_t __rx_errors; }; union { long unsigned int tx_errors; atomic_long_t __tx_errors; }; union { long unsigned int rx_dropped; atomic_long_t __rx_dropped; }; union { long unsigned int tx_dropped; atomic_long_t __tx_dropped; }; union { long unsigned int multicast; atomic_long_t __multicast; }; union { long unsigned int collisions; atomic_long_t __collisions; }; union { long unsigned int rx_length_errors; atomic_long_t __rx_length_errors; }; union { long unsigned int rx_over_errors; atomic_long_t __rx_over_errors; }; union { long unsigned int rx_crc_errors; atomic_long_t __rx_crc_errors; }; union { long unsigned int rx_frame_errors; atomic_long_t __rx_frame_errors; }; union { long unsigned int rx_fifo_errors; atomic_long_t __rx_fifo_errors; }; union { long unsigned int rx_missed_errors; atomic_long_t __rx_missed_errors; }; union { long unsigned int tx_aborted_errors; atomic_long_t __tx_aborted_errors; }; union { long unsigned int tx_carrier_errors; atomic_long_t __tx_carrier_errors; }; union { long unsigned int tx_fifo_errors; atomic_long_t __tx_fifo_errors; }; union { long unsigned int tx_heartbeat_errors; atomic_long_t __tx_heartbeat_errors; }; union { long unsigned int tx_window_errors; atomic_long_t __tx_window_errors; }; union { long unsigned int rx_compressed; atomic_long_t __rx_compressed; }; union { long unsigned int tx_compressed; atomic_long_t __tx_compressed; }; }; struct netdev_hw_addr_list { struct list_head list; int count; struct rb_root tree; }; struct sfp_bus; struct net_device_ops; struct xps_dev_maps; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct netdev_rx_queue; struct netdev_name_node; struct xdp_metadata_ops; struct xsk_tx_metadata_ops; struct net_device_core_stats; struct vlan_info; struct xdp_dev_bulk_queue; struct netdev_stat_ops; struct netdev_queue_mgmt_ops; struct phy_link_topology; struct udp_tunnel_nic_info; struct udp_tunnel_nic; struct rtnl_hw_stats64; struct net_device { __u8 __cacheline_group_begin__net_device_read_tx[0]; union { struct { long unsigned int priv_flags: 32; long unsigned int lltx: 1; }; struct { long unsigned int priv_flags: 32; long unsigned int lltx: 1; } priv_flags_fast; }; const struct net_device_ops *netdev_ops; const struct header_ops *header_ops; struct netdev_queue *_tx; netdev_features_t gso_partial_features; unsigned int real_num_tx_queues; unsigned int gso_max_size; unsigned int gso_ipv4_max_size; u16 gso_max_segs; s16 num_tc; unsigned int mtu; short unsigned int needed_headroom; struct netdev_tc_txq tc_to_txq[16]; struct xps_dev_maps *xps_maps[2]; struct nf_hook_entries *nf_hooks_egress; struct bpf_mprog_entry *tcx_egress; __u8 __cacheline_group_end__net_device_read_tx[0]; __u8 __cacheline_group_begin__net_device_read_txrx[0]; union { struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; }; long unsigned int state; unsigned int flags; short unsigned int hard_header_len; netdev_features_t features; struct inet6_dev *ip6_ptr; __u8 __cacheline_group_end__net_device_read_txrx[0]; __u8 __cacheline_group_begin__net_device_read_rx[0]; struct bpf_prog *xdp_prog; struct list_head ptype_specific; int ifindex; unsigned int real_num_rx_queues; struct netdev_rx_queue *_rx; long unsigned int gro_flush_timeout; u32 
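/*
 * Example (sketch): the counters in struct net_device_stats above surface
 * in userspace as one file per field under /sys/class/net/<dev>/statistics/
 * (and in aggregate via /proc/net/dev). "lo" is just a convenient
 * always-present device:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       unsigned long long rx_packets = 0;
 *       FILE *f = fopen("/sys/class/net/lo/statistics/rx_packets", "r");
 *
 *       if (!f)
 *           return 1;
 *       if (fscanf(f, "%llu", &rx_packets) == 1)
 *           printf("lo rx_packets = %llu\n", rx_packets);
 *       fclose(f);
 *       return 0;
 *   }
 */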
napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; rx_handler_func_t *rx_handler; void *rx_handler_data; possible_net_t nd_net; struct bpf_mprog_entry *tcx_ingress; __u8 __cacheline_group_end__net_device_read_rx[0]; char name[16]; struct netdev_name_node *name_node; struct dev_ifalias *ifalias; long unsigned int mem_end; long unsigned int mem_start; long unsigned int base_addr; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct { struct list_head upper; struct list_head lower; } adj_list; xdp_features_t xdp_features; const struct xdp_metadata_ops *xdp_metadata_ops; const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; short unsigned int gflags; short unsigned int needed_tailroom; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; unsigned int min_mtu; unsigned int max_mtu; short unsigned int type; unsigned char min_header_len; unsigned char name_assign_type; int group; struct net_device_stats stats; struct net_device_core_stats *core_stats; atomic_t carrier_up_count; atomic_t carrier_down_count; const struct ethtool_ops *ethtool_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; unsigned int operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned char perm_addr[32]; unsigned char addr_assign_type; unsigned char addr_len; unsigned char upper_level; unsigned char lower_level; short unsigned int neigh_priv_len; short unsigned int dev_id; short unsigned int dev_port; int irq; u32 priv_len; spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; struct list_head unlink_list; unsigned int promiscuity; unsigned int allmulti; bool uc_promisc; unsigned char nested_level; struct in_device *ip_ptr; struct vlan_info *vlan_info; struct mpls_dev *mpls_ptr; const unsigned char *dev_addr; unsigned int num_rx_queues; unsigned int xdp_zc_max_segs; struct netdev_queue *ingress_queue; struct nf_hook_entries *nf_hooks_ingress; unsigned char broadcast[32]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; unsigned int num_tx_queues; struct Qdisc *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; struct xdp_dev_bulk_queue *xdp_bulkq; struct hlist_head qdisc_hash[16]; struct timer_list watchdog_timer; int watchdog_timeo; u32 proto_down_reason; struct list_head todo_list; int *pcpu_refcnt; struct ref_tracker_dir refcnt_tracker; struct list_head link_watch_list; u8 reg_state; bool dismantle; enum { RTNL_LINK_INITIALIZED = 0, RTNL_LINK_INITIALIZING = 1, } rtnl_link_state: 16; bool needs_free_netdev; void (*priv_destructor)(struct net_device *); void *ml_priv; enum netdev_ml_priv_type ml_priv_type; enum netdev_stat_type pcpu_stat_type: 8; struct device dev; const struct attribute_group *sysfs_groups[4]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; const struct netdev_stat_ops *stat_ops; const struct netdev_queue_mgmt_ops *queue_mgmt_ops; unsigned int tso_max_size; u16 tso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 prio_tc_map[16]; struct phy_link_topology *link_topo; struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; long unsigned int see_all_hwtstamp_requests: 1; long 
unsigned int change_proto_down: 1; long unsigned int netns_local: 1; long unsigned int fcoe_mtu: 1; struct list_head net_notifier_list; const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; struct ethtool_netdev_state *ethtool; struct bpf_xdp_entity xdp_state[3]; u8 dev_addr_shadow[32]; netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; netdevice_tracker dev_registered_tracker; struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; struct hlist_head page_pools; struct dim_irq_moder *irq_moder; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u8 priv[0]; }; struct net_device_core_stats { long unsigned int rx_dropped; long unsigned int tx_dropped; long unsigned int rx_nohandler; long unsigned int rx_otherhost_dropped; }; struct net_device_devres { struct net_device *ndev; }; struct netdev_bpf; struct xdp_frame; struct net_device_path_ctx; struct net_device_path; struct skb_shared_hwtstamps; struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_eth_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_siocbond)(struct net_device *, struct ifreq *, int); int (*ndo_siocwandev)(struct net_device *, struct if_settings *); int (*ndo_siocdevprivate)(struct net_device *, struct ifreq *, void *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *, unsigned int); void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); bool (*ndo_has_offload_stats)(const struct net_device *, int); int (*ndo_get_offload_stats)(int, const struct net_device *, void *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); int (*ndo_set_vf_trust)(struct net_device *, int, bool); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); 
int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); int (*ndo_del_slave)(struct net_device *, struct net_device *); struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); int (*ndo_set_features)(struct net_device *, netdev_features_t); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, struct netlink_ext_ack *); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, struct netlink_ext_ack *); int (*ndo_fdb_del_bulk)(struct nlmsghdr *, struct net_device *, struct netlink_ext_ack *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); int (*ndo_mdb_add)(struct net_device *, struct nlattr **, u16, struct netlink_ext_ack *); int (*ndo_mdb_del)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); int (*ndo_mdb_del_bulk)(struct net_device *, struct nlattr **, struct netlink_ext_ack *); int (*ndo_mdb_dump)(struct net_device *, struct sk_buff *, struct netlink_callback *); int (*ndo_mdb_get)(struct net_device *, struct nlattr **, u32, u32, struct netlink_ext_ack *); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); int (*ndo_change_carrier)(struct net_device *, bool); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *, struct xdp_buff *); int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm_kern *, int); struct net_device * (*ndo_get_peer_dev)(struct net_device *); int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); ktime_t (*ndo_get_tstamp)(struct net_device *, const struct skb_shared_hwtstamps *, bool); int (*ndo_hwtstamp_get)(struct net_device *, struct kernel_hwtstamp_config *); int (*ndo_hwtstamp_set)(struct net_device *, struct kernel_hwtstamp_config *, struct netlink_ext_ack *); }; struct net_device_path { enum net_device_path_type type; const struct net_device *dev; union { struct { u16 id; __be16 proto; u8 
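/*
 * Example (sketch): a driver implements struct net_device_ops and fills in
 * only the callbacks it supports; everything else may stay NULL. A minimal
 * kernel-module-context sketch (toy_* names are hypothetical):
 *
 *   #include <linux/netdevice.h>
 *
 *   static netdev_tx_t toy_start_xmit(struct sk_buff *skb,
 *                                     struct net_device *dev)
 *   {
 *       // account the packet in net_device_stats, then drop it:
 *       // a pure sink device
 *       dev->stats.tx_packets++;
 *       dev->stats.tx_bytes += skb->len;
 *       dev_kfree_skb(skb);
 *       return NETDEV_TX_OK;
 *   }
 *
 *   static const struct net_device_ops toy_netdev_ops = {
 *       .ndo_start_xmit = toy_start_xmit,
 *   };
 *
 * At setup time the driver assigns dev->netdev_ops = &toy_netdev_ops;
 * before calling register_netdev(dev).
 */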
h_dest[6]; } encap; struct { enum { DEV_PATH_BR_VLAN_KEEP = 0, DEV_PATH_BR_VLAN_TAG = 1, DEV_PATH_BR_VLAN_UNTAG = 2, DEV_PATH_BR_VLAN_UNTAG_HW = 3, } vlan_mode; u16 vlan_id; __be16 vlan_proto; } bridge; struct { int port; u16 proto; } dsa; struct { u8 wdma_idx; u8 queue; u16 wcid; u8 bss; u8 amsdu; } mtk_wdma; }; }; struct net_device_path_ctx { const struct net_device *dev; u8 daddr[6]; int num_vlans; struct { u16 id; __be16 proto; } vlan[2]; }; struct net_device_path_stack { int num_paths; struct net_device_path path[5]; }; struct net_devmem_dmabuf_binding { struct dma_buf *dmabuf; struct dma_buf_attachment *attachment; struct sg_table *sgt; struct net_device *dev; struct gen_pool *chunk_pool; refcount_t ref; struct list_head list; struct xarray bound_rxqs; u32 id; }; struct net_fill_args { u32 portid; u32 seq; int flags; int cmd; int nsid; bool add_ref; int ref_nsid; }; struct net_generic { union { struct { unsigned int len; struct callback_head rcu; } s; struct { struct {} __empty_ptr; void *ptr[0]; }; }; }; struct offload_callbacks { struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); int (*gro_complete)(struct sk_buff *, int); }; struct packet_offload { __be16 type; u16 priority; struct offload_callbacks callbacks; struct list_head list; }; struct net_offload { struct offload_callbacks callbacks; unsigned int flags; u32 secret; }; struct net_protocol { int (*handler)(struct sk_buff *); int (*err_handler)(struct sk_buff *, u32); unsigned int no_policy: 1; unsigned int icmp_strict_tag_validation: 1; u32 secret; }; struct rps_sock_flow_table; struct net_hotdata { struct packet_offload ip_packet_offload; struct net_offload tcpv4_offload; struct net_protocol tcp_protocol; struct net_offload udpv4_offload; struct net_protocol udp_protocol; struct packet_offload ipv6_packet_offload; struct net_offload tcpv6_offload; struct inet6_protocol tcpv6_protocol; struct inet6_protocol udpv6_protocol; struct net_offload udpv6_offload; struct list_head offload_base; struct list_head ptype_all; struct kmem_cache *skbuff_cache; struct kmem_cache *skbuff_fclone_cache; struct kmem_cache *skb_small_head_cache; struct rps_sock_flow_table *rps_sock_flow_table; u32 rps_cpu_mask; int gro_normal_batch; int netdev_budget; int netdev_budget_usecs; int tstamp_prequeue; int max_backlog; int dev_tx_weight; int dev_rx_weight; int sysctl_max_skb_frags; int sysctl_skb_defer_max; int sysctl_mem_pcpu_rsv; }; struct net_iov { long unsigned int __unused_padding; long unsigned int pp_magic; struct page_pool *pp; struct dmabuf_genpool_chunk_owner *owner; long unsigned int dma_addr; atomic_long_t pp_ref_count; }; struct net_proto_family { int family; int (*create)(struct net *, struct socket *, int, int); struct module *owner; }; struct net_rate_estimator { struct gnet_stats_basic_sync *bstats; spinlock_t *stats_lock; bool running; struct gnet_stats_basic_sync *cpu_bstats; u8 ewma_log; u8 intvl_log; seqcount_t seq; u64 last_packets; u64 last_bytes; u64 avpps; u64 avbps; long unsigned int next_jiffies; struct timer_list timer; struct callback_head rcu; }; struct net_vrf { struct rtable *rth; struct rt6_info *rt6; struct fib6_table *fib6_table; u32 tb_id; struct list_head me_list; int ifindex; }; struct netconfmsg { __u8 ncm_family; }; struct netdev_adjacent { struct net_device *dev; netdevice_tracker dev_tracker; bool master; bool ignore; u16 ref_nr; void *private; struct list_head list; struct callback_head rcu; }; struct 
netdev_bonding_info { ifslave slave; ifbond master; }; struct xsk_buff_pool; struct netdev_bpf { enum bpf_netdev_command command; union { struct { u32 flags; struct bpf_prog *prog; struct netlink_ext_ack *extack; }; struct { struct bpf_offloaded_map *offmap; }; struct { struct xsk_buff_pool *pool; u16 queue_id; } xsk; }; }; struct netdev_hw_addr { struct list_head list; struct rb_node node; unsigned char addr[32]; unsigned char type; bool global_use; int sync_cnt; int refcount; int synced; struct callback_head callback_head; }; struct netdev_lag_lower_state_info { u8 link_up: 1; u8 tx_enabled: 1; }; struct netdev_lag_upper_info { enum netdev_lag_tx_type tx_type; enum netdev_lag_hash hash_type; }; struct netdev_name_node { struct hlist_node hlist; struct list_head list; struct net_device *dev; const char *name; struct callback_head rcu; }; struct netdev_nested_priv { unsigned char flags; void *data; }; struct netdev_net_notifier { struct list_head list; struct notifier_block *nb; }; struct netdev_nl_dump_ctx { long unsigned int ifindex; unsigned int rxq_idx; unsigned int txq_idx; unsigned int napi_id; }; struct netdev_notifier_info { struct net_device *dev; struct netlink_ext_ack *extack; }; struct netdev_notifier_bonding_info { struct netdev_notifier_info info; struct netdev_bonding_info bonding_info; }; struct netdev_notifier_change_info { struct netdev_notifier_info info; unsigned int flags_changed; }; struct netdev_notifier_changelowerstate_info { struct netdev_notifier_info info; void *lower_state_info; }; struct netdev_notifier_changeupper_info { struct netdev_notifier_info info; struct net_device *upper_dev; bool master; bool linking; void *upper_info; }; struct netdev_notifier_info_ext { struct netdev_notifier_info info; union { u32 mtu; } ext; }; struct netdev_notifier_offload_xstats_rd; struct netdev_notifier_offload_xstats_ru; struct netdev_notifier_offload_xstats_info { struct netdev_notifier_info info; enum netdev_offload_xstats_type type; union { struct netdev_notifier_offload_xstats_rd *report_delta; struct netdev_notifier_offload_xstats_ru *report_used; }; }; struct rtnl_hw_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; }; struct netdev_notifier_offload_xstats_rd { struct rtnl_hw_stats64 stats; bool used; }; struct netdev_notifier_offload_xstats_ru { bool used; }; struct netdev_notifier_pre_changeaddr_info { struct netdev_notifier_info info; const unsigned char *dev_addr; }; struct netdev_queue { struct net_device *dev; netdevice_tracker dev_tracker; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; long unsigned int tx_maxrate; atomic_long_t trans_timeout; struct net_device *sb_dev; struct xsk_buff_pool *pool; long: 64; struct dql dql; spinlock_t _xmit_lock; int xmit_lock_owner; long unsigned int trans_start; long unsigned int state; struct napi_struct *napi; int numa_node; long: 64; long: 64; long: 64; }; struct netdev_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_queue *, char *); ssize_t (*store)(struct netdev_queue *, const char *, size_t); }; struct netdev_queue_mgmt_ops { size_t ndo_queue_mem_size; int (*ndo_queue_mem_alloc)(struct net_device *, void *, int); void (*ndo_queue_mem_free)(struct net_device *, void *); int (*ndo_queue_start)(struct net_device *, void *, int); int (*ndo_queue_stop)(struct net_device *, void *, int); }; struct netdev_queue_stats_rx { u64 bytes; u64 packets; u64 alloc_fail; u64 
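/*
 * Example (sketch): struct netdev_bpf above is the argument the stack
 * passes to a driver's .ndo_bpf callback; for XDP attach/detach the
 * command is XDP_SETUP_PROG and bpf->prog is the new program (NULL on
 * detach). toy_xdp_setup() is a hypothetical driver internal:
 *
 *   #include <linux/netdevice.h>
 *   #include <linux/bpf.h>
 *
 *   static int toy_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *   {
 *       switch (bpf->command) {
 *       case XDP_SETUP_PROG:
 *           // swap in bpf->prog, reporting errors via bpf->extack
 *           return toy_xdp_setup(dev, bpf->prog, bpf->extack);
 *       default:
 *           return -EINVAL;
 *       }
 *   }
 *
 * This is the path libxdp ultimately exercises when it attaches its
 * dispatcher program to a device in native XDP mode.
 */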
hw_drops; u64 hw_drop_overruns; u64 csum_unnecessary; u64 csum_none; u64 csum_bad; u64 hw_gro_packets; u64 hw_gro_bytes; u64 hw_gro_wire_packets; u64 hw_gro_wire_bytes; u64 hw_drop_ratelimits; }; struct netdev_queue_stats_tx { u64 bytes; u64 packets; u64 hw_drops; u64 hw_drop_errors; u64 csum_none; u64 needs_csum; u64 hw_gso_packets; u64 hw_gso_bytes; u64 hw_gso_wire_packets; u64 hw_gso_wire_bytes; u64 hw_drop_ratelimits; u64 stop; u64 wake; }; struct xdp_mem_info { u32 type; u32 id; }; struct xdp_rxq_info { struct net_device *dev; u32 queue_index; u32 reg_state; struct xdp_mem_info mem; unsigned int napi_id; u32 frag_size; long: 64; long: 64; long: 64; long: 64; }; struct pp_memory_provider_params { void *mp_priv; }; struct rps_map; struct rps_dev_flow_table; struct netdev_rx_queue { struct xdp_rxq_info xdp_rxq; struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; netdevice_tracker dev_tracker; struct xsk_buff_pool *pool; struct napi_struct *napi; struct pp_memory_provider_params mp_params; long: 64; long: 64; }; struct netdev_stat_ops { void (*get_queue_stats_rx)(struct net_device *, int, struct netdev_queue_stats_rx *); void (*get_queue_stats_tx)(struct net_device *, int, struct netdev_queue_stats_tx *); void (*get_base_stats)(struct net_device *, struct netdev_queue_stats_rx *, struct netdev_queue_stats_tx *); }; struct netdev_xmit { u16 recursion; u8 more; u8 skip_txqueue; }; struct mock_phc; struct xdp_attachment_info { struct bpf_prog *prog; u32 flags; }; struct nsim_sa { struct xfrm_state *xs; __be32 ipaddr[4]; u32 key[4]; u32 salt; bool used; bool crypt; bool rx; }; struct nsim_ipsec { struct nsim_sa sa[33]; struct dentry *pfile; u32 count; u32 tx; u32 ok; }; struct nsim_rxsc { sci_t sci; bool used; }; struct nsim_secy { sci_t sci; struct nsim_rxsc nsim_rxsc[1]; u8 nsim_rxsc_count; bool used; }; struct nsim_macsec { struct nsim_secy nsim_secy[3]; u8 nsim_secy_count; }; struct nsim_ethtool_pauseparam { bool rx; bool tx; bool report_stats_rx; bool report_stats_tx; }; struct nsim_ethtool { u32 get_err; u32 set_err; u32 channels; struct nsim_ethtool_pauseparam pauseparam; struct ethtool_coalesce coalesce; struct ethtool_ringparam ring; struct ethtool_fecparam fec; }; struct nsim_dev; struct nsim_dev_port; struct nsim_rq; struct nsim_bus_dev; struct netdevsim { struct net_device *netdev; struct nsim_dev *nsim_dev; struct nsim_dev_port *nsim_dev_port; struct mock_phc *phc; struct nsim_rq *rq; u64 tx_packets; u64 tx_bytes; u64 tx_dropped; struct u64_stats_sync syncp; struct nsim_bus_dev *nsim_bus_dev; struct bpf_prog *bpf_offloaded; u32 bpf_offloaded_id; struct xdp_attachment_info xdp; struct xdp_attachment_info xdp_hw; bool bpf_tc_accept; bool bpf_tc_non_bound_accept; bool bpf_xdpdrv_accept; bool bpf_xdpoffload_accept; bool bpf_map_accept; struct nsim_ipsec ipsec; struct nsim_macsec macsec; struct { u32 inject_error; u32 sleep; u32 __ports[8]; u32 (*ports)[4]; struct debugfs_u32_array dfs_ports[2]; } udp_ports; struct page *page; struct dentry *pp_dfs; struct nsim_ethtool ethtool; struct netdevsim *peer; }; struct netevent_redirect { struct dst_entry *old; struct dst_entry *new; struct neighbour *neigh; const void *daddr; }; struct netif_security_struct { struct net *ns; int ifindex; u32 sid; }; struct netkit { struct net_device *peer; struct bpf_mprog_entry *active; enum netkit_action policy; struct bpf_mprog_bundle bundle; enum netkit_mode mode; bool primary; u32 headroom; }; struct netkit_link { struct bpf_link link; struct 
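/*
 * Example (sketch): each receive queue that can run XDP carries a struct
 * xdp_rxq_info (see above), registered by the driver so the kernel knows
 * which device, queue and memory model an xdp_buff came from. Typical
 * driver-side use, error handling elided ("rxq" is a hypothetical driver
 * queue struct):
 *
 *   #include <net/xdp.h>
 *
 *   xdp_rxq_info_reg(&rxq->xdp_rxq, dev, queue_index, napi_id);
 *   xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 *                              rxq->page_pool);
 *
 * with xdp_rxq_info_unreg() on teardown. The xdp_mem_info type/id pair is
 * what XDP_REDIRECT later uses to return frames to the right allocator.
 */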
net_device *dev; u32 location; }; struct netlbl_af4list { __be32 addr; __be32 mask; u32 valid; struct list_head list; }; struct netlbl_af6list { struct in6_addr addr; struct in6_addr mask; u32 valid; struct list_head list; }; struct netlbl_audit { u32 secid; kuid_t loginuid; unsigned int sessionid; }; struct netlbl_calipso_doiwalk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; struct netlbl_lsm_secattr; struct netlbl_calipso_ops { int (*doi_add)(struct calipso_doi *, struct netlbl_audit *); void (*doi_free)(struct calipso_doi *); int (*doi_remove)(u32, struct netlbl_audit *); struct calipso_doi * (*doi_getdef)(u32); void (*doi_putdef)(struct calipso_doi *); int (*doi_walk)(u32 *, int (*)(struct calipso_doi *, void *), void *); int (*sock_getattr)(struct sock *, struct netlbl_lsm_secattr *); int (*sock_setattr)(struct sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); void (*sock_delattr)(struct sock *); int (*req_setattr)(struct request_sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); void (*req_delattr)(struct request_sock *); int (*opt_getattr)(const unsigned char *, struct netlbl_lsm_secattr *); unsigned char * (*skbuff_optptr)(const struct sk_buff *); int (*skbuff_setattr)(struct sk_buff *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); int (*skbuff_delattr)(struct sk_buff *); void (*cache_invalidate)(void); int (*cache_add)(const unsigned char *, const struct netlbl_lsm_secattr *); }; struct netlbl_cipsov4_doiwalk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; struct netlbl_domaddr_map; struct netlbl_dommap_def { u32 type; union { struct netlbl_domaddr_map *addrsel; struct cipso_v4_doi *cipso; struct calipso_doi *calipso; }; }; struct netlbl_dom_map { char *domain; struct netlbl_dommap_def def; u16 family; u32 valid; struct list_head list; struct callback_head rcu; }; struct netlbl_domaddr4_map { struct netlbl_dommap_def def; struct netlbl_af4list list; }; struct netlbl_domaddr6_map { struct netlbl_dommap_def def; struct netlbl_af6list list; }; struct netlbl_domaddr_map { struct list_head list4; struct list_head list6; }; struct netlbl_domhsh_tbl { struct list_head *tbl; u32 size; }; struct netlbl_domhsh_walk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; struct netlbl_domhsh_walk_arg___2 { struct netlbl_audit *audit_info; u32 doi; }; struct netlbl_lsm_cache { refcount_t refcount; void (*free)(const void *); void *data; }; struct netlbl_lsm_catmap { u32 startbit; u64 bitmap[4]; struct netlbl_lsm_catmap *next; }; struct netlbl_lsm_secattr { u32 flags; u32 type; char *domain; struct netlbl_lsm_cache *cache; struct { struct { struct netlbl_lsm_catmap *cat; u32 lvl; } mls; u32 secid; } attr; }; struct netlbl_unlhsh_addr4 { u32 secid; struct netlbl_af4list list; struct callback_head rcu; }; struct netlbl_unlhsh_addr6 { u32 secid; struct netlbl_af6list list; struct callback_head rcu; }; struct netlbl_unlhsh_iface { int ifindex; struct list_head addr4_list; struct list_head addr6_list; u32 valid; struct list_head list; struct callback_head rcu; }; struct netlbl_unlhsh_tbl { struct list_head *tbl; u32 size; }; struct netlbl_unlhsh_walk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; struct netlink_broadcast_data { struct sock *exclude_sk; struct net *net; u32 portid; u32 group; int failure; int delivery_failure; int congested; int delivered; gfp_t allocation; struct sk_buff *skb; struct sk_buff *skb2; int (*tx_filter)(struct sock *, struct 
sk_buff *, void *); void *tx_data; }; struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; struct netlink_ext_ack *extack; u16 family; u16 answer_flags; u32 min_dump_alloc; unsigned int prev_seq; unsigned int seq; int flags; bool strict_check; union { u8 ctx[48]; long int args[6]; }; }; struct netlink_compare_arg { possible_net_t pnet; u32 portid; }; struct netlink_dump_control { int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); struct netlink_ext_ack *extack; void *data; struct module *module; u32 min_dump_alloc; int flags; }; struct netlink_ext_ack { const char *_msg; const struct nlattr *bad_attr; const struct nla_policy *policy; const struct nlattr *miss_nest; u16 miss_type; u8 cookie[20]; u8 cookie_len; char _msg_buf[80]; }; struct netlink_kernel_cfg { unsigned int groups; unsigned int flags; void (*input)(struct sk_buff *); int (*bind)(struct net *, int); void (*unbind)(struct net *, int); void (*release)(struct sock *, long unsigned int *); }; struct netlink_notify { struct net *net; u32 portid; int protocol; }; struct netlink_policy_dump_state { unsigned int policy_idx; unsigned int attr_idx; unsigned int n_alloc; struct { const struct nla_policy *policy; unsigned int maxtype; } policies[0]; }; struct netlink_range_validation { u64 min; u64 max; }; struct netlink_range_validation_signed { s64 min; s64 max; }; struct netlink_set_err_data { struct sock *exclude_sk; u32 portid; u32 group; int code; }; struct scm_creds { u32 pid; kuid_t uid; kgid_t gid; }; struct netlink_skb_parms { struct scm_creds creds; __u32 portid; __u32 dst_group; __u32 flags; struct sock *sk; bool nsid_is_set; int nsid; }; struct netlink_sock { struct sock sk; long unsigned int flags; u32 portid; u32 dst_portid; u32 dst_group; u32 subscriptions; u32 ngroups; long unsigned int *groups; long unsigned int state; size_t max_recvmsg_len; wait_queue_head_t wait; bool bound; bool cb_running; int dump_done_errno; struct netlink_callback cb; struct mutex nl_cb_mutex; void (*netlink_rcv)(struct sk_buff *); int (*netlink_bind)(struct net *, int); void (*netlink_unbind)(struct net *, int); void (*netlink_release)(struct sock *, long unsigned int *); struct module *module; struct rhash_head node; struct callback_head rcu; struct work_struct work; }; struct netlink_table { struct rhashtable hash; struct hlist_head mc_list; struct listeners *listeners; unsigned int flags; unsigned int groups; struct mutex *cb_mutex; struct module *module; int (*bind)(struct net *, int); void (*unbind)(struct net *, int); void (*release)(struct sock *, long unsigned int *); int registered; }; struct netlink_tap { struct net_device *dev; struct module *module; struct list_head list; }; struct netlink_tap_net { struct list_head netlink_tap_all; struct mutex netlink_tap_lock; }; struct netnode_security_struct { union { __be32 ipv4; struct in6_addr ipv6; } addr; u32 sid; u16 family; }; struct vrf_map { struct hlist_head ht[16]; spinlock_t vmap_lock; u32 shared_tables; bool strict_mode; }; struct netns_vrf { bool add_fib_rules; struct vrf_map vmap; struct ctl_table_header *ctl_hdr; }; struct netport_security_struct { u32 sid; u16 port; u8 protocol; }; struct new_utsname { char sysname[65]; char nodename[65]; char release[65]; char version[65]; char machine[65]; char domainname[65]; }; struct nh_info; struct 
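/*
 * Example (sketch): struct netlink_kernel_cfg parametrizes an in-kernel
 * netlink socket; only .input is required for a simple receiver. A
 * kernel-module-context sketch (toy_* names and the protocol number are
 * hypothetical):
 *
 *   #include <linux/netlink.h>
 *   #include <net/sock.h>
 *
 *   #define TOY_NETLINK_PROTO 31   // hypothetical unused protocol number
 *
 *   static void toy_nl_input(struct sk_buff *skb)
 *   {
 *       // skb holds netlink messages from userspace; sender metadata
 *       // is available via NETLINK_CB(skb) (struct netlink_skb_parms)
 *   }
 *
 *   static struct netlink_kernel_cfg toy_cfg = {
 *       .input = toy_nl_input,
 *   };
 *
 *   // in module init:
 *   //   struct sock *sk = netlink_kernel_create(&init_net,
 *   //                                           TOY_NETLINK_PROTO,
 *   //                                           &toy_cfg);
 */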
nh_group; struct nexthop { struct rb_node rb_node; struct list_head fi_list; struct list_head f6i_list; struct list_head fdb_list; struct list_head grp_list; struct net *net; u32 id; u8 protocol; u8 nh_flags; bool is_group; refcount_t refcnt; struct callback_head rcu; union { struct nh_info *nh_info; struct nh_group *nh_grp; }; }; struct nexthop_grp { __u32 id; __u8 weight; __u8 weight_high; __u16 resvd2; }; struct nf_conntrack { refcount_t use; }; struct nf_conntrack_zone { u16 id; u8 flags; u8 dir; }; union nf_inet_addr { __u32 all[4]; __be32 ip; __be32 ip6[4]; struct in_addr in; struct in6_addr in6; }; union nf_conntrack_man_proto { __be16 all; struct { __be16 port; } tcp; struct { __be16 port; } udp; struct { __be16 id; } icmp; struct { __be16 port; } dccp; struct { __be16 port; } sctp; struct { __be16 key; } gre; }; struct nf_conntrack_man { union nf_inet_addr u3; union nf_conntrack_man_proto u; u_int16_t l3num; }; struct nf_conntrack_tuple { struct nf_conntrack_man src; struct { union nf_inet_addr u3; union { __be16 all; struct { __be16 port; } tcp; struct { __be16 port; } udp; struct { u_int8_t type; u_int8_t code; } icmp; struct { __be16 port; } dccp; struct { __be16 port; } sctp; struct { __be16 key; } gre; } u; u_int8_t protonum; struct {} __nfct_hash_offsetend; u_int8_t dir; } dst; }; struct nf_conntrack_tuple_hash { struct hlist_nulls_node hnnode; struct nf_conntrack_tuple tuple; }; struct nf_ct_dccp { u_int8_t role[2]; u_int8_t state; u_int8_t last_pkt; u_int8_t last_dir; u_int64_t handshake_seq; }; struct nf_ct_udp { long unsigned int stream_ts; }; struct nf_ct_gre { unsigned int stream_timeout; unsigned int timeout; }; union nf_conntrack_proto { struct nf_ct_dccp dccp; struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct nf_ct_udp udp; struct nf_ct_gre gre; unsigned int tmpl_padto; }; struct nf_ct_ext; struct nf_conn { struct nf_conntrack ct_general; spinlock_t lock; u32 timeout; struct nf_conntrack_zone zone; struct nf_conntrack_tuple_hash tuplehash[2]; long unsigned int status; possible_net_t ct_net; struct hlist_node nat_bysource; struct {} __nfct_init_offset; struct nf_conn *master; u_int32_t mark; struct nf_ct_ext *ext; union nf_conntrack_proto proto; }; struct nf_conn___init { struct nf_conn ct; }; struct nf_conn_counter { atomic64_t packets; atomic64_t bytes; }; struct nf_conn_acct { struct nf_conn_counter counter[2]; }; struct nf_conntrack_helper; struct nf_conn_help { struct nf_conntrack_helper *helper; struct hlist_head expectations; u8 expecting[4]; long: 0; char data[32]; }; struct nf_conn_labels { long unsigned int bits[2]; }; union nf_conntrack_nat_help {}; struct nf_conn_nat { union nf_conntrack_nat_help help; }; struct nf_ct_seqadj { u32 correction_pos; s32 offset_before; s32 offset_after; }; struct nf_conn_seqadj { struct nf_ct_seqadj seq[2]; }; struct nf_conn_synproxy { u32 isn; u32 its; u32 tsoff; }; struct nf_ct_timeout; struct nf_conn_timeout { struct nf_ct_timeout *timeout; }; struct nf_conn_tstamp { u_int64_t start; u_int64_t stop; }; struct nf_conntrack_dccp_buf { struct dccp_hdr dh; struct dccp_hdr_ext ext; union { struct dccp_hdr_ack_bits ack; struct dccp_hdr_request req; struct dccp_hdr_response response; struct dccp_hdr_reset rst; } u; }; struct nf_conntrack_tuple_mask { struct { union nf_inet_addr u3; union nf_conntrack_man_proto u; } src; }; struct nf_conntrack_expect { struct hlist_node lnode; struct hlist_node hnode; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_mask mask; refcount_t use; unsigned int flags; unsigned int class; 
void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); struct nf_conntrack_helper *helper; struct nf_conn *master; struct timer_list timeout; union nf_inet_addr saved_addr; union nf_conntrack_man_proto saved_proto; enum ip_conntrack_dir dir; struct callback_head rcu; }; struct nf_conntrack_expect_policy { unsigned int max_expected; unsigned int timeout; char name[16]; }; struct nf_conntrack_helper { struct hlist_node hnode; char name[16]; refcount_t refcnt; struct module *me; const struct nf_conntrack_expect_policy *expect_policy; struct nf_conntrack_tuple tuple; int (*help)(struct sk_buff *, unsigned int, struct nf_conn *, enum ip_conntrack_info); void (*destroy)(struct nf_conn *); int (*from_nlattr)(struct nlattr *, struct nf_conn *); int (*to_nlattr)(struct sk_buff *, const struct nf_conn *); unsigned int expect_class_max; unsigned int flags; unsigned int queue_num; u16 data_len; char nat_mod_name[16]; }; struct nf_conntrack_l4proto { u_int8_t l4proto; bool allow_clash; u16 nlattr_size; bool (*can_early_drop)(const struct nf_conn *); int (*to_nlattr)(struct sk_buff *, struct nlattr *, struct nf_conn *, bool); int (*from_nlattr)(struct nlattr **, struct nf_conn *); int (*tuple_to_nlattr)(struct sk_buff *, const struct nf_conntrack_tuple *); unsigned int (*nlattr_tuple_size)(void); int (*nlattr_to_tuple)(struct nlattr **, struct nf_conntrack_tuple *, u_int32_t); const struct nla_policy *nla_policy; struct { int (*nlattr_to_obj)(struct nlattr **, struct net *, void *); int (*obj_to_nlattr)(struct sk_buff *, const void *); u16 obj_size; u16 nlattr_max; const struct nla_policy *nla_policy; } ctnl_timeout; }; struct nf_conntrack_nat_helper { struct list_head list; char mod_name[16]; struct module *module; }; struct nf_conntrack_net { atomic_t count; unsigned int expect_count; unsigned int users4; unsigned int users6; unsigned int users_bridge; struct ctl_table_header *sysctl_header; }; struct nf_ct_bridge_info { struct nf_hook_ops *ops; unsigned int ops_size; struct module *me; }; struct nf_ct_ext { u8 offset[5]; u8 len; unsigned int gen_id; long: 0; char data[0]; }; struct nf_ct_helper_expectfn { struct list_head head; const char *name; void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); }; struct nf_ct_hook { int (*update)(struct net *, struct sk_buff *); void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); void (*attach)(struct sk_buff *, const struct sk_buff *); void (*set_closing)(struct nf_conntrack *); int (*confirm)(struct sk_buff *); }; struct nf_ct_iter_data { struct net *net; void *data; u32 portid; int report; }; struct nf_ct_timeout { __u16 l3num; const struct nf_conntrack_l4proto *l4proto; char data[0]; }; struct nf_defrag_hook { struct module *owner; int (*enable)(struct net *); void (*disable)(struct net *); }; struct nf_flow_key { struct flow_dissector_key_meta meta; struct flow_dissector_key_control control; struct flow_dissector_key_control enc_control; struct flow_dissector_key_basic basic; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_keyid enc_key_id; union { struct flow_dissector_key_ipv4_addrs enc_ipv4; struct flow_dissector_key_ipv6_addrs enc_ipv6; }; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ports tp; }; struct nf_flow_match { struct flow_dissector dissector; struct nf_flow_key key; struct nf_flow_key mask; }; 
struct nf_flow_route { struct { struct dst_entry *dst; struct { u32 ifindex; struct { u16 id; __be16 proto; } encap[2]; u8 num_encaps: 2; u8 ingress_vlans: 2; } in; struct { u32 ifindex; u32 hw_ifindex; u8 h_source[6]; u8 h_dest[6]; } out; enum flow_offload_xmit_type xmit_type; } tuple[2]; }; struct nf_flow_rule { struct nf_flow_match match; struct flow_rule *rule; }; struct nf_flow_table_stat { unsigned int count_wq_add; unsigned int count_wq_del; unsigned int count_wq_stats; }; struct nf_flowtable_type; struct nf_flowtable { unsigned int flags; int priority; struct rhashtable rhashtable; struct list_head list; const struct nf_flowtable_type *type; struct delayed_work gc_work; struct flow_block flow_block; struct rw_semaphore flow_block_lock; possible_net_t net; }; struct nf_flowtable_ctx { const struct net_device *in; u32 offset; u32 hdrsize; }; struct nf_flowtable_type { struct list_head list; int family; int (*init)(struct nf_flowtable *); bool (*gc)(const struct flow_offload *); int (*setup)(struct nf_flowtable *, struct net_device *, enum flow_block_command); int (*action)(struct net *, struct flow_offload *, enum flow_offload_tuple_dir, struct nf_flow_rule *); void (*free)(struct nf_flowtable *); void (*get)(struct nf_flowtable *); void (*put)(struct nf_flowtable *); nf_hookfn *hook; struct module *owner; }; struct nf_hook_entry { nf_hookfn *hook; void *priv; }; struct nf_hook_entries { u16 num_hook_entries; struct nf_hook_entry hooks[0]; }; struct nf_hook_entries_rcu_head { struct callback_head head; void *allocation; }; struct nf_hook_state { u8 hook; u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; struct nf_queue_entry; struct nf_ipv6_ops { void (*route_input)(struct sk_buff *); int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); }; struct nf_log_buf { unsigned int count; char buf[1020]; }; struct nf_loginfo; typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); struct nf_logger { char *name; enum nf_log_type type; nf_logfn *logfn; struct module *me; }; struct nf_loginfo { u_int8_t type; union { struct { u_int32_t copy_len; u_int16_t group; u_int16_t qthreshold; u_int16_t flags; } ulog; struct { u_int8_t level; u_int8_t logflags; } log; } u; }; struct nf_mttg_trav { struct list_head *head; struct list_head *curr; uint8_t class; }; struct nf_nat_hook { int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *); void (*decode_session)(struct sk_buff *, struct flowi *); void (*remove_nat_bysrc)(struct nf_conn *); }; struct nf_nat_lookup_hook_priv { struct nf_hook_entries *entries; struct callback_head callback_head; }; struct nf_nat_proto_clean { u8 l3proto; u8 l4proto; }; struct nf_nat_range2 { unsigned int flags; union nf_inet_addr min_addr; union nf_inet_addr max_addr; union nf_conntrack_man_proto min_proto; union nf_conntrack_man_proto max_proto; union nf_conntrack_man_proto base_proto; }; struct nf_queue_entry { struct list_head list; struct sk_buff *skb; unsigned int id; unsigned int hook_index; struct nf_hook_state state; u16 size; }; struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *, unsigned int); void (*nf_hook_drop)(struct net *); }; struct nf_sockopt_ops { struct list_head 
list; u_int8_t pf; int set_optmin; int set_optmax; int (*set)(struct sock *, int, sockptr_t, unsigned int); int get_optmin; int get_optmax; int (*get)(struct sock *, int, void *, int *); struct module *owner; }; struct nf_synproxy_info { __u8 options; __u8 wscale; __u16 mss; }; struct nfgenmsg { __u8 nfgen_family; __u8 version; __be16 res_id; }; struct nfnl_callback; struct nfnetlink_subsystem { const char *name; __u8 subsys_id; __u8 cb_count; const struct nfnl_callback *cb; struct module *owner; int (*commit)(struct net *, struct sk_buff *); int (*abort)(struct net *, struct sk_buff *, enum nfnl_abort_action); bool (*valid_genid)(struct net *, u32); }; struct nfnl_info; struct nfnl_callback { int (*call)(struct sk_buff *, const struct nfnl_info *, const struct nlattr * const *); const struct nla_policy *policy; enum nfnl_callback_type type; __u16 attr_count; }; struct nfnl_ct_hook { size_t (*build_size)(const struct nf_conn *); int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t); int (*parse)(const struct nlattr *, struct nf_conn *); int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32); void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32); }; struct nfnl_err { struct list_head head; struct nlmsghdr *nlh; int err; struct netlink_ext_ack extack; }; struct nfnl_info { struct net *net; struct sock *sk; const struct nlmsghdr *nlh; const struct nfgenmsg *nfmsg; struct netlink_ext_ack *extack; }; struct nfnl_log_net { spinlock_t instances_lock; struct hlist_head instance_table[16]; atomic_t global_seq; }; struct nfnl_net { struct sock *nfnl; }; struct nfnl_queue_net { spinlock_t instances_lock; struct hlist_head instance_table[16]; }; struct nfqnl_instance { struct hlist_node hlist; struct callback_head rcu; u32 peer_portid; unsigned int queue_maxlen; unsigned int copy_range; unsigned int queue_dropped; unsigned int queue_user_dropped; u_int16_t queue_num; u_int8_t copy_mode; u_int32_t flags; spinlock_t lock; unsigned int queue_total; unsigned int id_sequence; struct list_head queue_list; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct nfqnl_msg_config_cmd { __u8 command; __u8 _pad; __be16 pf; }; struct nfqnl_msg_config_params { __be32 copy_range; __u8 copy_mode; } __attribute__((packed)); struct nfqnl_msg_packet_hdr { __be32 packet_id; __be16 hw_protocol; __u8 hook; } __attribute__((packed)); struct nfqnl_msg_packet_hw { __be16 hw_addrlen; __u16 _pad; __u8 hw_addr[8]; }; struct nfqnl_msg_packet_timestamp { __be64 sec; __be64 usec; }; struct nfqnl_msg_verdict_hdr { __be32 verdict; __be32 id; }; struct nft_table; struct nft_audit_data { struct nft_table *table; int entries; int op; struct list_head list; }; struct nft_rule_blob; struct nft_chain { struct nft_rule_blob *blob_gen_0; struct nft_rule_blob *blob_gen_1; struct list_head rules; struct list_head list; struct rhlist_head rhlhead; struct nft_table *table; u64 handle; u32 use; u8 flags: 5; u8 bound: 1; u8 genmask: 2; char *name; u16 udlen; u8 *udata; struct nft_rule_blob *blob_next; }; struct nft_chain_type; struct nft_stats; struct nft_base_chain { struct nf_hook_ops ops; struct list_head hook_list; const struct nft_chain_type *type; u8 policy; u8 flags; struct nft_stats *stats; struct nft_chain chain; struct flow_block flow_block; }; struct nft_bitmap { struct list_head list; u16 bitmap_size; u8 bitmap[0]; }; struct nft_elem_priv {}; struct nft_set_ext { u8 genmask; u8 offset[8]; char data[0]; }; struct nft_bitmap_elem { struct 
nft_elem_priv priv; struct list_head head; struct nft_set_ext ext; }; struct nft_verdict { u32 code; struct nft_chain *chain; }; struct nft_data { union { u32 data[4]; struct nft_verdict verdict; }; }; struct nft_bitwise { u8 sreg; u8 dreg; enum nft_bitwise_ops op: 8; u8 len; struct nft_data mask; struct nft_data xor; struct nft_data data; }; struct nft_bitwise_fast_expr { u32 mask; u32 xor; u8 sreg; u8 dreg; }; struct nft_byteorder { u8 sreg; u8 dreg; enum nft_byteorder_ops op: 8; u8 len; u8 size; }; struct nft_chain_hook { u32 num; s32 priority; const struct nft_chain_type *type; struct list_head list; }; struct nft_chain_type { const char *name; enum nft_chain_types type; int family; struct module *owner; unsigned int hook_mask; nf_hookfn *hooks[6]; int (*ops_register)(struct net *, const struct nf_hook_ops *); void (*ops_unregister)(struct net *, const struct nf_hook_ops *); }; struct nft_cmp16_fast_expr { struct nft_data data; struct nft_data mask; u8 sreg; u8 len; bool inv; }; struct nft_cmp_expr { struct nft_data data; u8 sreg; u8 len; enum nft_cmp_ops op: 8; }; struct nft_cmp_fast_expr { u32 data; u32 mask; u8 sreg; u8 len; bool inv; }; union nft_cmp_offload_data { u16 val16; u32 val32; u64 val64; }; struct nft_counter { u64_stats_t bytes; u64_stats_t packets; }; struct nft_counter_percpu_priv { struct nft_counter *counter; }; struct nft_counter_tot { s64 bytes; s64 packets; }; struct nft_ct_frag6_pernet { struct ctl_table_header *nf_frag_frags_hdr; struct fqdir *fqdir; }; struct nft_ctx { struct net *net; struct nft_table *table; struct nft_chain *chain; const struct nlattr * const *nla; u32 portid; u32 seq; u16 flags; u8 family; u8 level; bool report; long unsigned int reg_inited[1]; }; struct nft_data_desc { enum nft_data_types type; unsigned int size; unsigned int len; unsigned int flags; }; struct nft_set_ext_tmpl { u16 len; u8 offset[8]; u8 ext_len[8]; }; struct nft_set_binding { struct list_head list; const struct nft_chain *chain; u32 flags; }; struct nft_set; struct nft_expr; struct nft_dynset { struct nft_set *set; struct nft_set_ext_tmpl tmpl; enum nft_dynset_ops op: 8; u8 sreg_key; u8 sreg_data; bool invert; bool expr; u8 num_exprs; u64 timeout; struct nft_expr *expr_array[2]; struct nft_set_binding binding; }; struct nft_expr { const struct nft_expr_ops *ops; unsigned char data[0]; }; struct nft_expr_info { const struct nft_expr_ops *ops; const struct nlattr *attr; struct nlattr *tb[17]; }; struct nft_regs; struct nft_pktinfo; struct nft_regs_track; struct nft_offload_ctx; struct nft_flow_rule; struct nft_expr_type; struct nft_expr_ops { void (*eval)(const struct nft_expr *, struct nft_regs *, const struct nft_pktinfo *); int (*clone)(struct nft_expr *, const struct nft_expr *, gfp_t); unsigned int size; int (*init)(const struct nft_ctx *, const struct nft_expr *, const struct nlattr * const *); void (*activate)(const struct nft_ctx *, const struct nft_expr *); void (*deactivate)(const struct nft_ctx *, const struct nft_expr *, enum nft_trans_phase); void (*destroy)(const struct nft_ctx *, const struct nft_expr *); void (*destroy_clone)(const struct nft_ctx *, const struct nft_expr *); int (*dump)(struct sk_buff *, const struct nft_expr *, bool); int (*validate)(const struct nft_ctx *, const struct nft_expr *); bool (*reduce)(struct nft_regs_track *, const struct nft_expr *); bool (*gc)(struct net *, const struct nft_expr *); int (*offload)(struct nft_offload_ctx *, struct nft_flow_rule *, const struct nft_expr *); bool (*offload_action)(const struct nft_expr *); void 
(*offload_stats)(struct nft_expr *, const struct flow_stats *); const struct nft_expr_type *type; void *data; }; struct nft_expr_type { const struct nft_expr_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); void (*release_ops)(const struct nft_expr_ops *); const struct nft_expr_ops *ops; const struct nft_expr_ops *inner_ops; struct list_head list; const char *name; struct module *owner; const struct nla_policy *policy; unsigned int maxattr; u8 family; u8 flags; }; struct nft_exthdr { u8 type; u8 offset; u8 len; u8 op; u8 dreg; u8 sreg; u8 flags; }; struct nft_flow_key { struct flow_dissector_key_basic basic; struct flow_dissector_key_control control; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; struct flow_dissector_key_ip ip; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_eth_addrs eth_addrs; struct flow_dissector_key_meta meta; }; struct nft_flow_match { struct flow_dissector dissector; struct nft_flow_key key; struct nft_flow_key mask; }; struct nft_flowtable; struct nft_flow_offload { struct nft_flowtable *flowtable; }; struct nft_flow_rule { __be16 proto; struct nft_flow_match match; struct flow_rule *rule; }; struct nft_flowtable { struct list_head list; struct nft_table *table; char *name; int hooknum; int ops_len; u32 genmask: 2; u32 use; u64 handle; long: 64; struct list_head hook_list; struct nf_flowtable data; long: 64; long: 64; long: 64; }; struct nft_flowtable_filter { char *table; }; struct nft_flowtable_hook { u32 num; int priority; struct list_head list; }; struct nft_forward_info { const struct net_device *indev; const struct net_device *outdev; const struct net_device *hw_outdev; struct id encap[2]; u8 num_encaps; u8 ingress_vlans; u8 h_source[6]; u8 h_dest[6]; enum flow_offload_xmit_type xmit_type; }; struct nft_hash { u32 seed; u32 buckets; struct hlist_head table[0]; }; struct nft_hash_elem { struct nft_elem_priv priv; struct hlist_node node; struct nft_set_ext ext; }; struct nft_hook { struct list_head list; struct nf_hook_ops ops; struct callback_head rcu; }; struct nft_immediate_expr { struct nft_data data; u8 dreg; u8 dlen; }; struct nft_inner { u8 flags; u8 hdrsize; u8 type; u8 expr_type; struct __nft_expr expr; }; struct nft_inner_tun_ctx { u16 type; u16 inner_tunoff; u16 inner_lloff; u16 inner_nhoff; u16 inner_thoff; __be16 llproto; u8 l4proto; u8 flags; }; struct nft_rule_dp; struct nft_jumpstack { const struct nft_rule_dp *rule; }; struct nft_last { long unsigned int jiffies; unsigned int set; }; struct nft_last_priv { struct nft_last *last; }; struct nft_lookup { struct nft_set *set; u8 sreg; u8 dreg; bool dreg_set; bool invert; struct nft_set_binding binding; }; struct nft_module_request { struct list_head list; char module[56]; bool done; }; struct nft_obj_dump_ctx { unsigned int s_idx; char *table; u32 type; bool reset; }; struct nft_object_hash_key { const char *name; const struct nft_table *table; }; struct nft_object_ops; struct nft_object { struct list_head list; struct rhlist_head rhlhead; struct nft_object_hash_key key; u32 genmask: 2; u32 use; u64 handle; u16 udlen; u8 *udata; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; const struct nft_object_ops *ops; unsigned char data[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct nft_object_type; struct nft_object_ops { void (*eval)(struct nft_object *, struct nft_regs *, const struct 
nft_pktinfo *); unsigned int size; int (*init)(const struct nft_ctx *, const struct nlattr * const *, struct nft_object *); void (*destroy)(const struct nft_ctx *, struct nft_object *); int (*dump)(struct sk_buff *, struct nft_object *, bool); void (*update)(struct nft_object *, struct nft_object *); const struct nft_object_type *type; }; struct nft_object_type { const struct nft_object_ops * (*select_ops)(const struct nft_ctx *, const struct nlattr * const *); const struct nft_object_ops *ops; struct list_head list; u32 type; unsigned int maxattr; u8 family; struct module *owner; const struct nla_policy *policy; }; struct nft_objref_map { struct nft_set *set; u8 sreg; struct nft_set_binding binding; }; struct nft_offload_reg { u32 key; u32 len; u32 base_offset; u32 offset; u32 flags; struct nft_data data; struct nft_data mask; }; struct nft_offload_ctx { struct { enum nft_offload_dep_type type; __be16 l3num; u8 protonum; } dep; unsigned int num_actions; struct net *net; struct nft_offload_reg regs[24]; }; struct nft_offload_ethertype { __be16 value; __be16 mask; }; struct nft_payload_set { enum nft_payload_bases base: 8; u8 offset; u8 len; u8 sreg; u8 csum_type; u8 csum_offset; u8 csum_flags; }; struct nft_payload_vlan_hdr { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; struct nft_pipapo_match; struct nft_pipapo { struct nft_pipapo_match *match; struct nft_pipapo_match *clone; int width; long unsigned int last_gc; }; struct nft_pipapo_elem { struct nft_elem_priv priv; struct nft_set_ext ext; }; union nft_pipapo_map_bucket; struct nft_pipapo_field { unsigned int rules; unsigned int bsize; unsigned int rules_alloc; u8 groups; u8 bb; long unsigned int *lt; union nft_pipapo_map_bucket *mt; }; union nft_pipapo_map_bucket { struct { u32 to; u32 n; }; struct nft_pipapo_elem *e; }; struct nft_pipapo_scratch; struct nft_pipapo_match { u8 field_count; unsigned int bsize_max; struct nft_pipapo_scratch **scratch; struct callback_head rcu; struct nft_pipapo_field f[0]; }; struct nft_pipapo_scratch { u8 map_index; u32 align_off; long unsigned int map[0]; }; struct nft_pktinfo { struct sk_buff *skb; const struct nf_hook_state *state; u8 flags; u8 tprot; u16 fragoff; u16 thoff; u16 inneroff; }; struct nft_range_expr { struct nft_data data_from; struct nft_data data_to; u8 sreg; u8 len; enum nft_range_ops op: 8; }; struct seqcount_rwlock { seqcount_t seqcount; rwlock_t *lock; }; typedef struct seqcount_rwlock seqcount_rwlock_t; struct nft_rbtree { struct rb_root root; rwlock_t lock; seqcount_rwlock_t count; long unsigned int last_gc; }; struct nft_rbtree_elem { struct nft_elem_priv priv; struct rb_node node; struct nft_set_ext ext; }; struct nft_regs { union { u32 data[20]; struct nft_verdict verdict; }; }; struct nft_regs_track { struct { const struct nft_expr *selector; const struct nft_expr *bitwise; u8 num_reg; } regs[20]; const struct nft_expr *cur; const struct nft_expr *last; }; struct nft_rhash { struct rhashtable ht; struct delayed_work gc_work; }; struct nft_rhash_cmp_arg { const struct nft_set *set; const u32 *key; u8 genmask; u64 tstamp; }; struct nft_rhash_ctx { const struct nft_ctx ctx; const struct nft_set *set; }; struct nft_rhash_elem { struct nft_elem_priv priv; struct rhash_head node; struct nft_set_ext ext; }; struct nft_rt { enum nft_rt_keys key: 8; u8 dreg; }; struct nft_rule { struct list_head list; u64 handle: 42; u64 genmask: 2; u64 dlen: 12; u64 udata: 1; unsigned char data[0]; }; struct nft_rule_blob { long unsigned int size; unsigned char data[0]; }; struct nft_rule_dp { u64 
is_last: 1; u64 dlen: 12; u64 handle: 42; long: 0; unsigned char data[0]; }; struct nft_rule_dp_last { struct nft_rule_dp end; struct callback_head h; struct nft_rule_blob *blob; const struct nft_chain *chain; }; struct nft_rule_dump_ctx { unsigned int s_idx; char *table; char *chain; bool reset; }; struct nft_secmark { u32 secid; char *ctx; }; struct nft_set_ops; struct nft_set { struct list_head list; struct list_head bindings; refcount_t refs; struct nft_table *table; possible_net_t net; char *name; u64 handle; u32 ktype; u32 dtype; u32 objtype; u32 size; u8 field_len[16]; u8 field_count; u32 use; atomic_t nelems; u32 ndeact; u64 timeout; u32 gc_int; u16 policy; u16 udlen; unsigned char *udata; struct list_head pending_update; long: 64; long: 64; long: 64; long: 64; const struct nft_set_ops *ops; u16 flags: 13; u16 dead: 1; u16 genmask: 2; u8 klen; u8 dlen; u8 num_exprs; struct nft_expr *exprs[2]; struct list_head catchall_list; unsigned char data[0]; long: 64; long: 64; }; struct nft_set_desc { u32 ktype; unsigned int klen; u32 dtype; unsigned int dlen; u32 objtype; unsigned int size; u32 policy; u32 gc_int; u64 timeout; u8 field_len[16]; u8 field_count; bool expr; }; struct nft_set_iter { u8 genmask; enum nft_iter_type type: 8; unsigned int count; unsigned int skip; int err; int (*fn)(const struct nft_ctx *, struct nft_set *, const struct nft_set_iter *, struct nft_elem_priv *); }; struct nft_set_dump_args { const struct netlink_callback *cb; struct nft_set_iter iter; struct sk_buff *skb; bool reset; }; struct nft_set_dump_ctx { const struct nft_set *set; struct nft_ctx ctx; bool reset; }; struct nft_set_elem { union { u32 buf[16]; struct nft_data val; } key; union { u32 buf[16]; struct nft_data val; } key_end; union { u32 buf[16]; struct nft_data val; } data; struct nft_elem_priv *priv; }; struct nft_set_elem_catchall { struct list_head list; struct callback_head rcu; struct nft_elem_priv *elem; }; struct nft_set_elem_expr { u8 size; long: 0; unsigned char data[0]; }; struct nft_set_estimate { u64 size; enum nft_set_class lookup; enum nft_set_class space; }; struct nft_set_ext_type { u8 len; u8 align; }; struct nft_set_ops { bool (*lookup)(const struct net *, const struct nft_set *, const u32 *, const struct nft_set_ext **); bool (*update)(struct nft_set *, const u32 *, struct nft_elem_priv * (*)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *, struct nft_regs *, const struct nft_set_ext **); bool (*delete)(const struct nft_set *, const u32 *); int (*insert)(const struct net *, const struct nft_set *, const struct nft_set_elem *, struct nft_elem_priv **); void (*activate)(const struct net *, const struct nft_set *, struct nft_elem_priv *); struct nft_elem_priv * (*deactivate)(const struct net *, const struct nft_set *, const struct nft_set_elem *); void (*flush)(const struct net *, const struct nft_set *, struct nft_elem_priv *); void (*remove)(const struct net *, const struct nft_set *, struct nft_elem_priv *); void (*walk)(const struct nft_ctx *, struct nft_set *, struct nft_set_iter *); struct nft_elem_priv * (*get)(const struct net *, const struct nft_set *, const struct nft_set_elem *, unsigned int); void (*commit)(struct nft_set *); void (*abort)(const struct nft_set *); u64 (*privsize)(const struct nlattr * const *, const struct nft_set_desc *); bool (*estimate)(const struct nft_set_desc *, u32, struct nft_set_estimate *); int (*init)(const struct nft_set *, const struct nft_set_desc *, const struct nlattr * const *); void 
(*destroy)(const struct nft_ctx *, const struct nft_set *); void (*gc_init)(const struct nft_set *); unsigned int elemsize; }; struct nft_set_type { const struct nft_set_ops ops; u32 features; }; struct nft_stats { u64 bytes; u64 pkts; struct u64_stats_sync syncp; }; struct nft_table { struct list_head list; struct rhltable chains_ht; struct list_head chains; struct list_head sets; struct list_head objects; struct list_head flowtables; u64 hgenerator; u64 handle; u32 use; u16 family: 6; u16 flags: 8; u16 genmask: 2; u32 nlpid; char *name; u16 udlen; u8 *udata; u8 validate_state; }; struct nft_timeout { u64 timeout; u64 expiration; }; struct nft_traceinfo { bool trace; bool nf_trace; bool packet_dumped; enum nft_trace_types type: 8; u32 skbid; const struct nft_base_chain *basechain; }; struct nft_trans { struct list_head list; struct net *net; struct nft_table *table; int msg_type; u32 seq; u16 flags; u8 report: 1; u8 put_net: 1; }; struct nft_trans_binding { struct nft_trans nft_trans; struct list_head binding_list; }; struct nft_trans_chain { struct nft_trans_binding nft_trans_binding; struct nft_chain *chain; char *name; struct nft_stats *stats; u8 policy; bool update; bool bound; u32 chain_id; struct nft_base_chain *basechain; struct list_head hook_list; }; struct nft_trans_elem { struct nft_trans nft_trans; struct nft_set *set; struct nft_elem_priv *elem_priv; u64 timeout; u64 expiration; u8 update_flags; bool bound; }; struct nft_trans_flowtable { struct nft_trans nft_trans; struct nft_flowtable *flowtable; struct list_head hook_list; u32 flags; bool update; }; struct nft_trans_gc { struct list_head list; struct net *net; struct nft_set *set; u32 seq; u16 count; struct nft_elem_priv *priv[256]; struct callback_head rcu; }; struct nft_trans_obj { struct nft_trans nft_trans; struct nft_object *obj; struct nft_object *newobj; bool update; }; struct nft_trans_rule { struct nft_trans nft_trans; struct nft_rule *rule; struct nft_chain *chain; struct nft_flow_rule *flow; u32 rule_id; bool bound; }; struct nft_trans_set { struct nft_trans_binding nft_trans_binding; struct list_head list_trans_newset; struct nft_set *set; u32 set_id; u32 gc_int; u64 timeout; bool update; bool bound; u32 size; }; struct nft_trans_table { struct nft_trans nft_trans; bool update; }; struct nft_userdata { u8 len; unsigned char data[0]; }; struct nftables_pernet { struct list_head tables; struct list_head commit_list; struct list_head commit_set_list; struct list_head binding_list; struct list_head module_list; struct list_head notify_list; struct mutex commit_mutex; u64 table_handle; u64 tstamp; unsigned int base_seq; unsigned int gc_seq; u8 validate_state; }; struct nftnl_skb_parms { bool report; }; struct nfulnl_instance { struct hlist_node hlist; spinlock_t lock; refcount_t use; unsigned int qlen; struct sk_buff *skb; struct timer_list timer; struct net *net; netns_tracker ns_tracker; struct user_namespace *peer_user_ns; u32 peer_portid; unsigned int flushtimeout; unsigned int nlbufsiz; unsigned int qthreshold; u_int32_t copy_range; u_int32_t seq; u_int16_t group_num; u_int16_t flags; u_int8_t copy_mode; struct callback_head rcu; }; struct nfulnl_msg_config_cmd { __u8 command; }; struct nfulnl_msg_config_mode { __be32 copy_range; __u8 copy_mode; __u8 _pad; } __attribute__((packed)); struct nfulnl_msg_packet_hdr { __be16 hw_protocol; __u8 hook; __u8 _pad; }; struct nfulnl_msg_packet_hw { __be16 hw_addrlen; __u16 _pad; __u8 hw_addr[8]; }; struct nfulnl_msg_packet_timestamp { __be64 sec; __be64 usec; }; struct 
nh_config { u32 nh_id; u8 nh_family; u8 nh_protocol; u8 nh_blackhole; u8 nh_fdb; u32 nh_flags; int nh_ifindex; struct net_device *dev; union { __be32 ipv4; struct in6_addr ipv6; } gw; struct nlattr *nh_grp; u16 nh_grp_type; u16 nh_grp_res_num_buckets; long unsigned int nh_grp_res_idle_timer; long unsigned int nh_grp_res_unbalanced_timer; bool nh_grp_res_has_num_buckets; bool nh_grp_res_has_idle_timer; bool nh_grp_res_has_unbalanced_timer; bool nh_hw_stats; struct nlattr *nh_encap; u16 nh_encap_type; u32 nlflags; struct nl_info nlinfo; }; struct nh_dump_filter { u32 nh_id; int dev_idx; int master_idx; bool group_filter; bool fdb_filter; u32 res_bucket_nh_id; u32 op_flags; }; struct nh_grp_entry_stats; struct nh_grp_entry { struct nexthop *nh; struct nh_grp_entry_stats *stats; u16 weight; union { struct { atomic_t upper_bound; } hthr; struct { struct list_head uw_nh_entry; u16 count_buckets; u16 wants_buckets; } res; }; struct list_head nh_list; struct nexthop *nh_parent; u64 packets_hw; }; struct nh_res_table; struct nh_group { struct nh_group *spare; u16 num_nh; bool is_multipath; bool hash_threshold; bool resilient; bool fdb_nh; bool has_v4; bool hw_stats; struct nh_res_table *res_table; struct nh_grp_entry nh_entries[0]; }; struct nh_grp_entry_stats { u64_stats_t packets; struct u64_stats_sync syncp; }; struct nh_info { struct hlist_node dev_hash; struct nexthop *nh_parent; u8 family; bool reject_nh; bool fdb_nh; union { struct fib_nh_common fib_nhc; struct fib_nh fib_nh; struct fib6_nh fib6_nh; }; }; struct nh_notifier_single_info { struct net_device *dev; u8 gw_family; union { __be32 ipv4; struct in6_addr ipv6; }; u32 id; u8 is_reject: 1; u8 is_fdb: 1; u8 has_encap: 1; }; struct nh_notifier_grp_entry_info { u16 weight; struct nh_notifier_single_info nh; }; struct nh_notifier_grp_hw_stats_entry_info { u32 id; u64 packets; }; struct nh_notifier_grp_hw_stats_info { u16 num_nh; bool hw_stats_used; struct nh_notifier_grp_hw_stats_entry_info stats[0]; }; struct nh_notifier_grp_info { u16 num_nh; bool is_fdb; bool hw_stats; struct nh_notifier_grp_entry_info nh_entries[0]; }; struct nh_notifier_res_table_info; struct nh_notifier_res_bucket_info; struct nh_notifier_info { struct net *net; struct netlink_ext_ack *extack; u32 id; enum nh_notifier_info_type type; union { struct nh_notifier_single_info *nh; struct nh_notifier_grp_info *nh_grp; struct nh_notifier_res_table_info *nh_res_table; struct nh_notifier_res_bucket_info *nh_res_bucket; struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats; }; }; struct nh_notifier_res_bucket_info { u16 bucket_index; unsigned int idle_timer_ms; bool force; struct nh_notifier_single_info old_nh; struct nh_notifier_single_info new_nh; }; struct nh_notifier_res_table_info { u16 num_nh_buckets; bool hw_stats; struct nh_notifier_single_info nhs[0]; }; struct nh_res_bucket { struct nh_grp_entry *nh_entry; atomic_long_t used_time; long unsigned int migrated_time; bool occupied; u8 nh_flags; }; struct nh_res_table { struct net *net; u32 nhg_id; struct delayed_work upkeep_dw; struct list_head uw_nh_entries; long unsigned int unbalanced_since; u32 idle_timer; u32 unbalanced_timer; u16 num_nh_buckets; struct nh_res_bucket nh_buckets[0]; }; struct nhmsg { unsigned char nh_family; unsigned char nh_scope; unsigned char nh_protocol; unsigned char resvd; unsigned int nh_flags; }; struct nl_pktinfo { __u32 group; }; struct rhashtable_walker { struct list_head list; struct bucket_table *tbl; }; struct rhashtable_iter { struct rhashtable *ht; struct rhash_head *p; struct 
rhlist_head *list; struct rhashtable_walker walker; unsigned int slot; unsigned int skip; bool end_of_table; }; struct nl_seq_iter { struct seq_net_private p; struct rhashtable_iter hti; int link; }; struct nla_bitfield32 { __u32 value; __u32 selector; }; struct nla_policy { u8 type; u8 validation_type; u16 len; union { u16 strict_start_type; const u32 bitfield32_valid; const u32 mask; const char *reject_message; const struct nla_policy *nested_policy; const struct netlink_range_validation *range; const struct netlink_range_validation_signed *range_signed; struct { s16 min; s16 max; }; int (*validate)(const struct nlattr *, struct netlink_ext_ack *); }; }; struct nlattr { __u16 nla_len; __u16 nla_type; }; struct nlmsg_perm { u16 nlmsg_type; u32 perm; }; struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; }; struct nlmsgerr { int error; struct nlmsghdr msg; }; struct nls_table { const char *charset; const char *alias; int (*uni2char)(wchar_t, unsigned char *, int); int (*char2uni)(const unsigned char *, int, wchar_t *); const unsigned char *charset2lower; const unsigned char *charset2upper; struct module *owner; struct nls_table *next; }; struct nmi_desc { raw_spinlock_t lock; struct list_head head; }; struct nmi_stats { unsigned int normal; unsigned int unknown; unsigned int external; unsigned int swallow; long unsigned int recv_jiffies; long unsigned int idt_seq; long unsigned int idt_nmi_seq; long unsigned int idt_ignored; atomic_long_t idt_calls; long unsigned int idt_seq_snap; long unsigned int idt_nmi_seq_snap; long unsigned int idt_ignored_snap; long int idt_calls_snap; }; typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); struct nmiaction { struct list_head list; nmi_handler_t handler; u64 max_duration; long unsigned int flags; const char *name; }; struct node { struct device dev; struct list_head access_list; }; struct node_access_nodes { struct device dev; struct list_head list_node; unsigned int access; }; struct node_attr { struct device_attribute attr; enum node_states state; }; struct node_groups { unsigned int id; union { unsigned int ngroups; unsigned int ncpus; }; }; struct node_hstate { struct kobject *hugepages_kobj; struct kobject *hstate_kobjs[2]; }; struct node_memory_type_map { struct memory_dev_type *memtype; int map_count; }; struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; }; struct notification { atomic_t requests; u32 flags; u64 next_id; struct list_head notifications; }; struct ns_get_path_bpf_map_args { struct bpf_offloaded_map *offmap; struct bpf_map_info *info; }; struct ns_get_path_bpf_prog_args { struct bpf_prog *prog; struct bpf_prog_info *info; }; struct ns_get_path_task_args { const struct proc_ns_operations *ns_ops; struct task_struct *task; }; struct nsim_map_entry { void *key; void *value; }; struct nsim_bpf_bound_map { struct netdevsim *ns; struct bpf_offloaded_map *map; struct mutex mutex; struct nsim_map_entry entry[2]; struct list_head l; }; struct nsim_bpf_bound_prog { struct nsim_dev *nsim_dev; struct bpf_prog *prog; struct dentry *ddir; const char *state; bool is_loaded; struct list_head l; }; struct nsim_bus_dev { struct device dev; struct list_head list; unsigned int port_count; unsigned int num_queues; struct net *initial_net; unsigned int max_vfs; unsigned int num_vfs; bool init; }; struct nsim_dev_health { struct devlink_health_reporter *empty_reporter; struct devlink_health_reporter *dummy_reporter; struct dentry *ddir; char *recovered_break_msg; u32 binary_len; 
bool fail_recover; }; struct nsim_dev_hwstats { struct dentry *ddir; struct dentry *l3_ddir; struct mutex hwsdev_list_lock; struct list_head l3_list; struct notifier_block netdevice_nb; struct delayed_work traffic_dw; }; struct udp_tunnel_nic_shared { struct udp_tunnel_nic *udp_tunnel_nic_info; struct list_head devices; }; struct nsim_dev_psample; struct nsim_fib_data; struct nsim_trap_data; struct nsim_vf_config; struct nsim_dev { struct nsim_bus_dev *nsim_bus_dev; struct nsim_fib_data *fib_data; struct nsim_trap_data *trap_data; struct dentry *ddir; struct dentry *ports_ddir; struct dentry *take_snapshot; struct dentry *nodes_ddir; struct nsim_vf_config *vfconfigs; struct bpf_offload_dev *bpf_dev; bool bpf_bind_accept; bool bpf_bind_verifier_accept; u32 bpf_bind_verifier_delay; struct dentry *ddir_bpf_bound_progs; u32 prog_id_gen; struct list_head bpf_bound_progs; struct list_head bpf_bound_maps; struct netdev_phys_item_id switch_id; struct list_head port_list; bool fw_update_status; u32 fw_update_overwrite_mask; u32 max_macs; bool test1; bool dont_allow_reload; bool fail_reload; struct devlink_region *dummy_region; struct nsim_dev_health health; struct nsim_dev_hwstats hwstats; struct flow_action_cookie *fa_cookie; spinlock_t fa_cookie_lock; bool fail_trap_group_set; bool fail_trap_policer_set; bool fail_trap_policer_counter_get; bool fail_trap_drop_counter_get; struct { struct udp_tunnel_nic_shared utn_shared; u32 __ports[8]; bool sync_all; bool open_only; bool ipv4_only; bool shared; bool static_iana_vxlan; u32 sleep; } udp_ports; struct nsim_dev_psample *psample; u16 esw_mode; }; struct nsim_dev_dummy_reporter_ctx { char *break_msg; }; struct nsim_dev_hwstats_fops { const struct file_operations fops; enum nsim_dev_hwstats_do action; enum netdev_offload_xstats_type type; }; struct nsim_dev_hwstats_netdev { struct list_head list; struct net_device *netdev; struct rtnl_hw_stats64 stats; bool enabled; bool fail_enable; }; struct nsim_dev_port { struct list_head list; struct devlink_port devlink_port; unsigned int port_index; enum nsim_dev_port_type port_type; struct dentry *ddir; struct dentry *rate_parent; char *parent_name; struct netdevsim *ns; }; struct nsim_fib_rt_key { unsigned char addr[16]; unsigned char prefix_len; int family; u32 tb_id; }; struct nsim_fib_rt { struct nsim_fib_rt_key key; struct rhash_head ht_node; struct list_head list; }; struct nsim_fib4_rt { struct nsim_fib_rt common; struct fib_info *fi; dscp_t dscp; u8 type; }; struct nsim_fib6_event { struct fib6_info **rt_arr; unsigned int nrt6; }; struct nsim_fib6_rt { struct nsim_fib_rt common; struct list_head nh_list; unsigned int nhs; }; struct nsim_fib6_rt_nh { struct list_head list; struct fib6_info *rt; }; struct nsim_fib_entry { u64 max; atomic64_t num; }; struct nsim_per_fib_data { struct nsim_fib_entry fib; struct nsim_fib_entry rules; }; struct nsim_fib_data { struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; struct nsim_fib_entry nexthops; struct rhashtable fib_rt_ht; struct list_head fib_rt_list; struct mutex fib_lock; struct notifier_block nexthop_nb; struct rhashtable nexthop_ht; struct devlink *devlink; struct work_struct fib_event_work; struct work_struct fib_flush_work; struct list_head fib_event_queue; spinlock_t fib_event_queue_lock; struct mutex nh_lock; struct dentry *ddir; bool fail_route_offload; bool fail_res_nexthop_group_replace; bool fail_nexthop_bucket_replace; bool fail_route_delete; }; struct nsim_fib_event { struct list_head list; union { struct 
fib_entry_notifier_info fen_info; struct nsim_fib6_event fib6_event; }; struct nsim_fib_data *data; long unsigned int event; int family; }; struct nsim_nexthop { struct rhash_head ht_node; u64 occ; u32 id; bool is_resilient; }; struct nsim_rate_node { struct dentry *ddir; struct dentry *rate_parent; char *parent_name; u16 tx_share; u16 tx_max; }; struct nsim_rq { struct napi_struct napi; struct sk_buff_head skb_queue; struct page_pool *page_pool; }; struct nsim_trap_item; struct nsim_trap_data { struct delayed_work trap_report_dw; struct nsim_trap_item *trap_items_arr; u64 *trap_policers_cnt_arr; u64 trap_pkt_cnt; struct nsim_dev *nsim_dev; spinlock_t trap_lock; }; struct nsim_trap_item { void *trap_ctx; enum devlink_trap_action action; }; struct nsim_vf_config { int link_state; u16 min_tx_rate; u16 max_tx_rate; u16 vlan; __be16 vlan_proto; u16 qos; u8 vf_mac[6]; bool spoofchk_enabled; bool trusted; bool rss_query_enabled; }; struct uts_namespace; struct time_namespace; struct nsproxy { refcount_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct time_namespace *time_ns; struct time_namespace *time_ns_for_children; struct cgroup_namespace *cgroup_ns; }; struct nsset { unsigned int flags; struct nsproxy *nsproxy; struct fs_struct *fs; const struct cred *cred; }; struct nt_partition_info { u32 xlink_enabled; u32 target_part_low; u32 target_part_high; u32 reserved; }; struct ntb_ctrl_regs { u32 partition_status; u32 partition_op; u32 partition_ctrl; u32 bar_setup; u32 bar_error; u16 lut_table_entries; u16 lut_table_offset; u32 lut_error; u16 req_id_table_size; u16 req_id_table_offset; u32 req_id_error; u32 reserved1[7]; struct { u32 ctl; u32 win_size; u64 xlate_addr; } bar_entry[6]; struct { u32 win_size; u32 reserved[3]; } bar_ext_entry[6]; u32 reserved2[192]; u32 req_id_table[512]; u32 reserved3[256]; u64 lut_entry[512]; }; struct ntb_info_regs { u8 partition_count; u8 partition_id; u16 reserved1; u64 ep_map; u16 requester_id; u16 reserved2; u32 reserved3[4]; struct nt_partition_info ntp_info[48]; } __attribute__((packed)); struct numa_group { refcount_t refcount; spinlock_t lock; int nr_tasks; pid_t gid; int active_nodes; struct callback_head rcu; long unsigned int total_faults; long unsigned int max_faults_cpu; long unsigned int faults[0]; }; struct numa_maps { long unsigned int pages; long unsigned int anon; long unsigned int active; long unsigned int writeback; long unsigned int mapcount_max; long unsigned int dirty; long unsigned int swapcache; long unsigned int node[64]; }; struct proc_maps_private { struct inode *inode; struct task_struct *task; struct mm_struct *mm; struct vma_iterator iter; struct mempolicy *task_mempolicy; }; struct numa_maps_private { struct proc_maps_private proc_maps; struct numa_maps md; }; struct numa_memblk { u64 start; u64 end; int nid; }; struct numa_meminfo { int nr_blks; struct numa_memblk blk[128]; }; struct numa_stats { long unsigned int load; long unsigned int runnable; long unsigned int util; long unsigned int compute_capacity; unsigned int nr_running; unsigned int weight; enum numa_type node_type; int idle_cpu; }; struct nvmem_cell_entry; struct nvmem_cell { struct nvmem_cell_entry *entry; const char *id; int index; }; typedef int (*nvmem_cell_post_process_t)(void *, const char *, int, unsigned int, void *, size_t); struct nvmem_device; struct nvmem_cell_entry { const char *name; int offset; size_t raw_len; int bytes; int bit_offset; 
int nbits; nvmem_cell_post_process_t read_post_process; void *priv; struct device_node *np; struct nvmem_device *nvmem; struct list_head node; }; struct nvmem_cell_info { const char *name; unsigned int offset; size_t raw_len; unsigned int bytes; unsigned int bit_offset; unsigned int nbits; struct device_node *np; nvmem_cell_post_process_t read_post_process; void *priv; }; struct nvmem_cell_lookup { const char *nvmem_name; const char *cell_name; const char *dev_id; const char *con_id; struct list_head node; }; struct nvmem_cell_table { const char *nvmem_name; const struct nvmem_cell_info *cells; size_t ncells; struct list_head node; }; typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); struct nvmem_keepout; struct nvmem_layout; struct nvmem_config { struct device *dev; const char *name; int id; struct module *owner; const struct nvmem_cell_info *cells; int ncells; bool add_legacy_fixed_of_cells; void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); const struct nvmem_keepout *keepout; unsigned int nkeepout; enum nvmem_type type; bool read_only; bool root_only; bool ignore_wp; struct nvmem_layout *layout; struct device_node *of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; int word_size; int stride; void *priv; bool compat; struct device *base_dev; }; struct nvmem_device { struct module *owner; struct device dev; struct list_head node; int stride; int word_size; int id; struct kref refcnt; size_t size; bool read_only; bool root_only; int flags; enum nvmem_type type; struct bin_attribute eeprom; struct device *base_dev; struct list_head cells; void (*fixup_dt_cell_info)(struct nvmem_device *, struct nvmem_cell_info *); const struct nvmem_keepout *keepout; unsigned int nkeepout; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; struct gpio_desc *wp_gpio; struct nvmem_layout *layout; void *priv; bool sysfs_cells_populated; }; struct nvmem_keepout { unsigned int start; unsigned int end; unsigned char value; }; struct nvmem_layout { struct device dev; struct nvmem_device *nvmem; int (*add_cells)(struct nvmem_layout *); }; struct nvs_page { long unsigned int phys_start; unsigned int size; void *kaddr; void *data; bool unmap; struct list_head node; }; struct nvs_region { __u64 phys_start; __u64 size; struct list_head node; }; struct obj_cgroup { struct percpu_ref refcnt; struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { struct list_head list; struct callback_head rcu; }; }; struct objpool_head; typedef int (*objpool_fini_cb)(struct objpool_head *, void *); struct objpool_slot; struct objpool_head { int obj_size; int nr_objs; int nr_possible_cpus; int capacity; gfp_t gfp; refcount_t ref; long unsigned int flags; struct objpool_slot **cpu_slots; objpool_fini_cb release; void *context; }; struct objpool_slot { uint32_t head; uint32_t tail; uint32_t last; uint32_t mask; void *entries[0]; }; struct obs_kernel_param { const char *str; int (*setup_func)(char *); int early; }; struct ocontext { union { char *name; struct { u8 protocol; u16 low_port; u16 high_port; } port; struct { u32 addr; u32 mask; } node; struct { u32 addr[4]; u32 mask[4]; } node6; struct { u64 subnet_prefix; u16 low_pkey; u16 high_pkey; } ibpkey; struct { char *dev_name; u8 port; } ibendport; } u; union { u32 sclass; u32 behavior; } v; struct context___2 context[2]; u32 sid[2]; struct ocontext *next; }; struct od_dbs_tuners { unsigned int powersave_bias; }; struct od_ops { 
unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, unsigned int); }; struct od_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int freq_lo; unsigned int freq_lo_delay_us; unsigned int freq_hi_delay_us; unsigned int sample_type: 1; }; struct of_dev_auxdata { char *compatible; resource_size_t phys_addr; char *name; void *platform_data; }; struct of_device_id { char name[32]; char type[32]; char compatible[128]; const void *data; }; struct of_pci_range { union { u64 pci_addr; u64 bus_addr; }; u64 cpu_addr; u64 size; u32 flags; }; struct of_bus; struct of_pci_range_parser { struct device_node *node; struct of_bus *bus; const __be32 *range; const __be32 *end; int na; int ns; int pna; bool dma; }; struct of_phandle_args { struct device_node *np; int args_count; uint32_t args[16]; }; struct offset_ctx { struct maple_tree mt; long unsigned int next_offset; }; struct old_timespec32 { old_time32_t tv_sec; s32 tv_nsec; }; struct old_itimerspec32 { struct old_timespec32 it_interval; struct old_timespec32 it_value; }; struct old_linux_dirent { long unsigned int d_ino; long unsigned int d_offset; short unsigned int d_namlen; char d_name[0]; }; struct old_serial_port { unsigned int uart; unsigned int baud_base; unsigned int port; unsigned int irq; upf_t flags; unsigned char io_type; unsigned char *iomem_base; short unsigned int iomem_reg_shift; }; struct old_timeval32 { old_time32_t tv_sec; s32 tv_usec; }; struct old_timex32 { u32 modes; s32 offset; s32 freq; s32 maxerror; s32 esterror; s32 status; s32 constant; s32 precision; s32 tolerance; struct old_timeval32 time; s32 tick; s32 ppsfreq; s32 jitter; s32 shift; s32 stabil; s32 jitcnt; s32 calcnt; s32 errcnt; s32 stbcnt; s32 tai; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct old_utimbuf32 { old_time32_t actime; old_time32_t modtime; }; struct old_utsname { char sysname[65]; char nodename[65]; char release[65]; char version[65]; char machine[65]; }; struct oldold_utsname { char sysname[9]; char nodename[9]; char release[9]; char version[9]; char machine[9]; }; struct static_key_true; struct once_work { struct work_struct work; struct static_key_true *key; struct module *module; }; struct online_data { unsigned int cpu; bool online; }; struct oom_control { struct zonelist *zonelist; nodemask_t *nodemask; struct mem_cgroup *memcg; const gfp_t gfp_mask; const int order; long unsigned int totalpages; struct task_struct *chosen; long int chosen_points; enum oom_constraint constraint; }; struct open_flags { int open_flag; umode_t mode; int acc_mode; int intent; int lookup_flags; }; struct optimistic_spin_node { struct optimistic_spin_node *next; struct optimistic_spin_node *prev; int locked; int cpu; }; struct optimized_kprobe { struct kprobe kp; struct list_head list; struct arch_optimized_insn optinsn; }; struct orc_entry { s16 sp_offset; s16 bp_offset; unsigned int sp_reg: 4; unsigned int bp_reg: 4; unsigned int type: 3; unsigned int signal: 1; } __attribute__((packed)); struct orlov_stats { __u64 free_clusters; __u32 free_inodes; __u32 used_dirs; }; struct osnoise_entry { struct trace_entry ent; u64 noise; u64 runtime; u64 max_sample; unsigned int hw_count; unsigned int nmi_count; unsigned int irq_count; unsigned int softirq_count; unsigned int thread_count; }; struct x86_cpu_id { __u16 vendor; __u16 family; __u16 model; __u16 steppings; __u16 feature; __u16 flags; kernel_ulong_t driver_data; }; struct override_status_id { struct acpi_device_id hid[2]; struct x86_cpu_id cpu_ids[2]; struct dmi_system_id 
dmi_ids[2]; const char *uid; const char *path; long long unsigned int status; }; struct p4_event_alias { u64 original; u64 alternative; }; struct p4_event_bind { unsigned int opcode; unsigned int escr_msr[2]; unsigned int escr_emask; unsigned int shared; signed char cntr[6]; }; struct p4_pebs_bind { unsigned int metric_pebs; unsigned int metric_vert; }; struct pacct_struct { int ac_flag; long int ac_exitcode; long unsigned int ac_mem; u64 ac_utime; u64 ac_stime; long unsigned int ac_minflt; long unsigned int ac_majflt; }; struct packet_type { __be16 type; bool ignore_outgoing; struct net_device *dev; netdevice_tracker dev_tracker; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); struct net *af_packet_net; void *af_packet_priv; struct list_head list; }; struct packet_fanout { possible_net_t net; unsigned int num_members; u32 max_num_members; u16 id; u8 type; u8 flags; union { atomic_t rr_cur; struct bpf_prog *bpf_prog; }; struct list_head list; spinlock_t lock; refcount_t sk_ref; long: 64; struct packet_type prot_hook; struct sock *arr[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct packet_mclist { struct packet_mclist *next; int ifindex; int count; short unsigned int type; short unsigned int alen; unsigned char addr[32]; }; struct packet_mreq_max { int mr_ifindex; short unsigned int mr_type; short unsigned int mr_alen; unsigned char mr_address[32]; }; struct pgv; struct tpacket_kbdq_core { struct pgv *pkbdq; unsigned int feature_req_word; unsigned int hdrlen; unsigned char reset_pending_on_curr_blk; unsigned char delete_blk_timer; short unsigned int kactive_blk_num; short unsigned int blk_sizeof_priv; short unsigned int last_kactive_blk_num; char *pkblk_start; char *pkblk_end; int kblk_size; unsigned int max_frame_len; unsigned int knum_blocks; uint64_t knxt_seq_num; char *prev; char *nxt_offset; struct sk_buff *skb; rwlock_t blk_fill_in_prog_lock; short unsigned int retire_blk_tov; short unsigned int version; long unsigned int tov_in_jiffies; struct timer_list retire_blk_timer; }; struct packet_ring_buffer { struct pgv *pg_vec; unsigned int head; unsigned int frames_per_block; unsigned int frame_size; unsigned int frame_max; unsigned int pg_vec_order; unsigned int pg_vec_pages; unsigned int pg_vec_len; unsigned int *pending_refcnt; union { long unsigned int *rx_owner_map; struct tpacket_kbdq_core prb_bdqc; }; }; struct packet_rollover { int sock; atomic_long_t num; atomic_long_t num_huge; atomic_long_t num_failed; long: 64; long: 64; long: 64; long: 64; u32 history[16]; }; struct sockaddr_pkt { short unsigned int spkt_family; unsigned char spkt_device[14]; __be16 spkt_protocol; }; struct sockaddr_ll { short unsigned int sll_family; __be16 sll_protocol; int sll_ifindex; short unsigned int sll_hatype; unsigned char sll_pkttype; unsigned char sll_halen; unsigned char sll_addr[8]; }; struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; struct tpacket_stats { unsigned int tp_packets; unsigned int tp_drops; }; struct tpacket_stats_v3 { unsigned int tp_packets; unsigned int tp_drops; unsigned int tp_freeze_q_cnt; }; union tpacket_stats_u { struct tpacket_stats stats1; struct tpacket_stats_v3 stats3; }; struct packet_sock { struct sock sk; struct packet_fanout *fanout; union tpacket_stats_u stats; struct 
packet_ring_buffer rx_ring; struct packet_ring_buffer tx_ring; int copy_thresh; spinlock_t bind_lock; struct mutex pg_vec_lock; long unsigned int flags; int ifindex; u8 vnet_hdr_sz; __be16 num; struct packet_rollover *rollover; struct packet_mclist *mclist; atomic_long_t mapped; enum tpacket_versions tp_version; unsigned int tp_hdrlen; unsigned int tp_reserve; unsigned int tp_tstamp; struct completion skb_completion; struct net_device *cached_dev; long: 64; long: 64; long: 64; long: 64; long: 64; struct packet_type prot_hook; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; atomic_t tp_drops; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct padata_cpumask { cpumask_var_t pcpu; cpumask_var_t cbcpu; }; struct padata_instance { struct hlist_node cpu_online_node; struct hlist_node cpu_dead_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; struct kobject kobj; struct mutex lock; u8 flags; }; struct padata_list { struct list_head list; spinlock_t lock; }; struct padata_mt_job { void (*thread_fn)(long unsigned int, long unsigned int, void *); void *fn_arg; long unsigned int start; long unsigned int size; long unsigned int align; long unsigned int min_chunk; int max_threads; bool numa_aware; }; struct padata_mt_job_state { spinlock_t lock; struct completion completion; struct padata_mt_job *job; int nworks; int nworks_fini; long unsigned int chunk_size; }; struct parallel_data; struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *); void (*serial)(struct padata_priv *); }; struct padata_serial_queue { struct padata_list serial; struct work_struct work; struct parallel_data *pd; }; struct padata_shell { struct padata_instance *pinst; struct parallel_data *pd; struct parallel_data *opd; struct list_head list; }; struct padata_sysfs_entry { struct attribute attr; ssize_t (*show)(struct padata_instance *, struct attribute *, char *); ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); }; struct padata_work { struct work_struct pw_work; struct list_head pw_list; void *pw_data; }; typedef struct page *pgtable_t; struct printf_spec; struct page_flags_fields { int width; int shift; int mask; const struct printf_spec *spec; const char *name; }; struct page_pool_params_fast { unsigned int order; unsigned int pool_size; int nid; struct device *dev; struct napi_struct *napi; enum dma_data_direction dma_dir; unsigned int max_len; unsigned int offset; }; struct pp_alloc_cache { u32 count; netmem_ref cache[128]; }; struct ptr_ring { int producer; spinlock_t producer_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; int consumer_head; int consumer_tail; spinlock_t consumer_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; int size; int batch; void **queue; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct page_pool_params_slow { struct net_device *netdev; unsigned int queue_idx; unsigned int flags; void (*init_callback)(netmem_ref, void *); void *init_arg; }; struct page_pool { struct page_pool_params_fast p; int cpuid; u32 pages_state_hold_cnt; bool has_init_callback: 1; bool dma_map: 1; bool dma_sync: 1; long: 0; __u8 __cacheline_group_begin__frag[0]; long int frag_users; netmem_ref frag_page; unsigned int frag_offset; long: 0; __u8 __cacheline_group_end__frag[0]; long: 64; 
struct {} __cacheline_group_pad__frag; struct delayed_work release_dw; void (*disconnect)(void *); long unsigned int defer_start; long unsigned int defer_warn; u32 xdp_mem_id; long: 64; long: 64; long: 64; struct pp_alloc_cache alloc; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct ptr_ring ring; void *mp_priv; atomic_t pages_state_release_cnt; refcount_t user_cnt; u64 destroy_cnt; struct page_pool_params_slow slow; struct { struct hlist_node list; u64 detach_time; u32 napi_id; u32 id; } user; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct page_pool_dump_cb { long unsigned int ifindex; u32 pp_id; }; struct page_pool_params { union { struct { unsigned int order; unsigned int pool_size; int nid; struct device *dev; struct napi_struct *napi; enum dma_data_direction dma_dir; unsigned int max_len; unsigned int offset; }; struct page_pool_params_fast fast; }; union { struct { struct net_device *netdev; unsigned int queue_idx; unsigned int flags; void (*init_callback)(netmem_ref, void *); void *init_arg; }; struct page_pool_params_slow slow; }; }; struct page_region { __u64 start; __u64 end; __u64 categories; }; struct page_state { long unsigned int mask; long unsigned int res; enum mf_action_page_type type; int (*action)(struct page_state *, struct page *); }; struct page_vma_mapped_walk { long unsigned int pfn; long unsigned int nr_pages; long unsigned int pgoff; struct vm_area_struct *vma; long unsigned int address; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; unsigned int flags; }; struct pm_scan_arg { __u64 size; __u64 flags; __u64 start; __u64 end; __u64 walk_end; __u64 vec; __u64 vec_len; __u64 max_pages; __u64 category_inverted; __u64 category_mask; __u64 category_anyof_mask; __u64 return_mask; }; struct pagemap_scan_private { struct pm_scan_arg arg; long unsigned int masks_of_interest; long unsigned int cur_vma_category; struct page_region *vec_buf; long unsigned int vec_buf_len; long unsigned int vec_buf_index; long unsigned int found_pages; struct page_region *vec_out; }; struct pagemapread { int pos; int len; pagemap_entry_t *buffer; bool show_pfn; }; struct pagerange_state { long unsigned int cur_pfn; int ram; int not_ram; }; struct pages_devres { long unsigned int addr; unsigned int order; }; struct parallel_data { struct padata_shell *ps; struct padata_list *reorder_list; struct padata_serial_queue *squeue; refcount_t refcnt; unsigned int seq_nr; unsigned int processed; int cpu; struct padata_cpumask cpumask; struct work_struct reorder_work; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t lock; }; struct parsed_desc { u32 mb; u32 valid; }; struct partition_meta_info { char uuid[37]; u8 volname[64]; }; struct parsed_partitions { struct gendisk *disk; char name[32]; struct { sector_t from; sector_t size; int flags; bool has_info; struct partition_meta_info info; } *parts; int next; int limit; bool access_beyond_eod; char *pp_buf; }; struct partial_cluster { ext4_fsblk_t pclu; ext4_lblk_t lblk; enum { initial = 0, tofree = 1, nofree = 2, } state; }; struct partial_context { gfp_t flags; unsigned int orig_size; void *object; }; struct partial_page { unsigned int offset; unsigned int len; long unsigned int private; }; struct pause_reply_data { struct ethnl_reply_data base; struct ethtool_pauseparam pauseparam; struct ethtool_pause_stats pausestat; }; struct pause_req_info { struct ethnl_req_info base; enum ethtool_mac_stats_src src; }; struct pcc_mbox_chan { struct mbox_chan *mchan; u64 shmem_base_addr; u64 shmem_size; u32 
latency; u32 max_access_rate; u16 min_turnaround_time; }; struct pcc_chan_reg { void *vaddr; struct acpi_generic_address *gas; u64 preserve_mask; u64 set_mask; u64 status_mask; }; struct pcc_chan_info { struct pcc_mbox_chan chan; struct pcc_chan_reg db; struct pcc_chan_reg plat_irq_ack; struct pcc_chan_reg cmd_complete; struct pcc_chan_reg cmd_update; struct pcc_chan_reg error; int plat_irq; u8 type; unsigned int plat_irq_flags; bool chan_in_use; }; struct pcc_data { struct pcc_mbox_chan *pcc_chan; void *pcc_comm_addr; struct completion done; struct mbox_client cl; struct acpi_pcc_info ctx; }; struct pci2phy_map { struct list_head list; int segment; int pbus_to_dieid[256]; }; struct pci_acs { u16 cap; u16 ctrl; u16 fw_ctrl; }; struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48]; short unsigned int bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned int is_added: 1; unsigned int unsafe_warn: 1; }; struct pci_bus_region { pci_bus_addr_t start; pci_bus_addr_t end; }; struct pci_bus_resource { struct list_head list; struct resource *res; unsigned int flags; }; struct pci_cap_saved_data { u16 cap_nr; bool cap_extended; unsigned int size; u32 data[0]; }; struct pci_cap_saved_state { struct hlist_node next; struct pci_cap_saved_data cap; }; struct pci_check_idx_range { int start; int end; }; struct pci_vpd { struct mutex lock; unsigned int len; u8 cap; }; struct rcec_ea; struct pcie_link_state; struct pci_sriov; struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; short unsigned int vendor; short unsigned int device; short unsigned int subsystem_vendor; short unsigned int subsystem_device; unsigned int class; u8 revision; u8 hdr_type; struct rcec_ea *rcec_ea; struct pci_dev *rcec; u32 devcap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; u8 pcie_mpss: 3; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; long unsigned int *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned int pme_support: 5; unsigned int pme_poll: 1; unsigned int pinned: 1; unsigned int config_rrs_sv: 1; unsigned int imm_ready: 1; unsigned int d1_support: 1; unsigned int d2_support: 1; unsigned int no_d1d2: 1; unsigned int no_d3cold: 1; unsigned int bridge_d3: 1; unsigned int d3cold_allowed: 1; unsigned int mmio_always_on: 1; unsigned int wakeup_prepared: 1; unsigned int skip_bus_pm: 1; unsigned int ignore_hotplug: 1; unsigned int hotplug_user_indicators: 1; unsigned int clear_retrain_link: 1; unsigned int d3hot_delay; unsigned int d3cold_delay; u16 l1ss; struct pcie_link_state *link_state; unsigned int ltr_path: 1; unsigned int pasid_no_tlp: 1; unsigned int eetlp_prefix_path: 1; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17]; struct resource driver_exclusive_resource; bool match_driver; unsigned int transparent: 1; unsigned int io_window: 1; unsigned int pref_window: 1; unsigned 
int pref_64_window: 1; unsigned int multifunction: 1; unsigned int is_busmaster: 1; unsigned int no_msi: 1; unsigned int no_64bit_msi: 1; unsigned int block_cfg_access: 1; unsigned int broken_parity_status: 1; unsigned int irq_reroute_variant: 2; unsigned int msi_enabled: 1; unsigned int msix_enabled: 1; unsigned int ari_enabled: 1; unsigned int ats_enabled: 1; unsigned int pasid_enabled: 1; unsigned int pri_enabled: 1; unsigned int is_managed: 1; unsigned int is_msi_managed: 1; unsigned int needs_freset: 1; unsigned int state_saved: 1; unsigned int is_physfn: 1; unsigned int is_virtfn: 1; unsigned int is_hotplug_bridge: 1; unsigned int shpc_managed: 1; unsigned int is_thunderbolt: 1; unsigned int untrusted: 1; unsigned int external_facing: 1; unsigned int broken_intx_masking: 1; unsigned int io_window_1k: 1; unsigned int irq_managed: 1; unsigned int non_compliant_bars: 1; unsigned int is_probed: 1; unsigned int link_active_reporting: 1; unsigned int no_vf_scan: 1; unsigned int no_command_memory: 1; unsigned int rom_bar_overlap: 1; unsigned int rom_attr_enabled: 1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; spinlock_t pcie_cap_lock; u32 saved_config_space[16]; struct hlist_head saved_cap_space; struct bin_attribute *res_attr[17]; struct bin_attribute *res_attr_wc[17]; void *msix_base; raw_spinlock_t msi_lock; struct pci_vpd vpd; union { struct pci_sriov *sriov; struct pci_dev *physfn; }; u16 ats_cap; u8 ats_stu; u16 acs_cap; phys_addr_t rom; size_t romlen; const char *driver_override; long unsigned int priv_flags; u8 reset_methods[8]; }; struct pci_dev_acs_enabled { u16 vendor; u16 device; int (*acs_enabled)(struct pci_dev *, u16); }; struct pci_dev_acs_ops { u16 vendor; u16 device; int (*enable_acs)(struct pci_dev *); int (*disable_acs_redir)(struct pci_dev *); }; struct pci_dev_reset_methods { u16 vendor; u16 device; int (*reset)(struct pci_dev *, bool); }; struct pci_dev_resource { struct list_head list; struct resource *res; struct pci_dev *dev; resource_size_t start; resource_size_t end; resource_size_t add_size; resource_size_t min_align; long unsigned int flags; }; struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; __u32 override_only; }; struct pci_domain_busn_res { struct list_head list; struct resource res; int domain_nr; }; struct pci_dynids { spinlock_t lock; struct list_head list; }; struct pci_error_handlers; struct pci_driver { const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); int (*sriov_set_msix_vec_count)(struct pci_dev *, int); u32 (*sriov_get_vf_total_msix)(struct pci_dev *); const struct pci_error_handlers *err_handler; const struct attribute_group **groups; const struct attribute_group **dev_groups; struct device_driver driver; struct pci_dynids dynids; bool driver_managed_dma; }; struct pci_dynid { struct list_head node; struct pci_device_id id; }; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_prepare)(struct pci_dev *); void (*reset_done)(struct pci_dev *); void (*resume)(struct pci_dev *); void (*cor_error_detected)(struct pci_dev *); }; 
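/*
 * Illustrative sketch, not part of the original type dump: how the
 * struct pci_device_id, struct pci_driver and struct pci_error_handlers
 * definitions above fit together in a minimal PCI driver. All demo_*
 * names and the 0x1234/0x5678 IDs are hypothetical placeholders; BAR
 * mapping and real error recovery are elided.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device pair */
	{ }				/* sentinel entry terminates the table */
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);	/* managed enable; auto-released */
	if (ret)
		return ret;
	pci_set_master(pdev);		/* sets the is_busmaster bit seen above */
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	/* pcim_* managed resources are torn down automatically */
}

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	/* reached through pdev->driver->err_handler on AER events */
	return PCI_ERS_RESULT_NEED_RESET;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
};

static struct pci_driver demo_pci_driver = {
	.name		= "demo-pci",
	.id_table	= demo_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
	.err_handler	= &demo_err_handler,
};
module_pci_driver(demo_pci_driver);
MODULE_LICENSE("GPL");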
struct pci_extra_dev { struct pci_dev *dev[4]; }; struct pci_filp_private { enum pci_mmap_state mmap_state; int write_combine; }; struct pci_fixup { u16 vendor; u16 device; u32 class; unsigned int class_shift; int hook_offset; }; struct pci_host_bridge { struct device dev; struct pci_bus *bus; struct pci_ops *ops; struct pci_ops *child_ops; void *sysdata; int busnr; int domain_nr; struct list_head windows; struct list_head dma_ranges; u8 (*swizzle_irq)(struct pci_dev *, u8 *); int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; unsigned int ignore_reset_delay: 1; unsigned int no_ext_tags: 1; unsigned int no_inc_mrrs: 1; unsigned int native_aer: 1; unsigned int native_pcie_hotplug: 1; unsigned int native_shpc_hotplug: 1; unsigned int native_pme: 1; unsigned int native_ltr: 1; unsigned int native_dpc: 1; unsigned int native_cxl_error: 1; unsigned int preserve_config: 1; unsigned int size_windows: 1; unsigned int msi_domain: 1; resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); long: 64; long: 64; long: 64; long: 64; long unsigned int private[0]; }; struct pci_hostbridge_probe { u32 bus; u32 slot; u32 vendor; u32 device; }; struct pci_mmcfg_hostbridge_probe { u32 bus; u32 devfn; u32 vendor; u32 device; const char * (*probe)(void); }; struct pci_mmcfg_region { struct list_head list; struct resource res; u64 address; char *virt; u16 segment; u8 start_bus; u8 end_bus; char name[30]; }; struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32); }; struct pci_osc_bit_struct { u32 bit; char *desc; }; struct pci_p2pdma_map_state { struct dev_pagemap *pgmap; int map; u64 bus_off; }; struct pci_pme_device { struct list_head list; struct pci_dev *dev; }; struct pci_raw_ops { int (*read)(unsigned int, unsigned int, unsigned int, int, int, u32 *); int (*write)(unsigned int, unsigned int, unsigned int, int, int, u32); }; struct pci_reset_fn_method { int (*reset_fn)(struct pci_dev *, bool); char *name; }; struct pci_root_info { struct list_head list; char name[12]; struct list_head resources; struct resource busn; int node; int link; }; struct pci_sysdata { int domain; int node; struct acpi_device *companion; void *iommu; void *fwnode; }; struct pci_root_info___2 { struct acpi_pci_root_info common; struct pci_sysdata sd; bool mcfg_added; u8 start_bus; u8 end_bus; }; struct pci_root_res { struct list_head list; struct resource res; }; struct pci_saved_state { u32 config_space[16]; struct pci_cap_saved_data cap[0]; }; struct serial_private; struct pciserial_board; struct pci_serial_quirk { u32 vendor; u32 device; u32 subvendor; u32 subdevice; int (*probe)(struct pci_dev *); int (*init)(struct pci_dev *); int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); void (*exit)(struct pci_dev *); }; struct setup_data { __u64 next; __u32 type; __u32 len; __u8 data[0]; }; struct pci_setup_rom { struct setup_data data; uint16_t vendor; uint16_t devid; uint64_t pcilen; long unsigned int segment; long unsigned int bus; long unsigned int device; long unsigned int function; uint8_t romdata[0]; }; struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; }; 
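/*
 * Illustrative sketch, not part of the original dump: minimal config-space
 * accessors wired into struct pci_ops and struct pci_host_bridge (both
 * defined above), in the style of an ECAM-based controller. The demo_*
 * names are hypothetical; resource windows (bridge->windows) are normally
 * populated from DT/ACPI before probing and are elided here.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

static void __iomem *demo_ecam_base;

static void __iomem *demo_map_bus(struct pci_bus *bus, unsigned int devfn,
				  int where)
{
	/* standard ECAM layout: bus << 20 | devfn << 12 | register offset */
	return demo_ecam_base + (((u32)bus->number << 20) |
				 (devfn << 12) | where);
}

static struct pci_ops demo_pci_ops = {
	.map_bus = demo_map_bus,
	/* generic helpers perform the MMIO through ->map_bus() */
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};

static int demo_host_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;

	demo_ecam_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(demo_ecam_base))
		return PTR_ERR(demo_ecam_base);

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!bridge)
		return -ENOMEM;

	bridge->ops = &demo_pci_ops;	/* struct pci_ops from above */
	return pci_host_probe(bridge);	/* scans and registers the root bus */
}

static struct platform_driver demo_host_driver = {
	.probe	= demo_host_probe,
	.driver	= { .name = "demo-pcie-host" },
};
module_platform_driver(demo_host_driver);
MODULE_LICENSE("GPL");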
struct pci_slot_attribute { struct attribute attr; ssize_t (*show)(struct pci_slot *, char *); ssize_t (*store)(struct pci_slot *, const char *, size_t); }; struct pci_sriov { int pos; int nres; u32 cap; u16 ctrl; u16 total_VFs; u16 initial_VFs; u16 num_VFs; u16 offset; u16 stride; u16 vf_device; u32 pgsz; u8 link; u8 max_VF_buses; u16 driver_max_VFs; struct pci_dev *dev; struct pci_dev *self; u32 class; u8 hdr_type; u16 subsystem_vendor; u16 subsystem_device; resource_size_t barsz[6]; bool drivers_autoprobe; }; struct pcibios_fwaddrmap { struct list_head list; struct pci_dev *dev; resource_size_t fw_addr[17]; }; struct pcie_device { int irq; struct pci_dev *port; u32 service; void *priv_data; struct device device; }; struct pcie_link_state { struct pci_dev *pdev; struct pci_dev *downstream; struct pcie_link_state *root; struct pcie_link_state *parent; struct list_head sibling; u32 aspm_support: 7; u32 aspm_enabled: 7; u32 aspm_capable: 7; u32 aspm_default: 7; int: 4; u32 aspm_disable: 7; u32 clkpm_capable: 1; u32 clkpm_enabled: 1; u32 clkpm_default: 1; u32 clkpm_disable: 1; }; struct pcie_pme_service_data { spinlock_t lock; struct pcie_device *srv; struct work_struct work; bool noirq; }; struct pcie_port_service_driver { const char *name; int (*probe)(struct pcie_device *); void (*remove)(struct pcie_device *); int (*suspend)(struct pcie_device *); int (*resume_noirq)(struct pcie_device *); int (*resume)(struct pcie_device *); int (*runtime_suspend)(struct pcie_device *); int (*runtime_resume)(struct pcie_device *); int (*slot_reset)(struct pcie_device *); int port_type; u32 service; struct device_driver driver; }; struct pcie_tlp_log { u32 dw[4]; }; struct pcim_addr_devres { enum pcim_addr_devres_type type; void *baseaddr; long unsigned int offset; long unsigned int len; int bar; }; struct pcim_intx_devres { int orig_intx; }; struct pcim_iomap_devres { void *table[6]; }; struct pciserial_board { unsigned int flags; unsigned int num_ports; unsigned int base_baud; unsigned int uart_offset; unsigned int reg_shift; unsigned int first_offset; }; struct pcpu_group_info { int nr_units; long unsigned int base_offset; unsigned int *cpu_map; }; struct pcpu_alloc_info { size_t static_size; size_t reserved_size; size_t dyn_size; size_t unit_size; size_t atom_size; size_t alloc_size; size_t __ai_size; int nr_groups; struct pcpu_group_info groups[0]; }; struct pcpu_block_md { int scan_hint; int scan_hint_start; int contig_hint; int contig_hint_start; int left_free; int right_free; int first_free; int nr_bits; }; struct pcpuobj_ext; struct pcpu_chunk { struct list_head list; int free_bytes; struct pcpu_block_md chunk_md; long unsigned int *bound_map; void *base_addr; long unsigned int *alloc_map; struct pcpu_block_md *md_blocks; void *data; bool immutable; bool isolated; int start_offset; int end_offset; struct pcpuobj_ext *obj_exts; int nr_pages; int nr_populated; int nr_empty_pop_pages; long unsigned int populated[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct pcpu_dstats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t rx_drops; u64_stats_t tx_packets; u64_stats_t tx_bytes; u64_stats_t tx_drops; struct u64_stats_sync syncp; long: 64; long: 64; }; struct pcpu_gen_cookie { local_t nesting; u64 last; }; struct pcpu_hot { union { struct { struct task_struct *current_task; int preempt_count; int cpu_number; u64 call_depth; long unsigned int top_of_stack; void *hardirq_stack_ptr; u16 softirq_pending; bool hardirq_stack_inuse; }; u8 pad[64]; }; }; struct 
pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; struct u64_stats_sync syncp; }; struct pcpu_seg6_local_counters { u64_stats_t packets; u64_stats_t bytes; u64_stats_t errors; struct u64_stats_sync syncp; }; struct pcpu_sw_netstats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t tx_packets; u64_stats_t tx_bytes; struct u64_stats_sync syncp; }; struct pcpuobj_ext { struct obj_cgroup *cgroup; }; struct pde_opener { struct list_head lh; struct file *file; bool closing; struct completion *c; }; struct pdev_archdata {}; struct pebs_basic { u64 format_size; u64 ip; u64 applicable_counters; u64 tsc; }; struct pebs_gprs { u64 flags; u64 ip; u64 ax; u64 cx; u64 dx; u64 bx; u64 sp; u64 bp; u64 si; u64 di; u64 r8; u64 r9; u64 r10; u64 r11; u64 r12; u64 r13; u64 r14; u64 r15; }; struct pebs_meminfo { u64 address; u64 aux; u64 latency; u64 tsx_tuning; }; struct pebs_record_core { u64 flags; u64 ip; u64 ax; u64 bx; u64 cx; u64 dx; u64 si; u64 di; u64 bp; u64 sp; u64 r8; u64 r9; u64 r10; u64 r11; u64 r12; u64 r13; u64 r14; u64 r15; }; struct pebs_record_nhm { u64 flags; u64 ip; u64 ax; u64 bx; u64 cx; u64 dx; u64 si; u64 di; u64 bp; u64 sp; u64 r8; u64 r9; u64 r10; u64 r11; u64 r12; u64 r13; u64 r14; u64 r15; u64 status; u64 dla; u64 dse; u64 lat; }; struct pebs_record_skl { u64 flags; u64 ip; u64 ax; u64 bx; u64 cx; u64 dx; u64 si; u64 di; u64 bp; u64 sp; u64 r8; u64 r9; u64 r10; u64 r11; u64 r12; u64 r13; u64 r14; u64 r15; u64 status; u64 dla; u64 dse; u64 lat; u64 real_ip; u64 tsx_tuning; u64 tsc; }; struct pebs_xmm { u64 xmm[32]; }; struct pending_reservation { struct rb_node rb_node; ext4_lblk_t lclu; }; struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[47]; }; struct per_cpu_pages { spinlock_t lock; int count; int high; int high_min; int high_max; int batch; u8 flags; u8 alloc_factor; u8 expire; short int free_count; struct list_head lists[14]; }; struct per_cpu_zonestat { s8 vm_stat_diff[10]; s8 stat_threshold; long unsigned int vm_numa_event[6]; }; struct percpu_cluster { unsigned int next[10]; }; struct percpu_free_defer { struct callback_head rcu; void *ptr; }; typedef void percpu_ref_func_t(struct percpu_ref *); struct percpu_ref_data { atomic_long_t count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic: 1; bool allow_reinit: 1; struct callback_head rcu; struct percpu_ref *ref; }; struct perf_addr_filter { struct list_head entry; struct path path; long unsigned int offset; long unsigned int size; enum perf_addr_filter_action_t action; }; struct perf_addr_filter_range { long unsigned int start; long unsigned int size; }; struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; unsigned int nr_file_filters; }; struct perf_event_header { __u32 type; __u16 misc; __u16 size; }; struct perf_aux_event { struct perf_event_header header; u64 hw_id; }; struct perf_aux_event___2 { struct perf_event_header header; u32 pid; u32 tid; }; struct perf_aux_event___3 { struct perf_event_header header; u64 offset; u64 size; u64 flags; }; struct perf_bpf_event { struct bpf_prog *prog; struct { struct perf_event_header header; u16 type; u16 flags; u32 id; u8 tag[8]; } event_id; }; struct perf_event_mmap_page; struct perf_buffer { refcount_t refcount; struct callback_head callback_head; int nr_pages; int overwrite; int paused; atomic_t poll; local_t head; unsigned int nest; local_t events; local_t wakeup; local_t lost; long int watermark; long int aux_watermark; spinlock_t event_lock; struct list_head event_list; atomic_t 
mmap_count; long unsigned int mmap_locked; struct user_struct *mmap_user; struct mutex aux_mutex; long int aux_head; unsigned int aux_nest; long int aux_wakeup; long unsigned int aux_pgoff; int aux_nr_pages; int aux_overwrite; atomic_t aux_mmap_count; long unsigned int aux_mmap_locked; void (*free_aux)(void *); refcount_t aux_refcount; int aux_in_sampling; void **aux_pages; void *aux_priv; struct perf_event_mmap_page *user_page; void *data_pages[0]; }; struct perf_callchain_entry { __u64 nr; __u64 ip[0]; }; struct perf_callchain_entry_ctx { struct perf_callchain_entry *entry; u32 max_stack; u32 nr; short int contexts; bool contexts_maxed; }; union perf_capabilities { struct { u64 lbr_format: 6; u64 pebs_trap: 1; u64 pebs_arch_reg: 1; u64 pebs_format: 4; u64 smm_freeze: 1; u64 full_width_write: 1; u64 pebs_baseline: 1; u64 perf_metrics: 1; u64 pebs_output_pt_available: 1; u64 pebs_timing_info: 1; u64 anythread_deprecated: 1; }; u64 capabilities; }; struct perf_cgroup_info; struct perf_cgroup { struct cgroup_subsys_state css; struct perf_cgroup_info *info; }; struct perf_cgroup_event { char *path; int path_size; struct { struct perf_event_header header; u64 id; char path[0]; } event_id; }; struct perf_cgroup_info { u64 time; u64 timestamp; u64 timeoffset; int active; }; struct perf_comm_event { struct task_struct *task; char *comm; int comm_size; struct { struct perf_event_header header; u32 pid; u32 tid; } event_id; }; struct perf_event_groups { struct rb_root tree; u64 index; }; struct perf_event_context { raw_spinlock_t lock; struct mutex mutex; struct list_head pmu_ctx_list; struct perf_event_groups pinned_groups; struct perf_event_groups flexible_groups; struct list_head event_list; int nr_events; int nr_user; int is_active; int nr_task_data; int nr_stat; int nr_freq; int rotate_disable; refcount_t refcount; struct task_struct *task; u64 time; u64 timestamp; u64 timeoffset; struct perf_event_context *parent_ctx; u64 parent_gen; u64 generation; int pin_count; int nr_cgroups; struct callback_head callback_head; local_t nr_no_switch_fast; }; struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int online; struct perf_cgroup *cgrp; int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; }; struct perf_event_pmu_context { struct pmu *pmu; struct perf_event_context *ctx; struct list_head pmu_ctx_entry; struct list_head pinned_active; struct list_head flexible_active; unsigned int embedded: 1; unsigned int nr_events; unsigned int nr_cgroups; unsigned int nr_freq; atomic_t refcount; struct callback_head callback_head; void *task_ctx_data; int rotate_necessary; }; struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; struct list_head sched_cb_entry; int sched_cb_usage; int active_oncpu; int exclusive; raw_spinlock_t hrtimer_lock; struct hrtimer hrtimer; ktime_t hrtimer_interval; unsigned int hrtimer_active; }; struct perf_domain { struct em_perf_domain *em_pd; struct perf_domain *next; struct callback_head rcu; }; struct perf_event_attr { __u32 type; __u32 size; __u64 config; union { __u64 sample_period; __u64 sample_freq; }; __u64 sample_type; __u64 read_format; __u64 disabled: 1; __u64 inherit: 1; __u64 pinned: 1; __u64 exclusive: 1; __u64 exclude_user: 1; __u64 exclude_kernel: 1; __u64 exclude_hv: 1; __u64 exclude_idle: 1; __u64 mmap: 1; __u64 comm: 1; __u64 freq: 1; __u64 inherit_stat: 1; __u64 enable_on_exec: 1; __u64 task: 1; __u64 watermark: 1; __u64 precise_ip: 2; __u64 
mmap_data: 1; __u64 sample_id_all: 1; __u64 exclude_host: 1; __u64 exclude_guest: 1; __u64 exclude_callchain_kernel: 1; __u64 exclude_callchain_user: 1; __u64 mmap2: 1; __u64 comm_exec: 1; __u64 use_clockid: 1; __u64 context_switch: 1; __u64 write_backward: 1; __u64 namespaces: 1; __u64 ksymbol: 1; __u64 bpf_event: 1; __u64 aux_output: 1; __u64 cgroup: 1; __u64 text_poke: 1; __u64 build_id: 1; __u64 inherit_thread: 1; __u64 remove_on_exec: 1; __u64 sigtrap: 1; __u64 __reserved_1: 26; union { __u32 wakeup_events; __u32 wakeup_watermark; }; __u32 bp_type; union { __u64 bp_addr; __u64 kprobe_func; __u64 uprobe_path; __u64 config1; }; union { __u64 bp_len; __u64 kprobe_addr; __u64 probe_offset; __u64 config2; }; __u64 branch_sample_type; __u64 sample_regs_user; __u32 sample_stack_user; __s32 clockid; __u64 sample_regs_intr; __u32 aux_watermark; __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; __u64 sig_data; __u64 config3; }; typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); struct perf_event { struct list_head event_entry; struct list_head sibling_list; struct list_head active_list; struct rb_node group_node; u64 group_index; struct list_head migrate_entry; struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; int event_caps; int group_caps; unsigned int group_generation; struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; u64 total_time_enabled; u64 total_time_running; u64 tstamp; struct perf_event_attr attr; u16 header_size; u16 id_header_size; u16 read_size; struct hw_perf_event hw; struct perf_event_context *ctx; struct perf_event_pmu_context *pmu_ctx; atomic_long_t refcount; atomic64_t child_total_time_enabled; atomic64_t child_total_time_running; struct mutex child_mutex; struct list_head child_list; struct perf_event *parent; int oncpu; int cpu; struct list_head owner_entry; struct task_struct *owner; struct mutex mmap_mutex; atomic_t mmap_count; struct perf_buffer *rb; struct list_head rb_entry; long unsigned int rcu_batches; int rcu_pending; wait_queue_head_t waitq; struct fasync_struct *fasync; unsigned int pending_wakeup; unsigned int pending_kill; unsigned int pending_disable; long unsigned int pending_addr; struct irq_work pending_irq; struct irq_work pending_disable_irq; struct callback_head pending_task; unsigned int pending_work; struct rcuwait pending_work_wait; atomic_t event_limit; struct perf_addr_filters_head addr_filters; struct perf_addr_filter_range *addr_filter_ranges; long unsigned int addr_filters_gen; struct perf_event *aux_event; void (*destroy)(struct perf_event *); struct callback_head callback_head; struct pid_namespace *ns; u64 id; atomic64_t lost_samples; u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; struct bpf_prog *prog; u64 bpf_cookie; struct trace_event_call *tp_event; struct event_filter *filter; struct ftrace_ops ftrace_ops; struct perf_cgroup *cgrp; void *security; struct list_head sb_list; __u32 orig_type; }; struct perf_event_min_heap { int nr; int size; struct perf_event **data; struct perf_event *preallocated[0]; }; struct perf_event_mmap_page { __u32 version; __u32 compat_version; __u32 lock; __u32 index; __s64 offset; __u64 time_enabled; __u64 time_running; union { __u64 capabilities; struct { __u64 cap_bit0: 1; __u64 cap_bit0_is_deprecated: 1; __u64 cap_user_rdpmc: 1; 
__u64 cap_user_time: 1; __u64 cap_user_time_zero: 1; __u64 cap_user_time_short: 1; __u64 cap_____res: 58; }; }; __u16 pmc_width; __u16 time_shift; __u32 time_mult; __u64 time_offset; __u64 time_zero; __u32 size; __u32 __reserved_1; __u64 time_cycles; __u64 time_mask; __u8 __reserved[928]; __u64 data_head; __u64 data_tail; __u64 data_offset; __u64 data_size; __u64 aux_head; __u64 aux_tail; __u64 aux_offset; __u64 aux_size; }; struct perf_event_query_bpf { __u32 ids_len; __u32 prog_cnt; __u32 ids[0]; }; struct perf_event_security_struct { u32 sid; }; struct perf_ibs { struct pmu pmu; unsigned int msr; u64 config_mask; u64 cnt_mask; u64 enable_mask; u64 valid_mask; u64 max_period; long unsigned int offset_mask[1]; int offset_max; unsigned int fetch_count_reset_broken: 1; unsigned int fetch_ignore_if_zero_rip: 1; struct cpu_perf_ibs *pcpu; u64 (*get_count)(u64); }; struct perf_ibs_data { u32 size; union { u32 data[0]; u32 caps; }; u64 regs[8]; }; struct perf_ksymbol_event { const char *name; int name_len; struct { struct perf_event_header header; u64 addr; u32 len; u16 ksym_type; u16 flags; } event_id; }; struct perf_mmap_event { struct vm_area_struct *vma; const char *file_name; int file_size; int maj; int min; u64 ino; u64 ino_generation; u32 prot; u32 flags; u8 build_id[20]; u32 build_id_size; struct { struct perf_event_header header; u32 pid; u32 tid; u64 start; u64 len; u64 pgoff; } event_id; }; struct perf_msr { u64 msr; struct attribute_group *grp; bool (*test)(int, void *); bool no_check; u64 mask; }; struct perf_ns_link_info { __u64 dev; __u64 ino; }; struct perf_namespaces_event { struct task_struct *task; struct { struct perf_event_header header; u32 pid; u32 tid; u64 nr_namespaces; struct perf_ns_link_info link_info[7]; } event_id; }; struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; }; struct perf_pmu_events_ht_attr { struct device_attribute attr; u64 id; const char *event_str_ht; const char *event_str_noht; }; struct perf_pmu_events_hybrid_attr { struct device_attribute attr; u64 id; const char *event_str; u64 pmu_type; }; struct perf_pmu_format_hybrid_attr { struct device_attribute attr; u64 pmu_type; }; typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); struct perf_raw_frag { union { struct perf_raw_frag *next; long unsigned int pad; }; perf_copy_f copy; void *data; u32 size; } __attribute__((packed)); struct perf_raw_record { struct perf_raw_frag frag; u32 size; }; struct perf_read_data { struct perf_event *event; bool group; int ret; }; struct perf_read_event { struct perf_event_header header; u32 pid; u32 tid; }; struct sched_state { int weight; int event; int counter; int unassigned; int nr_gp; u64 used; }; struct perf_sched { int max_weight; int max_events; int max_gp; int saved_states; struct event_constraint **constraints; struct sched_state state; struct sched_state saved[2]; }; struct perf_switch_event { struct task_struct *task; struct task_struct *next_prev; struct { struct perf_event_header header; u32 next_prev_pid; u32 next_prev_tid; } event_id; }; struct perf_task_event { struct task_struct *task; struct perf_event_context *task_ctx; struct { struct perf_event_header header; u32 pid; u32 ppid; u32 tid; u32 ptid; u64 time; } event_id; }; struct perf_text_poke_event { const void *old_bytes; const void *new_bytes; size_t pad; u16 old_len; u16 new_len; struct { struct perf_event_header header; u64 addr; } event_id; }; struct pericom8250 { void *virt; unsigned int nr; int 
line[0]; }; struct perm_datum { u32 value; }; struct pernet_operations { struct list_head list; int (*init)(struct net *); void (*pre_exit)(struct net *); void (*exit)(struct net *); void (*exit_batch)(struct list_head *); void (*exit_batch_rtnl)(struct list_head *, struct list_head *); unsigned int * const id; const size_t size; }; struct pfcp_metadata { u8 type; __be64 seid; } __attribute__((packed)); struct skb_array { struct ptr_ring ring; }; struct pfifo_fast_priv { struct skb_array q[3]; }; struct zone { long unsigned int _watermark[4]; long unsigned int watermark_boost; long unsigned int nr_reserved_highatomic; long int lowmem_reserve[4]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pages *per_cpu_pageset; struct per_cpu_zonestat *per_cpu_zonestats; int pageset_high_min; int pageset_high_max; int pageset_batch; long unsigned int zone_start_pfn; atomic_long_t managed_pages; long unsigned int spanned_pages; long unsigned int present_pages; long unsigned int cma_pages; const char *name; long unsigned int nr_isolate_pageblock; int initialized; long: 0; struct cacheline_padding _pad1_; struct free_area free_area[11]; long unsigned int flags; spinlock_t lock; struct cacheline_padding _pad2_; long unsigned int percpu_drift_mark; long unsigned int compact_cached_free_pfn; long unsigned int compact_cached_migrate_pfn[2]; long unsigned int compact_init_migrate_pfn; long unsigned int compact_init_free_pfn; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; long: 0; struct cacheline_padding _pad3_; atomic_long_t vm_stat[10]; atomic_long_t vm_numa_event[6]; }; struct zoneref { struct zone *zone; int zone_idx; }; struct zonelist { struct zoneref _zonerefs[257]; }; struct pglist_data { struct zone node_zones[4]; struct zonelist node_zonelists[2]; int nr_zones; long unsigned int node_start_pfn; long unsigned int node_present_pages; long unsigned int node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; wait_queue_head_t reclaim_wait[4]; atomic_t nr_writeback_throttled; long unsigned int nr_reclaim_start; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_highest_zoneidx; int kswapd_failures; int kcompactd_max_order; enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; bool proactive_compact_trigger; long unsigned int totalreserve_pages; long unsigned int min_unmapped_pages; long unsigned int min_slab_pages; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad1_; struct deferred_split deferred_split_queue; unsigned int nbp_rl_start; long unsigned int nbp_rl_nr_cand; unsigned int nbp_threshold; unsigned int nbp_th_start; long unsigned int nbp_th_nr_cand; struct lruvec __lruvec; long unsigned int flags; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct cacheline_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[47]; struct memory_tier *memtier; struct memory_failure_stats mf_stats; long: 64; long: 64; }; struct pgv { char *buffer; }; struct phc_vclocks_reply_data { struct ethnl_reply_data base; int num; int *index; }; struct phy_attrs { u32 bus_width; u32 max_link_rate; enum phy_mode mode; }; struct regulator; struct phy_ops; struct phy { struct device dev; int id; const struct phy_ops *ops; struct mutex mutex; int init_count; int power_count; struct phy_attrs attrs; struct regulator *pwr; 
struct dentry *debugfs; }; struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; u32 device_ids[32]; }; struct phy_configure_opts_mipi_dphy { unsigned int clk_miss; unsigned int clk_post; unsigned int clk_pre; unsigned int clk_prepare; unsigned int clk_settle; unsigned int clk_term_en; unsigned int clk_trail; unsigned int clk_zero; unsigned int d_term_en; unsigned int eot; unsigned int hs_exit; unsigned int hs_prepare; unsigned int hs_settle; unsigned int hs_skip; unsigned int hs_trail; unsigned int hs_zero; unsigned int init; unsigned int lpx; unsigned int ta_get; unsigned int ta_go; unsigned int ta_sure; unsigned int wakeup; long unsigned int hs_clk_rate; long unsigned int lp_clk_rate; unsigned char lanes; }; struct phy_configure_opts_dp { unsigned int link_rate; unsigned int lanes; unsigned int voltage[4]; unsigned int pre[4]; u8 ssc: 1; u8 set_rate: 1; u8 set_lanes: 1; u8 set_voltages: 1; }; struct phy_configure_opts_lvds { unsigned int bits_per_lane_and_dclk_cycle; long unsigned int differential_clk_rate; unsigned int lanes; bool is_slave; }; union phy_configure_opts { struct phy_configure_opts_mipi_dphy mipi_dphy; struct phy_configure_opts_dp dp; struct phy_configure_opts_lvds lvds; }; struct phylink; struct pse_control; struct phy_driver; struct phy_device { struct mdio_device mdio; const struct phy_driver *drv; struct device_link *devlink; u32 phyindex; u32 phy_id; struct phy_c45_device_ids c45_ids; unsigned int is_c45: 1; unsigned int is_internal: 1; unsigned int is_pseudo_fixed_link: 1; unsigned int is_gigabit_capable: 1; unsigned int has_fixups: 1; unsigned int suspended: 1; unsigned int suspended_by_mdio_bus: 1; unsigned int sysfs_links: 1; unsigned int loopback_enabled: 1; unsigned int downshifted_rate: 1; unsigned int is_on_sfp_module: 1; unsigned int mac_managed_pm: 1; unsigned int wol_enabled: 1; unsigned int autoneg: 1; unsigned int link: 1; unsigned int autoneg_complete: 1; unsigned int interrupts: 1; unsigned int irq_suspended: 1; unsigned int irq_rerun: 1; unsigned int default_timestamp: 1; int rate_matching; enum phy_state state; u32 dev_flags; phy_interface_t interface; long unsigned int possible_interfaces[1]; int speed; int duplex; int port; int pause; int asym_pause; u8 master_slave_get; u8 master_slave_set; u8 master_slave_state; long unsigned int supported[2]; long unsigned int advertising[2]; long unsigned int lp_advertising[2]; long unsigned int adv_old[2]; long unsigned int supported_eee[2]; long unsigned int advertising_eee[2]; bool eee_enabled; long unsigned int host_interfaces[1]; u32 eee_broken_modes; bool enable_tx_lpi; struct eee_config eee_cfg; struct list_head leds; int irq; void *priv; struct phy_package_shared *shared; struct sk_buff *skb; void *ehdr; struct nlattr *nest; struct delayed_work state_queue; struct mutex lock; bool sfp_bus_attached; struct sfp_bus *sfp_bus; struct phylink *phylink; struct net_device *attached_dev; struct mii_timestamper *mii_ts; struct pse_control *psec; u8 mdix; u8 mdix_ctrl; int pma_extable; unsigned int link_down_events; void (*phy_link_change)(struct phy_device *, bool); void (*adjust_link)(struct net_device *); }; struct phy_device_node { enum phy_upstream upstream_type; union { struct net_device *netdev; struct phy_device *phydev; } upstream; struct sfp_bus *parent_sfp_bus; struct phy_device *phy; }; struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; u32 phy_id_mask; const long unsigned int * const features; u32 flags; const void *driver_data; int (*soft_reset)(struct 
phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*get_features)(struct phy_device *); int (*get_rate_matching)(struct phy_device *, phy_interface_t); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*config_intr)(struct phy_device *); irqreturn_t (*handle_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd)(struct phy_device *, int, u16); int (*write_mmd)(struct phy_device *, int, u16, u16); int (*read_page)(struct phy_device *); int (*write_page)(struct phy_device *, int); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); int (*cable_test_start)(struct phy_device *); int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); int (*cable_test_get_status)(struct phy_device *, bool *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); int (*set_loopback)(struct phy_device *, bool); int (*get_sqi)(struct phy_device *); int (*get_sqi_max)(struct phy_device *); int (*get_plca_cfg)(struct phy_device *, struct phy_plca_cfg *); int (*set_plca_cfg)(struct phy_device *, const struct phy_plca_cfg *); int (*get_plca_status)(struct phy_device *, struct phy_plca_status *); int (*led_brightness_set)(struct phy_device *, u8, enum led_brightness); int (*led_blink_set)(struct phy_device *, u8, long unsigned int *, long unsigned int *); int (*led_hw_is_supported)(struct phy_device *, u8, long unsigned int); int (*led_hw_control_set)(struct phy_device *, u8, long unsigned int); int (*led_hw_control_get)(struct phy_device *, u8, long unsigned int *); int (*led_polarity_set)(struct phy_device *, int, long unsigned int); }; struct phy_link_topology { struct xarray phys; u32 next_phy_index; }; struct phy_lookup { struct list_head node; const char *dev_id; const char *con_id; struct phy *phy; }; struct phy_ops { int (*init)(struct phy *); int (*exit)(struct phy *); int (*power_on)(struct phy *); int (*power_off)(struct phy *); int (*set_mode)(struct phy *, enum phy_mode, int); int (*set_media)(struct phy *, enum phy_media); int (*set_speed)(struct phy *, int); int (*configure)(struct phy *, union phy_configure_opts *); int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); int (*reset)(struct phy *); int (*calibrate)(struct phy *); int (*connect)(struct phy *, int); int (*disconnect)(struct phy *, int); void (*release)(struct phy *); struct module *owner; }; struct phy_package_shared { u8 base_addr; struct device_node *np; refcount_t refcnt; long unsigned int flags; size_t priv_size; void *priv; }; struct phy_plca_cfg { int version; int enabled; int node_id; int node_cnt; int to_tmr; int burst_cnt; int burst_tmr; }; struct phy_plca_status { bool pst; }; struct phy_provider { struct device *dev; struct device_node *children; struct module *owner; struct list_head list; 
struct phy * (*of_xlate)(struct device *, const struct of_phandle_args *); }; struct phy_req_info { struct ethnl_req_info base; struct phy_device_node *pdn; }; struct phy_tdr_config { u32 first; u32 last; u32 step; s8 pair; }; struct upid { int nr; struct pid_namespace *ns; }; struct pid { refcount_t count; unsigned int level; spinlock_t lock; struct dentry *stashed; u64 ino; struct hlist_head tasks[4]; struct hlist_head inodes; wait_queue_head_t wait_pidfd; struct callback_head rcu; struct upid numbers[0]; }; union proc_op { int (*proc_get_link)(struct dentry *, struct path *); int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); int lsmid; }; struct pid_entry { const char *name; unsigned int len; umode_t mode; const struct inode_operations *iop; const struct file_operations *fop; union proc_op op; }; struct pid_namespace { struct idr idr; struct callback_head rcu; unsigned int pid_allocated; struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; struct fs_pin *bacct; struct user_namespace *user_ns; struct ucounts *ucounts; int reboot; struct ns_common ns; int memfd_noexec_scope; }; struct pimreghdr { __u8 type; __u8 reserved; __be16 csum; __be32 flags; }; struct ping_iter_state { struct seq_net_private p; int bucket; sa_family_t family; }; struct ping_table { struct hlist_head hash[64]; spinlock_t lock; }; struct pingfakehdr { struct icmphdr icmph; struct msghdr *msg; sa_family_t family; __wsum wcheck; }; struct pingv6_ops { int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); int (*icmpv6_err_convert)(u8, u8, int *); void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); }; struct pipe_buffer; struct pipe_buf_operations { int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); }; struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; long unsigned int private; }; struct pipe_inode_info { struct mutex mutex; wait_queue_head_t rd_wait; wait_queue_head_t wr_wait; unsigned int head; unsigned int tail; unsigned int max_usage; unsigned int ring_size; unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; unsigned int r_counter; unsigned int w_counter; bool poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; }; struct pipe_wait { struct trace_iterator *iter; int wait_index; }; struct pkcs1pad_ctx { struct crypto_akcipher *child; unsigned int key_size; }; struct rsa_asn1_template; struct pkcs1pad_inst_ctx { struct crypto_akcipher_spawn spawn; const struct rsa_asn1_template *digest_info; }; struct pkcs1pad_request { struct scatterlist in_sg[2]; struct scatterlist out_sg[1]; uint8_t *in_buf; uint8_t *out_buf; struct akcipher_request child_req; }; struct x509_certificate; struct pkcs7_signed_info; struct pkcs7_message { struct x509_certificate *certs; struct 
x509_certificate *crl; struct pkcs7_signed_info *signed_infos; u8 version; bool have_authattrs; enum OID data_type; size_t data_len; size_t data_hdrlen; const void *data; }; struct pkcs7_parse_context { struct pkcs7_message *msg; struct pkcs7_signed_info *sinfo; struct pkcs7_signed_info **ppsinfo; struct x509_certificate *certs; struct x509_certificate **ppcerts; long unsigned int data; enum OID last_oid; unsigned int x509_index; unsigned int sinfo_index; const void *raw_serial; unsigned int raw_serial_size; unsigned int raw_issuer_size; const void *raw_issuer; const void *raw_skid; unsigned int raw_skid_size; bool expect_skid; }; struct pkcs7_signed_info { struct pkcs7_signed_info *next; struct x509_certificate *signer; unsigned int index; bool unsupported_crypto; bool blacklisted; const void *msgdigest; unsigned int msgdigest_len; unsigned int authattrs_len; const void *authattrs; long unsigned int aa_set; time64_t signing_time; struct public_key_signature *sig; }; struct pkg_cstate_info { bool skip; int msr_index; int cstate_id; }; struct pkru_state { u32 pkru; u32 pad; }; struct plat_serial8250_port { long unsigned int iobase; void *membase; resource_size_t mapbase; resource_size_t mapsize; unsigned int uartclk; unsigned int irq; long unsigned int irqflags; void *private_data; unsigned char regshift; unsigned char iotype; unsigned char hub6; unsigned char has_sysrq; unsigned int type; upf_t flags; u16 bugs; unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); u32 (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, u32); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int, unsigned int); void (*handle_break)(struct uart_port *); }; struct mfd_cell; struct platform_device_id; struct platform_device { const char *name; int id; bool id_auto; struct device dev; u64 platform_dma_mask; struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; const char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; }; struct platform_device_id { char name[20]; kernel_ulong_t driver_data; }; struct platform_device_info { struct device *parent; struct fwnode_handle *fwnode; bool of_node_reused; const char *name; int id; const struct resource *res; unsigned int num_res; const void *data; size_t size_data; u64 dma_mask; const struct property_entry *properties; }; struct platform_driver { int (*probe)(struct platform_device *); union { void (*remove)(struct platform_device *); void (*remove_new)(struct platform_device *); }; void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t); int (*resume)(struct platform_device *); struct device_driver driver; const struct platform_device_id *id_table; bool prevent_deferred_probe; bool driver_managed_dma; }; struct platform_object { struct platform_device pdev; char name[0]; }; struct platform_s2idle_ops { int (*begin)(void); int (*prepare)(void); int (*prepare_late)(void); void (*check)(void); bool (*wake)(void); void (*restore_early)(void); void (*restore)(void); void (*end)(void); }; struct platform_suspend_ops { int (*valid)(suspend_state_t); int (*begin)(suspend_state_t); int (*prepare)(void); int (*prepare_late)(void); int 
(*enter)(suspend_state_t); void (*wake)(void); void (*finish)(void); bool (*suspend_again)(void); void (*end)(void); void (*recover)(void); }; struct plca_reply_data { struct ethnl_reply_data base; struct phy_plca_cfg plca_cfg; struct phy_plca_status plca_st; }; struct pm_nl_pernet { spinlock_t lock; struct list_head local_addr_list; unsigned int addrs; unsigned int stale_loss_cnt; unsigned int add_addr_signal_max; unsigned int add_addr_accept_max; unsigned int local_addr_max; unsigned int subflows_max; unsigned int next_id; long unsigned int id_bitmap[4]; }; struct pm_qos_request { struct plist_node node; struct pm_qos_constraints *qos; }; struct pm_subsys_data { spinlock_t lock; unsigned int refcount; }; struct pm_vt_switch { struct list_head head; struct device *dev; bool required; }; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; netdevice_tracker dev_tracker; u32 flags; u8 protocol; u32 key[0]; }; struct pnp_protocol; struct pnp_id; struct pnp_card { struct device dev; unsigned char number; struct list_head global_list; struct list_head protocol_list; struct list_head devices; struct pnp_protocol *protocol; struct pnp_id *id; char name[50]; unsigned char pnpver; unsigned char productver; unsigned int serial; unsigned char checksum; struct proc_dir_entry *procdir; }; struct pnp_card_device_id { __u8 id[8]; kernel_ulong_t driver_data; struct { __u8 id[8]; } devs[8]; }; struct pnp_device_id; struct pnp_driver { const char *name; const struct pnp_device_id *id_table; unsigned int flags; int (*probe)(struct pnp_dev *, const struct pnp_device_id *); void (*remove)(struct pnp_dev *); void (*shutdown)(struct pnp_dev *); int (*suspend)(struct pnp_dev *, pm_message_t); int (*resume)(struct pnp_dev *); struct device_driver driver; }; struct pnp_card_link; struct pnp_card_driver { struct list_head global_list; char *name; const struct pnp_card_device_id *id_table; unsigned int flags; int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); void (*remove)(struct pnp_card_link *); int (*suspend)(struct pnp_card_link *, pm_message_t); int (*resume)(struct pnp_card_link *); struct pnp_driver link; }; struct pnp_card_link { struct pnp_card *card; struct pnp_card_driver *driver; void *driver_data; pm_message_t pm_state; }; struct pnp_dev { struct device dev; u64 dma_mask; unsigned int number; int status; struct list_head global_list; struct list_head protocol_list; struct list_head card_list; struct list_head rdev_list; struct pnp_protocol *protocol; struct pnp_card *card; struct pnp_driver *driver; struct pnp_card_link *card_link; struct pnp_id *id; int active; int capabilities; unsigned int num_dependent_sets; struct list_head resources; struct list_head options; char name[50]; int flags; struct proc_dir_entry *procent; void *data; }; struct pnp_device_id { __u8 id[8]; kernel_ulong_t driver_data; }; struct pnp_dma { unsigned char map; unsigned char flags; }; struct pnp_fixup { char id[7]; void (*quirk_function)(struct pnp_dev *); }; struct pnp_id { char id[8]; struct pnp_id *next; }; struct pnp_info_buffer { char *buffer; char *curr; long unsigned int size; long unsigned int len; int stop; int error; }; typedef struct pnp_info_buffer pnp_info_buffer_t; struct pnp_irq { pnp_irq_mask_t map; unsigned char flags; }; struct pnp_mem { resource_size_t min; resource_size_t max; resource_size_t align; resource_size_t size; unsigned char flags; }; struct pnp_port { 
resource_size_t min; resource_size_t max; resource_size_t align; resource_size_t size; unsigned char flags; }; struct pnp_option { struct list_head list; unsigned int flags; long unsigned int type; union { struct pnp_port port; struct pnp_irq irq; struct pnp_dma dma; struct pnp_mem mem; } u; }; struct pnp_protocol { struct list_head protocol_list; char *name; int (*get)(struct pnp_dev *); int (*set)(struct pnp_dev *); int (*disable)(struct pnp_dev *); bool (*can_wakeup)(struct pnp_dev *); int (*suspend)(struct pnp_dev *, pm_message_t); int (*resume)(struct pnp_dev *); unsigned char number; struct device dev; struct list_head cards; struct list_head devices; }; struct pnp_resource { struct list_head list; struct resource res; }; struct policy_data { struct policydb *p; void *fp; }; struct policy_file { char *data; size_t len; }; struct policy_load_memory { size_t len; void *data; }; struct role_datum; struct user_datum; struct type_datum; struct role_allow; struct policydb { int mls_enabled; struct symtab symtab[8]; char **sym_val_to_name[8]; struct class_datum **class_val_to_struct; struct role_datum **role_val_to_struct; struct user_datum **user_val_to_struct; struct type_datum **type_val_to_struct; struct avtab te_avtab; struct hashtab role_tr; struct ebitmap filename_trans_ttypes; struct hashtab filename_trans; u32 compat_filename_trans_count; struct cond_bool_datum **bool_val_to_struct; struct avtab te_cond_avtab; struct cond_node *cond_list; u32 cond_list_len; struct role_allow *role_allow; struct ocontext *ocontexts[9]; struct genfs *genfs; struct hashtab range_tr; struct ebitmap *type_attr_map_array; struct ebitmap policycaps; struct ebitmap permissive_map; size_t len; unsigned int policyvers; unsigned int reject_unknown: 1; unsigned int allow_unknown: 1; u16 process_class; u32 process_trans_perms; }; struct policydb_compat_info { unsigned int version; unsigned int sym_num; unsigned int ocon_num; }; struct pollfd { int fd; short int events; short int revents; }; struct poll_list { struct poll_list *next; unsigned int len; struct pollfd entries[0]; }; struct poll_table_entry { struct file *filp; __poll_t key; wait_queue_entry_t wait; wait_queue_head_t *wait_address; }; struct poll_table_page { struct poll_table_page *next; struct poll_table_entry *entry; struct poll_table_entry entries[0]; }; struct poll_wqueues { poll_table pt; struct poll_table_page *table; struct task_struct *polling_task; int triggered; int error; int inline_index; struct poll_table_entry inline_entries[9]; }; struct worker_pool; struct pool_workqueue { struct worker_pool *pool; struct workqueue_struct *wq; int work_color; int flush_color; int refcnt; int nr_in_flight[16]; bool plugged; int nr_active; struct list_head inactive_works; struct list_head pending_node; struct list_head pwqs_node; struct list_head mayday_node; u64 stats[8]; struct kthread_work release_work; struct callback_head rcu; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct port_identity { struct clock_identity clock_identity; __be16 port_number; }; struct portdrv_service_data { struct pcie_port_service_driver *drv; struct device *dev; u32 service; }; struct posix_acl_entry { short int e_tag; short unsigned int e_perm; union { kuid_t e_uid; kgid_t e_gid; }; }; struct posix_acl { refcount_t a_refcount; struct 
callback_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[0]; }; struct posix_acl_xattr_entry { __le16 e_tag; __le16 e_perm; __le32 e_id; }; struct posix_acl_xattr_header { __le32 a_version; }; struct posix_clock; struct posix_clock_context; struct posix_clock_operations { struct module *owner; int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); int (*clock_gettime)(struct posix_clock *, struct timespec64 *); int (*clock_getres)(struct posix_clock *, struct timespec64 *); int (*clock_settime)(struct posix_clock *, const struct timespec64 *); long int (*ioctl)(struct posix_clock_context *, unsigned int, long unsigned int); int (*open)(struct posix_clock_context *, fmode_t); __poll_t (*poll)(struct posix_clock_context *, struct file *, poll_table *); int (*release)(struct posix_clock_context *); ssize_t (*read)(struct posix_clock_context *, uint, char *, size_t); }; struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; struct device *dev; struct rw_semaphore rwsem; bool zombie; }; struct posix_clock_context { struct posix_clock *clk; void *private_clkdata; }; struct posix_clock_desc { struct file *fp; struct posix_clock *clk; }; struct posix_cputimer_base { u64 nextevt; struct timerqueue_head tqhead; }; struct posix_cputimers { struct posix_cputimer_base bases[3]; unsigned int timers_active; unsigned int expiry_active; }; struct posix_cputimers_work { struct callback_head work; struct mutex mutex; unsigned int scheduled; }; struct posix_msg_tree_node { struct rb_node rb_node; struct list_head msg_list; int priority; }; struct postprocess_bh_ctx { struct work_struct work; struct buffer_head *bh; }; struct power_supply_battery_info; struct power_supply { const struct power_supply_desc *desc; char **supplied_to; size_t num_supplicants; char **supplied_from; size_t num_supplies; struct device_node *of_node; void *drv_data; struct device dev; struct work_struct changed_work; struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; bool initialized; bool removing; atomic_t use_cnt; struct power_supply_battery_info *battery_info; struct thermal_zone_device *tzd; struct thermal_cooling_device *tcd; }; struct power_supply_attr { const char *prop_name; char attr_name[31]; struct device_attribute dev_attr; const char * const *text_values; int text_values_len; }; struct power_supply_maintenance_charge_table; struct power_supply_battery_ocv_table; struct power_supply_resistance_temp_table; struct power_supply_vbat_ri_table; struct power_supply_battery_info { unsigned int technology; int energy_full_design_uwh; int charge_full_design_uah; int voltage_min_design_uv; int voltage_max_design_uv; int tricklecharge_current_ua; int precharge_current_ua; int precharge_voltage_max_uv; int charge_term_current_ua; int charge_restart_voltage_uv; int overvoltage_limit_uv; int constant_charge_current_max_ua; int constant_charge_voltage_max_uv; const struct power_supply_maintenance_charge_table *maintenance_charge; int maintenance_charge_size; int alert_low_temp_charge_current_ua; int alert_low_temp_charge_voltage_uv; int alert_high_temp_charge_current_ua; int alert_high_temp_charge_voltage_uv; int factory_internal_resistance_uohm; int factory_internal_resistance_charging_uohm; int ocv_temp[20]; int temp_ambient_alert_min; int temp_ambient_alert_max; int temp_alert_min; int temp_alert_max; int temp_min; int temp_max; struct power_supply_battery_ocv_table *ocv_table[20]; int ocv_table_size[20]; struct power_supply_resistance_temp_table 
*resist_table; int resist_table_size; const struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; const struct power_supply_vbat_ri_table *vbat2ri_charging; int vbat2ri_charging_size; int bti_resistance_ohm; int bti_resistance_tolerance; }; struct power_supply_battery_ocv_table { int ocv; int capacity; }; struct power_supply_config { struct device_node *of_node; struct fwnode_handle *fwnode; void *drv_data; const struct attribute_group **attr_grp; char **supplied_to; size_t num_supplicants; }; struct power_supply_hwmon { struct power_supply *psy; long unsigned int *props; }; struct power_supply_maintenance_charge_table { int charge_current_max_ua; int charge_voltage_max_uv; int charge_safety_timer_minutes; }; union power_supply_propval { int intval; const char *strval; }; struct power_supply_resistance_temp_table { int temp; int resistance; }; struct power_supply_vbat_ri_table { int vbat_uv; int ri_uohm; }; struct powercap_constraint_attr { struct device_attribute power_limit_attr; struct device_attribute time_window_attr; struct device_attribute max_power_attr; struct device_attribute min_power_attr; struct device_attribute max_time_window_attr; struct device_attribute min_time_window_attr; struct device_attribute name_attr; }; struct powercap_control_type_ops; struct powercap_control_type { struct device dev; struct idr idr; int nr_zones; const struct powercap_control_type_ops *ops; struct mutex lock; bool allocated; struct list_head node; }; struct powercap_control_type_ops { int (*set_enable)(struct powercap_control_type *, bool); int (*get_enable)(struct powercap_control_type *, bool *); int (*release)(struct powercap_control_type *); }; struct powercap_zone_ops; struct powercap_zone_constraint; struct powercap_zone { int id; char *name; void *control_type_inst; const struct powercap_zone_ops *ops; struct device dev; int const_id_cnt; struct idr idr; struct idr *parent_idr; void *private_data; struct attribute **zone_dev_attrs; int zone_attr_count; struct attribute_group dev_zone_attr_group; const struct attribute_group *dev_attr_groups[2]; bool allocated; struct powercap_zone_constraint *constraints; }; struct powercap_zone_constraint_ops; struct powercap_zone_constraint { int id; struct powercap_zone *power_zone; const struct powercap_zone_constraint_ops *ops; }; struct powercap_zone_constraint_ops { int (*set_power_limit_uw)(struct powercap_zone *, int, u64); int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *); int (*set_time_window_us)(struct powercap_zone *, int, u64); int (*get_time_window_us)(struct powercap_zone *, int, u64 *); int (*get_max_power_uw)(struct powercap_zone *, int, u64 *); int (*get_min_power_uw)(struct powercap_zone *, int, u64 *); int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *); int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *); const char * (*get_name)(struct powercap_zone *, int); }; struct powercap_zone_ops { int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *); int (*get_energy_uj)(struct powercap_zone *, u64 *); int (*reset_energy_uj)(struct powercap_zone *); int (*get_max_power_range_uw)(struct powercap_zone *, u64 *); int (*get_power_uw)(struct powercap_zone *, u64 *); int (*set_enable)(struct powercap_zone *, bool); int (*get_enable)(struct powercap_zone *, bool *); int (*release)(struct powercap_zone *); }; struct powerclamp_calibration_data { long unsigned int confidence; long unsigned int steady_comp; long unsigned int dynamic_comp; }; struct powerclamp_data { 
unsigned int cpu; unsigned int count; unsigned int guard; unsigned int window_size_now; unsigned int target_ratio; bool clamping; }; struct powernow_k8_data { unsigned int cpu; u32 numps; u32 batps; u32 rvo; u32 irt; u32 vidmvs; u32 vstable; u32 plllock; u32 exttype; u32 currvid; u32 currfid; struct cpufreq_frequency_table *powernow_table; struct acpi_processor_performance acpi_data; struct cpumask *available_cores; }; struct powernowk8_target_arg { struct cpufreq_policy *pol; unsigned int newstate; }; struct ppin_info { int feature; int msr_ppin_ctl; int msr_ppin; }; struct pppoe_tag { __be16 tag_type; __be16 tag_len; char tag_data[0]; }; struct pppoe_hdr { __u8 type: 4; __u8 ver: 4; __u8 code; __be16 sid; __be16 length; struct pppoe_tag tag[0]; }; struct pps_bind_args { int tsformat; int edge; int consumer; }; struct pps_device; struct pps_source_info { char name[32]; char path[32]; int mode; void (*echo)(struct pps_device *, int, void *); struct module *owner; struct device *dev; }; struct pps_ktime { __s64 sec; __s32 nsec; __u32 flags; }; struct pps_kparams { int api_version; int mode; struct pps_ktime assert_off_tu; struct pps_ktime clear_off_tu; }; struct pps_device { struct pps_source_info info; struct pps_kparams params; __u32 assert_sequence; __u32 clear_sequence; struct pps_ktime assert_tu; struct pps_ktime clear_tu; int current_mode; unsigned int last_ev; wait_queue_head_t queue; unsigned int id; const void *lookup_cookie; struct cdev cdev; struct device *dev; struct fasync_struct *async_queue; spinlock_t lock; }; struct pps_event_time { struct timespec64 ts_real; }; struct pps_kinfo { __u32 assert_sequence; __u32 clear_sequence; struct pps_ktime assert_tu; struct pps_ktime clear_tu; int current_mode; }; struct pps_fdata { struct pps_kinfo info; struct pps_ktime timeout; }; struct pptp_gre_header { struct gre_base_hdr gre_hd; __be16 payload_len; __be16 call_id; __be32 seq; __be32 ack; }; struct pr_clear { __u64 key; __u32 flags; __u32 __pad; }; struct pr_cont_work_struct { bool comma; work_func_t func; long int ctr; }; struct pr_held_reservation { u64 key; u32 generation; enum pr_type type; }; struct pr_keys { u32 generation; u32 num_keys; u64 keys[0]; }; struct pr_ops { int (*pr_register)(struct block_device *, u64, u64, u32); int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); int (*pr_release)(struct block_device *, u64, enum pr_type); int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); int (*pr_clear)(struct block_device *, u64); int (*pr_read_keys)(struct block_device *, struct pr_keys *); int (*pr_read_reservation)(struct block_device *, struct pr_held_reservation *); }; struct pr_preempt { __u64 old_key; __u64 new_key; __u32 type; __u32 flags; }; struct pr_registration { __u64 old_key; __u64 new_key; __u32 flags; __u32 __pad; }; struct pr_reservation { __u64 key; __u32 type; __u32 flags; }; struct prb_data_blk_lpos { long unsigned int begin; long unsigned int next; }; struct prb_data_block { long unsigned int id; char data[0]; }; struct prb_data_ring { unsigned int size_bits; char *data; atomic_long_t head_lpos; atomic_long_t tail_lpos; }; struct prb_desc { atomic_long_t state_var; struct prb_data_blk_lpos text_blk_lpos; }; struct printk_info; struct prb_desc_ring { unsigned int count_bits; struct prb_desc *descs; struct printk_info *infos; atomic_long_t head_id; atomic_long_t tail_id; atomic_long_t last_finalized_seq; }; struct printk_ringbuffer; struct prb_reserved_entry { struct printk_ringbuffer *rb; long unsigned int irqflags; long 
unsigned int id; unsigned int text_space; }; struct prctl_mm_map { __u64 start_code; __u64 end_code; __u64 start_data; __u64 end_data; __u64 start_brk; __u64 brk; __u64 start_stack; __u64 arg_start; __u64 arg_end; __u64 env_start; __u64 env_end; __u64 *auxv; __u32 auxv_size; __u32 exe_fd; }; struct prefix_cacheinfo { __u32 preferred_time; __u32 valid_time; }; struct prefix_info { __u8 type; __u8 length; __u8 prefix_len; union { __u8 flags; struct { __u8 reserved: 4; __u8 preferpd: 1; __u8 routeraddr: 1; __u8 autoconf: 1; __u8 onlink: 1; }; }; __be32 valid; __be32 prefered; __be32 reserved2; struct in6_addr prefix; }; struct prefixmsg { unsigned char prefix_family; unsigned char prefix_pad1; short unsigned int prefix_pad2; int prefix_ifindex; unsigned char prefix_type; unsigned char prefix_len; unsigned char prefix_flags; unsigned char prefix_pad3; }; struct prepend_buffer { char *buf; int len; }; struct print_entry { struct trace_entry ent; long unsigned int ip; char buf[0]; }; struct printf_spec { unsigned int type: 8; int field_width: 24; unsigned int flags: 8; unsigned int base: 8; int precision: 16; }; struct printk_info { u64 seq; u64 ts_nsec; u16 text_len; u8 facility; u8 flags: 5; u8 level: 3; u32 caller_id; struct dev_printk_info dev_info; }; struct printk_message { struct printk_buffers *pbufs; unsigned int outbuf_len; u64 seq; long unsigned int dropped; }; struct printk_record { struct printk_info *info; char *text_buf; unsigned int text_buf_size; }; struct printk_ringbuffer { struct prb_desc_ring desc_ring; struct prb_data_ring text_data_ring; atomic_long_t fail; }; struct privflags_reply_data { struct ethnl_reply_data base; const char (*priv_flag_names)[32]; unsigned int n_priv_flags; u32 priv_flags; }; struct prm_buffer { u8 prm_status; u64 efi_status; u8 prm_cmd; guid_t handler_guid; } __attribute__((packed)); struct prm_mmio_info; struct prm_context_buffer { char signature[4]; u16 revision; u16 reserved; guid_t identifier; u64 static_data_buffer; struct prm_mmio_info *mmio_ranges; }; struct prm_handler_info { guid_t guid; efi_status_t (*handler_addr)(u64, void *); u64 static_data_buffer_addr; u64 acpi_param_buffer_addr; struct list_head handler_list; }; struct prm_mmio_addr_range { u64 phys_addr; u64 virt_addr; u32 length; } __attribute__((packed)); struct prm_mmio_info { u64 mmio_count; struct prm_mmio_addr_range addr_ranges[0]; }; struct prm_module_info { guid_t guid; u16 major_rev; u16 minor_rev; u16 handler_count; struct prm_mmio_info *mmio_info; bool updatable; struct list_head module_list; struct prm_handler_info handlers[0]; }; typedef struct kobject *kobj_probe_t(dev_t, int *, void *); struct probe { struct probe *next; dev_t dev; long unsigned int range; struct module *owner; kobj_probe_t *get; int (*lock)(dev_t, void *); void *data; }; struct probe_arg { struct fetch_insn *code; bool dynamic; unsigned int offset; unsigned int count; const char *name; const char *comm; char *fmt; const struct fetch_type *type; }; struct probe_entry_arg { struct fetch_insn *code; unsigned int size; }; typedef int (*proc_write_t)(struct file *, char *, size_t); struct proc_ops; struct proc_dir_entry { atomic_t in_use; refcount_t refcnt; struct list_head pde_openers; spinlock_t pde_unload_lock; struct completion *pde_unload_completion; const struct inode_operations *proc_iops; union { const struct proc_ops *proc_ops; const struct file_operations *proc_dir_ops; }; const struct dentry_operations *proc_dops; union { const struct seq_operations *seq_ops; int (*single_show)(struct seq_file 
*, void *); }; proc_write_t write; void *data; unsigned int state_size; unsigned int low_ino; nlink_t nlink; kuid_t uid; kgid_t gid; loff_t size; struct proc_dir_entry *parent; struct rb_root subdir; struct rb_node subdir_node; char *name; umode_t mode; u8 flags; u8 namelen; char inline_name[0]; }; struct proc_fs_context { struct pid_namespace *pid_ns; unsigned int mask; enum proc_hidepid hidepid; int gid; enum proc_pidonly pidonly; }; struct proc_fs_info { struct pid_namespace *pid_ns; struct dentry *proc_self; struct dentry *proc_thread_self; kgid_t pid_gid; enum proc_hidepid hide_pid; enum proc_pidonly pidonly; struct callback_head rcu; }; struct proc_fs_opts { int flag; const char *str; }; struct proc_inode { struct pid *pid; unsigned int fd; union proc_op op; struct proc_dir_entry *pde; struct ctl_table_header *sysctl; struct ctl_table *sysctl_entry; struct hlist_node sibling_inodes; const struct proc_ns_operations *ns_ops; struct inode vfs_inode; }; struct proc_mounts { struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); }; struct proc_ns_operations { const char *name; const char *real_ns_name; int type; struct ns_common * (*get)(struct task_struct *); void (*put)(struct ns_common *); int (*install)(struct nsset *, struct ns_common *); struct user_namespace * (*owner)(struct ns_common *); struct ns_common * (*get_parent)(struct ns_common *); }; struct proc_ops { unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); int (*proc_mmap)(struct file *, struct vm_area_struct *); long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); }; struct proc_timens_offset { int clockid; struct timespec64 val; }; struct process_timer { struct timer_list timer; struct task_struct *task; }; struct procmap_query { __u64 size; __u64 query_flags; __u64 query_addr; __u64 vma_start; __u64 vma_end; __u64 vma_flags; __u64 vma_page_size; __u64 vma_offset; __u64 inode; __u32 dev_major; __u32 dev_minor; __u32 vma_name_size; __u32 build_id_size; __u64 vma_name_addr; __u64 build_id_addr; }; struct prog_entry { int target; int when_to_branch; struct filter_pred *pred; }; struct prog_poke_elem { struct list_head list; struct bpf_prog_aux *aux; }; struct prog_test_member1 { int a; }; struct prog_test_member { struct prog_test_member1 m; int c; }; struct prog_test_ref_kfunc { int a; int b; struct prog_test_member memb; struct prog_test_ref_kfunc *next; refcount_t cnt; }; struct property { char *name; int length; void *value; struct property *next; }; struct prot_inuse { int all; int val[64]; }; struct smc_hashinfo; struct proto_accept_arg; struct sk_psock; struct timewait_sock_ops; struct raw_hashinfo; struct proto { void (*close)(struct sock *, long int); int (*pre_connect)(struct sock *, struct sockaddr *, int); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, struct proto_accept_arg *); int (*ioctl)(struct sock *, int, int *); int (*init)(struct sock *); void 
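/*
 * Illustrative sketch, not part of the generated dump: a read-only
 * procfs file wired up through struct proc_ops above and the stock
 * seq_file helpers. The "hello" name and hello_* functions are
 * hypothetical.
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int hello_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello\n");
        return 0;
}

static int hello_open(struct inode *inode, struct file *file)
{
        return single_open(file, hello_show, NULL);
}

static const struct proc_ops hello_proc_ops = {
        .proc_open = hello_open,
        .proc_read = seq_read,
        .proc_lseek = seq_lseek,
        .proc_release = single_release,
};

/* registration, e.g. from module init: proc_create("hello", 0444, NULL, &hello_proc_ops); */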
(*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); void (*keepalive)(struct sock *, int); int (*sendmsg)(struct sock *, struct msghdr *, size_t); int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int *); void (*splice_eof)(struct socket *); int (*bind)(struct sock *, struct sockaddr *, int); int (*bind_add)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); bool (*bpf_bypass_getsockopt)(int, int); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, short unsigned int); void (*put_port)(struct sock *); int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); unsigned int inuse_idx; int (*forward_alloc_get)(const struct sock *); bool (*stream_memory_free)(const struct sock *, int); bool (*sock_is_readable)(struct sock *); void (*enter_memory_pressure)(struct sock *); void (*leave_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; int *per_cpu_fw_alloc; struct percpu_counter *sockets_allocated; long unsigned int *memory_pressure; long int *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; u32 sysctl_wmem_offset; u32 sysctl_rmem_offset; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; unsigned int ipv6_pinfo_offset; slab_flags_t slab_flags; unsigned int useroffset; unsigned int usersize; unsigned int *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; struct smc_hashinfo *smc_hash; } h; struct module *owner; char name[32]; struct list_head node; int (*diag_destroy)(struct sock *, int); }; struct proto_accept_arg { int flags; int err; int is_empty; bool kern; }; typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, struct proto_accept_arg *); int (*getname)(struct socket *, struct sockaddr *, int); __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, long unsigned int); int (*gettstamp)(struct socket *, void *, bool, bool); int (*listen)(struct socket *, int); int (*shutdown)(struct socket *, int); int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); void (*show_fdinfo)(struct seq_file *, struct socket *); int (*sendmsg)(struct socket *, struct msghdr *, size_t); int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct socket *); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); int (*read_skb)(struct sock *, skb_read_actor_t); int (*sendmsg_locked)(struct sock *, struct 
msghdr *, size_t); int (*set_rcvlowat)(struct sock *, int); }; struct prt_quirk { const struct dmi_system_id *system; unsigned int segment; unsigned int bus; unsigned int device; unsigned char pin; const char *source; const char *actual_source; }; struct ps2pp_info { u8 model; u8 kind; u16 features; }; struct psample_group { struct list_head list; struct net *net; u32 group_num; u32 refcount; u32 seq; struct callback_head rcu; }; struct psb_s { u8 signature[10]; u8 tableversion; u8 flags1; u16 vstable; u8 flags2; u8 num_tables; u32 cpuid; u8 plllocktime; u8 maxfid; u8 maxvid; u8 numps; }; struct psched_pktrate { u64 rate_pkts_ps; u32 mult; u8 shift; }; struct psched_ratecfg { u64 rate_bytes_ps; u32 mult; u16 overhead; u16 mpu; u8 linklayer; u8 shift; }; struct pse_control_config { enum ethtool_podl_pse_admin_state podl_admin_control; enum ethtool_c33_pse_admin_state c33_admin_control; }; struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; enum ethtool_c33_pse_admin_state c33_admin_state; enum ethtool_c33_pse_pw_d_status c33_pw_status; u32 c33_pw_class; u32 c33_actual_pw; struct ethtool_c33_pse_ext_state_info c33_ext_state_info; u32 c33_avail_pw_limit; struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; u32 c33_pw_limit_nb_ranges; }; struct pse_reply_data { struct ethnl_reply_data base; struct pse_control_status status; }; struct super_operations; struct xattr_handler; struct pseudo_fs_context { const struct super_operations *ops; const struct xattr_handler * const *xattr; const struct dentry_operations *dops; long unsigned int magic; }; struct psi_group {}; struct psmouse_protocol; struct psmouse { void *private; struct input_dev *dev; struct ps2dev ps2dev; struct delayed_work resync_work; const char *vendor; const char *name; const struct psmouse_protocol *protocol; unsigned char packet[8]; unsigned char badbyte; unsigned char pktcnt; unsigned char pktsize; unsigned char oob_data_type; unsigned char extra_buttons; bool acks_disable_command; unsigned int model; long unsigned int last; long unsigned int out_of_sync_cnt; long unsigned int num_resyncs; enum psmouse_state state; char devname[64]; char phys[32]; unsigned int rate; unsigned int resolution; unsigned int resetafter; unsigned int resync_time; bool smartscroll; psmouse_ret_t (*protocol_handler)(struct psmouse *); void (*set_rate)(struct psmouse *, unsigned int); void (*set_resolution)(struct psmouse *, unsigned int); void (*set_scale)(struct psmouse *, enum psmouse_scale); int (*reconnect)(struct psmouse *); int (*fast_reconnect)(struct psmouse *); void (*disconnect)(struct psmouse *); void (*cleanup)(struct psmouse *); int (*poll)(struct psmouse *); void (*pt_activate)(struct psmouse *); void (*pt_deactivate)(struct psmouse *); }; struct psmouse_attribute { struct device_attribute dattr; void *data; ssize_t (*show)(struct psmouse *, void *, char *); ssize_t (*set)(struct psmouse *, void *, const char *, size_t); bool protect; }; struct psmouse_protocol { enum psmouse_type type; bool maxproto; bool ignore_parity; bool try_passthru; bool smbus_companion; const char *name; const char *alias; int (*detect)(struct psmouse *, bool); int (*init)(struct psmouse *); }; struct pst_s { u8 fid; u8 vid; }; struct pstate_funcs { int (*get_max)(int); int (*get_max_physical)(int); int (*get_min)(int); int (*get_turbo)(int); int (*get_scaling)(void); int (*get_cpu_scaling)(int); int (*get_aperf_mperf_shift)(void); u64 (*get_val)(struct cpudata *, int); void 
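/*
 * Illustrative sketch, not part of the generated dump: a mostly stubbed
 * proto_ops table (struct defined above) for a toy socket family,
 * falling back on the kernel's sock_no_* helpers. The my_sock_* names
 * and the AF_UNSPEC placeholder are hypothetical.
 */
#include <linux/module.h>
#include <net/sock.h>

static int my_sock_release(struct socket *sock)
{
        return 0;
}

static int my_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        return -EOPNOTSUPP;
}

static int my_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
        return -EOPNOTSUPP;
}

static const struct proto_ops my_sock_ops = {
        .family = AF_UNSPEC, /* a real address family would go here */
        .owner = THIS_MODULE,
        .release = my_sock_release,
        .bind = sock_no_bind,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .sendmsg = my_sock_sendmsg,
        .recvmsg = my_sock_recvmsg,
        .mmap = sock_no_mmap,
};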
(*get_vid)(struct cpudata *); }; struct psy_am_i_supplied_data { struct power_supply *psy; unsigned int count; }; struct psy_get_supplier_prop_data { struct power_supply *psy; enum power_supply_property psp; union power_supply_propval *val; }; struct pt_filter { long unsigned int msr_a; long unsigned int msr_b; long unsigned int config; }; struct pt_filters { struct pt_filter filter[4]; unsigned int nr_filters; }; struct pt { struct perf_output_handle handle; struct pt_filters filters; int handle_nmi; int vmx_on; u64 output_base; u64 output_mask; }; struct pt_address_range { long unsigned int msr_a; long unsigned int msr_b; unsigned int reg_off; }; struct topa; struct topa_entry; struct pt_buffer { struct list_head tables; struct topa *first; struct topa *last; struct topa *cur; unsigned int cur_idx; size_t output_off; long unsigned int nr_pages; local_t data_size; local64_t head; bool snapshot; bool single; long int stop_pos; long int intr_pos; struct topa_entry *stop_te; struct topa_entry *intr_te; void **data_pages; }; struct pt_cap_desc { const char *name; u32 leaf; u8 reg; u32 mask; }; struct pt_pmu { struct pmu pmu; u32 caps[8]; bool vmx; bool branch_en_always_on; long unsigned int max_nonturbo_ratio; unsigned int tsc_art_num; unsigned int tsc_art_den; }; struct pt_regs_offset { const char *name; int offset; }; struct ptdesc { long unsigned int __page_flags; union { struct callback_head pt_rcu_head; struct list_head pt_list; struct { long unsigned int _pt_pad_1; pgtable_t pmd_huge_pte; }; }; long unsigned int __page_mapping; union { long unsigned int pt_index; struct mm_struct *pt_mm; atomic_t pt_frag_refcount; }; union { long unsigned int _pt_pad_2; spinlock_t *ptl; }; unsigned int __page_type; atomic_t __page_refcount; long unsigned int pt_memcg_data; }; struct ptp_clock_info; struct ptp_clock { struct posix_clock clock; struct device dev; struct ptp_clock_info *info; dev_t devid; int index; struct pps_device *pps_source; long int dialed_frequency; struct list_head tsevqs; spinlock_t tsevqs_lock; struct mutex pincfg_mux; wait_queue_head_t tsev_wq; int defunct; struct device_attribute *pin_dev_attr; struct attribute **pin_attr; struct attribute_group pin_attr_group; const struct attribute_group *pin_attr_groups[2]; struct kthread_worker *kworker; struct kthread_delayed_work aux_work; unsigned int max_vclocks; unsigned int n_vclocks; int *vclock_index; struct mutex n_vclocks_mux; bool is_virtual_clock; bool has_cycles; struct dentry *debugfs_root; }; struct ptp_clock_caps { int max_adj; int n_alarm; int n_ext_ts; int n_per_out; int pps; int n_pins; int cross_timestamping; int adjust_phase; int max_phase_adj; int rsv[11]; }; struct ptp_clock_event { int type; int index; union { u64 timestamp; s64 offset; struct pps_event_time pps_times; }; }; struct ptp_pin_desc; struct ptp_system_timestamp; struct system_device_crosststamp; struct ptp_clock_request; struct ptp_clock_info { struct module *owner; char name[32]; s32 max_adj; int n_alarm; int n_ext_ts; int n_per_out; int n_pins; int pps; struct ptp_pin_desc *pin_config; int (*adjfine)(struct ptp_clock_info *, long int); int (*adjphase)(struct ptp_clock_info *, s32); s32 (*getmaxphase)(struct ptp_clock_info *); int (*adjtime)(struct ptp_clock_info *, s64); int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); int (*settime64)(struct ptp_clock_info *, 
const struct timespec64 *); int (*getcycles64)(struct ptp_clock_info *, struct timespec64 *); int (*getcyclesx64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); int (*getcrosscycles)(struct ptp_clock_info *, struct system_device_crosststamp *); int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); long int (*do_aux_work)(struct ptp_clock_info *); }; struct ptp_extts_request { unsigned int index; unsigned int flags; unsigned int rsv[2]; }; struct ptp_clock_time { __s64 sec; __u32 nsec; __u32 reserved; }; struct ptp_perout_request { union { struct ptp_clock_time start; struct ptp_clock_time phase; }; struct ptp_clock_time period; unsigned int index; unsigned int flags; union { struct ptp_clock_time on; unsigned int rsv[4]; }; }; struct ptp_clock_request { enum { PTP_CLK_REQ_EXTTS = 0, PTP_CLK_REQ_PEROUT = 1, PTP_CLK_REQ_PPS = 2, } type; union { struct ptp_extts_request extts; struct ptp_perout_request perout; }; }; struct ptp_extts_event { struct ptp_clock_time t; unsigned int index; unsigned int flags; unsigned int rsv[2]; }; struct ptp_header { u8 tsmt; u8 ver; __be16 message_length; u8 domain_number; u8 reserved1; u8 flag_field[2]; __be64 correction; __be32 reserved2; struct port_identity source_port_identity; __be16 sequence_id; u8 control; u8 log_message_interval; } __attribute__((packed)); struct ptp_pin_desc { char name[64]; unsigned int index; unsigned int func; unsigned int chan; unsigned int rsv[5]; }; struct ptp_sys_offset { unsigned int n_samples; unsigned int rsv[3]; struct ptp_clock_time ts[51]; }; struct ptp_sys_offset_extended { unsigned int n_samples; __kernel_clockid_t clockid; unsigned int rsv[2]; struct ptp_clock_time ts[75]; }; struct ptp_sys_offset_precise { struct ptp_clock_time device; struct ptp_clock_time sys_realtime; struct ptp_clock_time sys_monoraw; unsigned int rsv[4]; }; struct ptp_system_timestamp { struct timespec64 pre_ts; struct timespec64 post_ts; clockid_t clockid; }; struct timecounter { const struct cyclecounter *cc; u64 cycle_last; u64 nsec; u64 mask; u64 frac; }; struct ptp_vclock { struct ptp_clock *pclock; struct ptp_clock_info info; struct ptp_clock *clock; struct hlist_node vclock_hash_node; struct cyclecounter cc; struct timecounter tc; struct mutex lock; }; struct ptrace_peeksiginfo_args { __u64 off; __u32 flags; __s32 nr; }; struct ptrace_rseq_configuration { __u64 rseq_abi_pointer; __u32 rseq_abi_size; __u32 signature; __u32 flags; __u32 pad; }; struct ptrace_sud_config { __u64 mode; __u64 selector; __u64 offset; __u64 len; }; struct ptrace_syscall_info { __u8 op; __u8 pad[3]; __u32 arch; __u64 instruction_pointer; __u64 stack_pointer; union { struct { __u64 nr; __u64 args[6]; } entry; struct { __s64 rval; __u8 is_error; } exit; struct { __u64 nr; __u64 args[6]; __u32 ret_data; } seccomp; }; }; struct pts_mount_opts { int setuid; int setgid; kuid_t uid; kgid_t gid; umode_t mode; umode_t ptmxmode; int reserve; int max; }; struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; struct super_block *sb; struct dentry *ptmx_dentry; }; struct public_key { void *key; u32 keylen; enum OID algo; void *params; u32 paramlen; bool key_is_private; const char *id_type; const char *pkey_algo; long unsigned int key_eflags; }; struct public_key_signature { struct asymmetric_key_id *auth_ids[3]; u8 *s; u8 *digest; u32 s_size; u32 digest_size; const char *pkey_algo; const char *hash_algo; const 
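/*
 * Illustrative sketch, not part of the generated dump: the core
 * callbacks of a PTP hardware clock driver built on struct
 * ptp_clock_info above. The my_ptp_* names are hypothetical and the
 * hardware access is stubbed with system time.
 */
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timekeeping.h>

static int my_ptp_gettime64(struct ptp_clock_info *info, struct timespec64 *ts)
{
        ktime_get_real_ts64(ts); /* stand-in for reading the device clock */
        return 0;
}

static int my_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
        return 0; /* program the device clock here */
}

static int my_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
        return 0; /* apply the frequency adjustment here */
}

static int my_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
        return 0; /* step the device clock by delta ns here */
}

static struct ptp_clock_info my_ptp_info = {
        .owner = THIS_MODULE,
        .name = "my-phc",
        .max_adj = 1000000,
        .gettime64 = my_ptp_gettime64,
        .settime64 = my_ptp_settime64,
        .adjfine = my_ptp_adjfine,
        .adjtime = my_ptp_adjtime,
};

/* registration: ptp = ptp_clock_register(&my_ptp_info, parent_device); */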
char *encoding; }; struct pvclock_vsyscall_time_info { struct pvclock_vcpu_time_info pvti; long: 64; long: 64; long: 64; long: 64; }; struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; }; struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; }; struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; long long unsigned int ino; blkcnt_t blocks; blkcnt_t nextents; }; struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3]; }; struct qdisc_dump_args { struct qdisc_walker w; struct sk_buff *skb; struct netlink_callback *cb; }; struct tc_ratespec { unsigned char cell_log; __u8 linklayer; short unsigned int overhead; short int cell_align; short unsigned int mpu; __u32 rate; }; struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; struct qdisc_rate_table *next; int refcnt; }; struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short int cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; }; struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[0]; }; struct qnode { struct mcs_spinlock mcs; }; struct qtag_prefix { __be16 eth_type; __be16 tci; }; struct queue_limits { blk_features_t features; blk_flags_t flags; long unsigned int seg_boundary_mask; long unsigned int virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_user_sectors; unsigned int max_segment_size; unsigned int physical_block_size; unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_user_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned int zone_write_granularity; unsigned int atomic_write_hw_max; unsigned int atomic_write_max_sectors; unsigned int atomic_write_hw_boundary; unsigned int atomic_write_boundary_sectors; unsigned int atomic_write_hw_unit_min; unsigned int atomic_write_unit_min; unsigned int atomic_write_hw_unit_max; unsigned int atomic_write_unit_max; short unsigned int max_segments; short unsigned int max_integrity_segments; short unsigned int max_discard_segments; unsigned int max_open_zones; unsigned int max_active_zones; unsigned int dma_alignment; unsigned int dma_pad_mask; struct blk_integrity integrity; }; struct queue_pages { struct list_head *pagelist; long unsigned int flags; nodemask_t *nmask; long unsigned int start; long unsigned int end; struct vm_area_struct *first; struct folio *large; long int nr_failed; }; struct queue_sysfs_entry { struct attribute attr; ssize_t (*show)(struct gendisk *, char *); int (*load_module)(struct gendisk *, const 
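/*
 * Illustrative sketch, not part of the generated dump: precomputing a
 * psched_ratecfg (defined earlier in this dump) from the tc_ratespec
 * above, then converting a frame length to transmit time the way
 * rate-limiting qdiscs do. example_l2t is a hypothetical helper.
 */
#include <net/pkt_sched.h>

static u64 example_l2t(void)
{
        struct psched_ratecfg rate;
        struct tc_ratespec spec = { .rate = 125000 }; /* 1 Mbit/s in bytes/s */

        psched_ratecfg_precompute(&rate, &spec, 0);
        return psched_l2t_ns(&rate, 1500); /* ns on the wire for 1500 bytes */
}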
char *, size_t); ssize_t (*store)(struct gendisk *, const char *, size_t); }; struct quirk_entry { u32 nominal_freq; u32 lowest_freq; }; struct quirks_list_struct { struct hid_device_id hid_bl_item; struct list_head node; }; struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); }; struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; }; struct quota_info { unsigned int flags; struct rw_semaphore dqio_sem; struct inode *files[3]; struct mem_dqinfo info[3]; const struct quota_format_ops *ops[3]; }; struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); }; struct ra_msg { struct icmp6hdr icmph; __be32 reachable_time; __be32 retrans_timer; }; struct xa_node; struct radix_tree_iter { long unsigned int index; long unsigned int next_index; long unsigned int tags; struct xa_node *node; }; struct radix_tree_preload { local_lock_t lock; unsigned int nr; struct xa_node *nodes; }; struct ramfs_mount_opts { umode_t mode; }; struct ramfs_fs_info { struct ramfs_mount_opts mount_opts; }; struct rand_data { void *hash_state; __u64 prev_time; __u64 last_delta; __s64 last_delta2; unsigned int flags; unsigned int osr; unsigned char *mem; unsigned int memlocation; unsigned int memblocks; unsigned int memblocksize; unsigned int memaccessloops; unsigned int rct_count; unsigned int apt_cutoff; unsigned int apt_cutoff_permanent; unsigned int apt_observations; unsigned int apt_count; unsigned int apt_base; unsigned int health_failure; unsigned int apt_base_set: 1; }; struct range_trans { u32 source_type; u32 target_type; u32 target_class; }; struct rapl_model { struct perf_msr *rapl_msrs; long unsigned int events; unsigned int msr_power_unit; enum rapl_unit_quirk unit_quirk; }; struct rapl_pmu { raw_spinlock_t lock; int n_active; int cpu; struct list_head active_list; struct pmu *pmu; ktime_t timer_interval; struct hrtimer hrtimer; }; struct rapl_pmus { struct pmu pmu; unsigned int nr_rapl_pmu; struct rapl_pmu *pmus[0]; }; struct rate_sample { u64 prior_mstamp; u32 prior_delivered; u32 prior_delivered_ce; s32 delivered; s32 delivered_ce; long int interval_us; u32 snd_interval_us; u32 rcv_interval_us; long int rtt_us; int losses; u32 acked_sacked; u32 prior_in_flight; u32 last_end_seq; bool is_app_limited; bool is_retrans; bool is_ack_delayed; }; struct raw6_frag_vec { struct msghdr *msg; int hlen; char c[4]; }; struct raw6_sock { struct inet_sock inet; __u32 checksum; __u32 offset; struct icmp6_filter filter; __u32 ip6mr_table; struct ipv6_pinfo inet6; }; struct 
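/*
 * Illustrative sketch, not part of the generated dump: iterating a
 * radix tree with struct radix_tree_iter above. dump_slots is
 * hypothetical; dereferencing slots is only safe under rcu_read_lock()
 * or the tree's own lock.
 */
#include <linux/radix-tree.h>

static void dump_slots(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, root, &iter, 0)
                pr_info("index %lu -> %p\n", iter.index, radix_tree_deref_slot(slot));
        rcu_read_unlock();
}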
raw_data_entry { struct trace_entry ent; unsigned int id; char buf[0]; }; struct raw_frag_vec { struct msghdr *msg; union { struct icmphdr icmph; char c[1]; } hdr; int hlen; }; struct raw_hashinfo { spinlock_t lock; struct hlist_head ht[256]; }; struct raw_hwp_page { struct llist_node node; struct page *page; }; struct raw_iter_state { struct seq_net_private p; int bucket; }; struct raw_sock { struct inet_sock inet; struct icmp_filter filter; u32 ipmr_table; }; struct rb_augment_callbacks { void (*propagate)(struct rb_node *, struct rb_node *); void (*copy)(struct rb_node *, struct rb_node *); void (*rotate)(struct rb_node *, struct rb_node *); }; struct rb_event_info { u64 ts; u64 delta; u64 before; u64 after; long unsigned int length; struct buffer_page *tail_page; int add_timestamp; }; struct rb_irq_work { struct irq_work work; wait_queue_head_t waiters; wait_queue_head_t full_waiters; atomic_t seq; bool waiters_pending; bool full_waiters_pending; bool wakeup_full; }; struct rb_list { struct rb_root root; struct list_head head; spinlock_t lock; }; struct rb_time_struct { local64_t time; }; typedef struct rb_time_struct rb_time_t; struct rb_wait_data { struct rb_irq_work *irq_work; int seq; }; struct rc { long int (*fill)(void *, long unsigned int); uint8_t *ptr; uint8_t *buffer; uint8_t *buffer_end; long int buffer_size; uint32_t code; uint32_t range; uint32_t bound; void (*error)(char *); }; struct rc_dec { uint32_t range; uint32_t code; uint32_t init_bytes_left; const uint8_t *in; size_t in_pos; size_t in_limit; }; struct rc_map_table; struct rc_map { struct rc_map_table *scan; unsigned int size; unsigned int len; unsigned int alloc; enum rc_proto rc_proto; const char *name; spinlock_t lock; }; struct rc_scancode_filter { u32 data; u32 mask; }; struct rc_dev { struct device dev; bool managed_alloc; const struct attribute_group *sysfs_groups[5]; const char *device_name; const char *input_phys; struct input_id input_id; const char *driver_name; const char *map_name; struct rc_map rc_map; struct mutex lock; unsigned int minor; struct ir_raw_event_ctrl *raw; struct input_dev *input_dev; enum rc_driver_type driver_type; bool idle; bool encode_wakeup; u64 allowed_protocols; u64 enabled_protocols; u64 allowed_wakeup_protocols; enum rc_proto wakeup_protocol; struct rc_scancode_filter scancode_filter; struct rc_scancode_filter scancode_wakeup_filter; u32 scancode_mask; u32 users; void *priv; spinlock_t keylock; bool keypressed; long unsigned int keyup_jiffies; struct timer_list timer_keyup; struct timer_list timer_repeat; u32 last_keycode; enum rc_proto last_protocol; u64 last_scancode; u8 last_toggle; u32 timeout; u32 min_timeout; u32 max_timeout; u32 rx_resolution; struct device lirc_dev; struct cdev lirc_cdev; ktime_t gap_start; spinlock_t lirc_fh_lock; struct list_head lirc_fh; bool registered; int (*change_protocol)(struct rc_dev *, u64 *); int (*open)(struct rc_dev *); void (*close)(struct rc_dev *); int (*s_tx_mask)(struct rc_dev *, u32); int (*s_tx_carrier)(struct rc_dev *, u32); int (*s_tx_duty_cycle)(struct rc_dev *, u32); int (*s_rx_carrier_range)(struct rc_dev *, u32, u32); int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int); void (*s_idle)(struct rc_dev *, bool); int (*s_wideband_receiver)(struct rc_dev *, int); int (*s_carrier_report)(struct rc_dev *, int); int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *); int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *); int (*s_timeout)(struct rc_dev *, unsigned int); }; struct rc_filter_attribute { 
struct device_attribute attr; enum rc_filter_type type; bool mask; }; struct rc_map_list { struct list_head list; struct rc_map map; }; struct rc_map_table { u64 scancode; u32 keycode; }; struct rcec_ea { u8 nextbusn; u8 lastbusn; u32 bitmap; }; struct rchan_callbacks; struct rchan_buf; struct rchan { u32 version; size_t subbuf_size; size_t n_subbufs; size_t alloc_size; const struct rchan_callbacks *cb; struct kref kref; void *private_data; size_t last_toobig; struct rchan_buf **buf; int is_global; struct list_head list; struct dentry *parent; int has_base_filename; char base_filename[255]; }; struct rchan_buf { void *start; void *data; size_t offset; size_t subbufs_produced; size_t subbufs_consumed; struct rchan *chan; wait_queue_head_t read_wait; struct irq_work wakeup_work; struct dentry *dentry; struct kref kref; struct page **page_array; unsigned int page_count; unsigned int finalized; size_t *padding; size_t prev_padding; size_t bytes_consumed; size_t early_bytes; unsigned int cpu; long: 64; long: 64; long: 64; }; struct rchan_callbacks { int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); int (*remove_buf_file)(struct dentry *); }; struct rchan_percpu_buf_dispatcher { struct rchan_buf *buf; struct dentry *dentry; }; struct rcu_cblist { struct callback_head *head; struct callback_head **tail; long int len; }; union rcu_noqs { struct { u8 norm; u8 exp; } b; u16 s; }; struct rcu_segcblist { struct callback_head *head; struct callback_head **tails[4]; long unsigned int gp_seq[4]; long int len; long int seglen[4]; u8 flags; }; struct rcu_snap_record { long unsigned int gp_seq; u64 cputime_irq; u64 cputime_softirq; u64 cputime_system; long unsigned int nr_hardirqs; unsigned int nr_softirqs; long long unsigned int nr_csw; long unsigned int jiffies; }; struct rcu_node; struct rcu_data { long unsigned int gp_seq; long unsigned int gp_seq_needed; union rcu_noqs cpu_no_qs; bool core_needs_qs; bool beenonline; bool gpwrap; bool cpu_started; struct rcu_node *mynode; long unsigned int grpmask; long unsigned int ticks_this_gp; struct irq_work defer_qs_iw; bool defer_qs_iw_pending; struct work_struct strict_work; struct rcu_segcblist cblist; long int qlen_last_fqs_check; long unsigned int n_cbs_invoked; long unsigned int n_force_qs_snap; long int blimit; int watching_snap; bool rcu_need_heavy_qs; bool rcu_urgent_qs; bool rcu_forced_tick; bool rcu_forced_tick_exp; long unsigned int barrier_seq_snap; struct callback_head barrier_head; int exp_watching_snap; struct task_struct *rcu_cpu_kthread_task; unsigned int rcu_cpu_kthread_status; char rcu_cpu_has_work; long unsigned int rcuc_activity; unsigned int softirq_snap; struct irq_work rcu_iw; bool rcu_iw_pending; long unsigned int rcu_iw_gp_seq; long unsigned int rcu_ofl_gp_seq; short int rcu_ofl_gp_state; long unsigned int rcu_onl_gp_seq; short int rcu_onl_gp_state; long unsigned int last_fqs_resched; long unsigned int last_sched_clock; struct rcu_snap_record snap_record; long int lazy_len; int cpu; }; struct rcu_exp_work { long unsigned int rew_s; struct kthread_work rew_work; }; struct rt_mutex { struct rt_mutex_base rtmutex; struct lockdep_map dep_map; }; struct rcu_node { raw_spinlock_t lock; long unsigned int gp_seq; long unsigned int gp_seq_needed; long unsigned int completedqs; long unsigned int qsmask; long unsigned int rcu_gp_init_mask; long unsigned int qsmaskinit; long unsigned int qsmaskinitnext; long unsigned int expmask; 
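/*
 * Illustrative sketch, not part of the generated dump: a static IR
 * keymap expressed with rc_map_table and rc_map_list above. The
 * scancodes and the "rc-my-remote" name are hypothetical.
 */
#include <linux/kernel.h>
#include <media/rc-map.h>

static struct rc_map_table my_keymap[] = {
        { 0x01, KEY_POWER },
        { 0x02, KEY_MUTE },
};

static struct rc_map_list my_map_list = {
        .map = {
                .scan = my_keymap,
                .size = ARRAY_SIZE(my_keymap),
                .rc_proto = RC_PROTO_NEC,
                .name = "rc-my-remote",
        },
};

/* pair rc_map_register(&my_map_list) with rc_map_unregister(&my_map_list) */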
long unsigned int expmaskinit; long unsigned int expmaskinitnext; struct kthread_worker *exp_kworker; long unsigned int cbovldmask; long unsigned int ffmask; long unsigned int grpmask; int grplo; int grphi; u8 grpnum; u8 level; bool wait_blkd_tasks; struct rcu_node *parent; struct list_head blkd_tasks; struct list_head *gp_tasks; struct list_head *exp_tasks; struct list_head *boost_tasks; struct rt_mutex boost_mtx; long unsigned int boost_time; struct mutex kthread_mutex; struct task_struct *boost_kthread_task; unsigned int boost_kthread_status; long unsigned int n_boosts; long: 64; long: 64; long: 64; long: 64; raw_spinlock_t fqslock; spinlock_t exp_lock; long unsigned int exp_seq_rq; wait_queue_head_t exp_wq[4]; struct rcu_exp_work rew; bool exp_need_flush; raw_spinlock_t exp_poll_lock; long unsigned int exp_seq_poll_rq; struct work_struct exp_poll_wq; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; union rcu_special { struct { u8 blocked; u8 need_qs; u8 exp_hint; u8 need_mb; } b; u32 s; }; struct rcu_stall_chk_rdr { int nesting; union rcu_special rs; bool on_blkd_list; }; struct sr_wait_node { atomic_t inuse; struct llist_node node; }; struct rcu_state { struct rcu_node node[9]; struct rcu_node *level[3]; int ncpus; int n_online_cpus; long: 64; long: 64; long: 64; long: 64; long unsigned int gp_seq; long unsigned int gp_max; struct task_struct *gp_kthread; struct swait_queue_head gp_wq; short int gp_flags; short int gp_state; long unsigned int gp_wake_time; long unsigned int gp_wake_seq; long unsigned int gp_seq_polled; long unsigned int gp_seq_polled_snap; long unsigned int gp_seq_polled_exp_snap; struct mutex barrier_mutex; atomic_t barrier_cpu_count; struct completion barrier_completion; long unsigned int barrier_sequence; raw_spinlock_t barrier_lock; struct mutex exp_mutex; struct mutex exp_wake_mutex; long unsigned int expedited_sequence; atomic_t expedited_need_qs; struct swait_queue_head expedited_wq; int ncpus_snap; u8 cbovld; u8 cbovldnext; long unsigned int jiffies_force_qs; long unsigned int jiffies_kick_kthreads; long unsigned int n_force_qs; long unsigned int gp_start; long unsigned int gp_end; long unsigned int gp_activity; long unsigned int gp_req_activity; long unsigned int jiffies_stall; int nr_fqs_jiffies_stall; long unsigned int jiffies_resched; long unsigned int n_force_qs_gpstart; const char *name; char abbr; long: 0; arch_spinlock_t ofl_lock; struct llist_head srs_next; struct llist_node *srs_wait_tail; struct llist_node *srs_done_tail; struct sr_wait_node srs_wait_nodes[5]; struct work_struct srs_cleanup_work; atomic_t srs_cleanups_pending; }; struct rcu_synchronize { struct callback_head head; struct completion completion; }; struct rcu_tasks; typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); typedef void (*pregp_func_t)(struct list_head *); typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); typedef void (*postscan_func_t)(struct list_head *); typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); typedef void (*postgp_func_t)(struct rcu_tasks *); typedef void (*rcu_callback_t)(struct callback_head *); typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); struct rcu_tasks_percpu; struct rcu_tasks { struct rcuwait cbs_wait; raw_spinlock_t cbs_gbl_lock; struct mutex tasks_gp_mutex; int gp_state; int gp_sleep; int init_fract; long unsigned int gp_jiffies; long unsigned int gp_start; long unsigned int tasks_gp_seq; long unsigned int n_ipis; long unsigned int n_ipis_fails; struct task_struct 
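/*
 * Illustrative sketch, not part of the generated dump: deferring a free
 * with call_rcu() via the rcu_callback_t typedef above. struct my_obj
 * is hypothetical; callback_head is how this dump spells rcu_head.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        int payload;
        struct callback_head rcu;
};

static void my_obj_free_rcu(struct callback_head *head)
{
        kfree(container_of(head, struct my_obj, rcu));
}

/* call once the object is unlinked from every RCU-visible structure */
static void my_obj_release(struct my_obj *obj)
{
        call_rcu(&obj->rcu, my_obj_free_rcu);
}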
*kthread_ptr; long unsigned int lazy_jiffies; rcu_tasks_gp_func_t gp_func; pregp_func_t pregp_func; pertask_func_t pertask_func; postscan_func_t postscan_func; holdouts_func_t holdouts_func; postgp_func_t postgp_func; call_rcu_func_t call_func; unsigned int wait_state; struct rcu_tasks_percpu *rtpcpu; struct rcu_tasks_percpu **rtpcp_array; int percpu_enqueue_shift; int percpu_enqueue_lim; int percpu_dequeue_lim; long unsigned int percpu_dequeue_gpseq; struct mutex barrier_q_mutex; atomic_t barrier_q_count; struct completion barrier_q_completion; long unsigned int barrier_q_seq; long unsigned int barrier_q_start; char *name; char *kname; }; struct rcu_tasks_percpu { struct rcu_segcblist cblist; raw_spinlock_t lock; long unsigned int rtp_jiffies; long unsigned int rtp_n_lock_retries; struct timer_list lazy_timer; unsigned int urgent_gp; struct work_struct rtp_work; struct irq_work rtp_irq_work; struct callback_head barrier_q_head; struct list_head rtp_blkd_tasks; struct list_head rtp_exit_list; int cpu; int index; struct rcu_tasks *rtpp; }; struct rcu_tasks_test_desc { struct callback_head rh; const char *name; bool notrun; long unsigned int runstart; }; struct rd_msg { struct icmp6hdr icmph; struct in6_addr target; struct in6_addr dest; __u8 opt[0]; }; struct rdma_ah_init_attr { struct rdma_ah_attr *ah_attr; u32 flags; struct net_device *xmit_slave; }; struct rdma_counter { struct rdma_restrack_entry res; struct ib_device *device; uint32_t id; struct kref kref; struct rdma_counter_mode mode; struct mutex lock; struct rdma_hw_stats *stats; u32 port; }; struct rdma_stat_desc; struct rdma_hw_stats { struct mutex lock; long unsigned int timestamp; long unsigned int lifespan; const struct rdma_stat_desc *descs; long unsigned int *is_disabled; int num_counters; u64 value[0]; }; struct rdma_link_ops { struct list_head list; const char *type; int (*newlink)(const char *, struct net_device *); }; struct rdma_netdev_alloc_params { size_t sizeof_priv; unsigned int txqs; unsigned int rxqs; void *param; int (*initialize_rdma_netdev)(struct ib_device *, u32, struct net_device *, void *); }; struct rdma_stat_desc { const char *name; unsigned int flags; const void *priv; }; struct rdma_user_mmap_entry { struct kref ref; struct ib_ucontext *ucontext; long unsigned int start_pgoff; size_t npages; bool driver_removed; }; struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; long unsigned int _index; unsigned int _nr_pages; unsigned int _batch_count; bool _workingset; long unsigned int _pflags; }; struct readdir_callback { struct dir_context ctx; struct old_linux_dirent *dirent; int result; }; struct real_mode_header { u32 text_start; u32 ro_end; u32 trampoline_start; u32 trampoline_header; u32 trampoline_start64; u32 trampoline_pgd; u32 wakeup_start; u32 wakeup_header; u32 machine_real_restart_asm; u32 machine_real_restart_seg; }; struct reciprocal_value_adv { u32 m; u8 sh; u8 exp; bool is_wide_m; }; struct reclaim_stat { unsigned int nr_dirty; unsigned int nr_unqueued_dirty; unsigned int nr_congested; unsigned int nr_writeback; unsigned int nr_immediate; unsigned int nr_pageout; unsigned int nr_activate[2]; unsigned int nr_ref_keep; unsigned int nr_unmap_fail; unsigned int nr_lazyfree_fail; unsigned int nr_demoted; }; struct reclaim_state { long unsigned int reclaimed; }; struct recovery_info { tid_t start_transaction; tid_t end_transaction; long unsigned int head_block; int nr_replays; int nr_revokes; int nr_revoke_hits; }; typedef int 
(*regex_match_func)(char *, struct regex *, int); struct regex { char pattern[256]; int len; int field_len; regex_match_func match; }; struct region { unsigned int start; unsigned int off; unsigned int group_len; unsigned int end; unsigned int nbits; }; struct region_devres { struct resource *parent; resource_size_t start; resource_size_t n; }; typedef int (*remote_function_f)(void *); struct remote_function_call { struct task_struct *p; remote_function_f func; void *info; int ret; }; struct remote_output { struct perf_buffer *rb; int err; }; struct renamedata { struct mnt_idmap *old_mnt_idmap; struct inode *old_dir; struct dentry *old_dentry; struct mnt_idmap *new_mnt_idmap; struct inode *new_dir; struct dentry *new_dentry; struct inode **delegated_inode; unsigned int flags; }; struct reply_func { int type; int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); }; struct req { struct req *next; struct completion done; int err; const char *name; umode_t mode; kuid_t uid; kgid_t gid; struct device *dev; }; struct req_iterator { struct bvec_iter iter; struct bio *bio; }; typedef enum rq_end_io_ret rq_end_io_fn(struct request *, blk_status_t); struct request { struct request_queue *q; struct blk_mq_ctx *mq_ctx; struct blk_mq_hw_ctx *mq_hctx; blk_opf_t cmd_flags; req_flags_t rq_flags; int tag; int internal_tag; unsigned int timeout; unsigned int __data_len; sector_t __sector; struct bio *bio; struct bio *biotail; union { struct list_head queuelist; struct request *rq_next; }; struct block_device *part; u64 start_time_ns; u64 io_start_time_ns; short unsigned int stats_sectors; short unsigned int nr_phys_segments; short unsigned int nr_integrity_segments; enum rw_hint write_hint; short unsigned int ioprio; enum mq_rq_state state; atomic_t ref; long unsigned int deadline; union { struct hlist_node hash; struct llist_node ipi_list; }; union { struct rb_node rb_node; struct bio_vec special_vec; }; struct { struct io_cq *icq; void *priv[2]; } elv; struct { unsigned int seq; rq_end_io_fn *saved_end_io; } flush; u64 fifo_time; rq_end_io_fn *end_io; void *end_io_data; }; struct request_key_auth { struct callback_head rcu; struct key *target_key; struct key *dest_keyring; const struct cred *cred; void *callout_info; size_t callout_len; pid_t pid; char op[8]; }; struct throtl_data; struct request_queue { void *queuedata; struct elevator_queue *elevator; const struct blk_mq_ops *mq_ops; struct blk_mq_ctx *queue_ctx; long unsigned int queue_flags; unsigned int rq_timeout; unsigned int queue_depth; refcount_t refs; unsigned int nr_hw_queues; struct xarray hctx_table; struct percpu_ref q_usage_counter; struct request *last_merge; spinlock_t queue_lock; int quiesce_depth; struct gendisk *disk; struct kobject *mq_kobj; struct queue_limits limits; struct device *dev; enum rpm_status rpm_status; atomic_t pm_only; struct blk_queue_stats *stats; struct rq_qos *rq_qos; struct mutex rq_qos_mutex; int id; long unsigned int nr_requests; struct timer_list timeout; struct work_struct timeout_work; atomic_t nr_active_requests_shared_tags; struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; long unsigned int blkcg_pols[1]; struct blkcg_gq *root_blkg; struct list_head blkg_list; struct mutex blkcg_mutex; int node; spinlock_t requeue_lock; struct list_head requeue_list; struct delayed_work requeue_work; struct blk_trace *blk_trace; struct blk_flush_queue *fq; struct list_head flush_list; struct mutex sysfs_lock; struct mutex sysfs_dir_lock; struct mutex limits_lock; struct 
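/*
 * Illustrative sketch, not part of the generated dump: summing the data
 * bytes of a block request with struct req_iterator above and the
 * rq_for_each_segment() helper. count_rq_bytes is hypothetical.
 */
#include <linux/blk-mq.h>

static unsigned int count_rq_bytes(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter)
                bytes += bvec.bv_len;

        return bytes;
}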
list_head unused_hctx_list; spinlock_t unused_hctx_lock; int mq_freeze_depth; struct throtl_data *td; struct callback_head callback_head; wait_queue_head_t mq_freeze_wq; struct mutex mq_freeze_lock; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; struct mutex debugfs_mutex; bool mq_sysfs_init_done; }; struct request_sock__safe_rcu_or_null { struct sock *sk; }; struct request_sock_ops { int family; unsigned int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *, enum sk_rst_reason); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); }; struct res_proc_context { struct list_head *list; int (*preproc)(struct acpi_resource *, void *); void *preproc_data; int count; int error; }; struct reserve_mem_table { char name[16]; phys_addr_t start; phys_addr_t size; }; typedef resource_size_t (*resource_alignf)(void *, const struct resource *, resource_size_t, resource_size_t); struct resource_constraint { resource_size_t min; resource_size_t max; resource_size_t align; resource_alignf alignf; void *alignf_data; }; struct resource_entry { struct list_head node; struct resource *res; resource_size_t offset; struct resource __res; }; struct resource_win { struct resource res; resource_size_t offset; }; struct restart_block { long unsigned int arch_data; long int (*fn)(struct restart_block *); union { struct { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } futex; struct { clockid_t clockid; enum timespec_type type; union { struct __kernel_timespec *rmtp; struct old_timespec32 *compat_rmtp; }; u64 expires; } nanosleep; struct { struct pollfd *ufds; int nfds; int has_timeout; long unsigned int tv_sec; long unsigned int tv_nsec; } poll; }; }; struct resv_map { struct kref refs; spinlock_t lock; struct list_head regions; long int adds_in_progress; struct list_head region_cache; long int region_cache_count; struct rw_semaphore rw_sema; struct page_counter *reservation_counter; long unsigned int pages_per_hpage; struct cgroup_subsys_state *css; }; struct rethook { void *data; void (*handler)(struct rethook_node *, void *, long unsigned int, struct pt_regs *); struct objpool_head pool; struct callback_head rcu; }; struct return_instance { struct uprobe *uprobe; long unsigned int func; long unsigned int stack; long unsigned int orig_ret_vaddr; bool chained; struct return_instance *next; }; struct reuseport_array { struct bpf_map map; struct sock *ptrs[0]; }; struct rgb { u8 r; u8 g; u8 b; }; struct rhash_lock_head {}; struct rhashtable_compare_arg { struct rhashtable *ht; const void *key; }; struct ring_buffer_event { u32 type_len: 5; u32 time_delta: 27; u32 array[0]; }; struct ring_buffer_per_cpu; struct ring_buffer_iter { struct ring_buffer_per_cpu *cpu_buffer; long unsigned int head; long unsigned int next_event; struct buffer_page *head_page; struct buffer_page *cache_reader_page; long unsigned int cache_read; long unsigned int cache_pages_removed; u64 read_stamp; u64 page_stamp; struct ring_buffer_event *event; size_t event_size; int missed_events; }; struct ring_buffer_meta { int magic; int struct_size; long unsigned int text_addr; long unsigned int data_addr; long unsigned int first_buffer; long unsigned int head_buffer; long unsigned 
int commit_buffer; __u32 subbuf_size; __u32 nr_subbufs; int buffers[0]; }; struct trace_buffer_meta; struct ring_buffer_per_cpu { int cpu; atomic_t record_disabled; atomic_t resize_disabled; struct trace_buffer *buffer; raw_spinlock_t reader_lock; arch_spinlock_t lock; struct lock_class_key lock_key; struct buffer_data_page *free_page; long unsigned int nr_pages; unsigned int current_context; struct list_head *pages; struct buffer_page *head_page; struct buffer_page *tail_page; struct buffer_page *commit_page; struct buffer_page *reader_page; long unsigned int lost_events; long unsigned int last_overrun; long unsigned int nest; local_t entries_bytes; local_t entries; local_t overrun; local_t commit_overrun; local_t dropped_events; local_t committing; local_t commits; local_t pages_touched; local_t pages_lost; local_t pages_read; long int last_pages_touch; size_t shortest_full; long unsigned int read; long unsigned int read_bytes; rb_time_t write_stamp; rb_time_t before_stamp; u64 event_stamp[5]; u64 read_stamp; long unsigned int pages_removed; unsigned int mapped; unsigned int user_mapped; struct mutex mapping_lock; long unsigned int *subbuf_ids; struct trace_buffer_meta *meta_page; struct ring_buffer_meta *ring_meta; long int nr_pages_to_update; struct list_head new_pages; struct work_struct update_pages_work; struct completion update_done; struct rb_irq_work irq_work; }; struct rings_reply_data { struct ethnl_reply_data base; struct ethtool_ringparam ringparam; struct kernel_ethtool_ringparam kernel_ringparam; u32 supported_ring_params; }; struct rlb_client_info { __be32 ip_src; __be32 ip_dst; u8 mac_src[6]; u8 mac_dst[6]; u32 used_next; u32 used_prev; u32 src_next; u32 src_prev; u32 src_first; u8 assigned; u8 ntt; struct slave *slave; short unsigned int vlan_id; }; struct rlimit64 { __u64 rlim_cur; __u64 rlim_max; }; struct rmap_walk_arg { struct folio *folio; bool map_unused_to_zeropage; }; struct rmap_walk_control { void *arg; bool try_lock; bool contended; bool (*rmap_one)(struct folio *, struct vm_area_struct *, long unsigned int, void *); int (*done)(struct folio *); struct anon_vma * (*anon_lock)(struct folio *, struct rmap_walk_control *); bool (*invalid_vma)(struct vm_area_struct *, void *); }; struct rnd_state { __u32 s1; __u32 s2; __u32 s3; __u32 s4; }; struct rng_alg { int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); int (*seed)(struct crypto_rng *, const u8 *, unsigned int); void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); unsigned int seedsize; struct crypto_alg base; }; struct robust_list { struct robust_list *next; }; struct robust_list_head { struct robust_list list; long int futex_offset; struct robust_list *list_op_pending; }; struct role_allow { u32 role; u32 new_role; struct role_allow *next; }; struct role_datum { u32 value; u32 bounds; struct ebitmap dominates; struct ebitmap types; }; struct role_trans_datum { u32 new_role; }; struct role_trans_key { u32 role; u32 type; u32 tclass; }; struct romfs_super_block { __be32 word0; __be32 word1; __be32 size; __be32 checksum; char name[0]; }; struct root_device { struct device dev; struct module *owner; }; struct root_domain { atomic_t refcount; atomic_t rto_count; struct callback_head rcu; cpumask_var_t span; cpumask_var_t online; bool overloaded; bool overutilized; cpumask_var_t dlo_mask; atomic_t dlo_count; struct dl_bw dl_bw; struct cpudl cpudl; u64 visit_gen; struct irq_work rto_push_work; raw_spinlock_t rto_lock; int rto_loop; int rto_cpu; atomic_t rto_loop_next; 
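/*
 * Illustrative sketch, not part of the generated dump: a seeded,
 * reproducible PRNG stream using struct rnd_state above.
 * reproducible_u32 is a hypothetical helper.
 */
#include <linux/prandom.h>

static u32 reproducible_u32(void)
{
        struct rnd_state state;

        prandom_seed_state(&state, 42); /* same seed => same sequence */
        return prandom_u32_state(&state);
}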
atomic_t rto_loop_start; cpumask_var_t rto_mask; struct cpupri cpupri; struct perf_domain *pd; }; struct route_info { __u8 type; __u8 length; __u8 prefix_len; __u8 reserved_l: 3; __u8 route_pref: 2; __u8 reserved_h: 3; __be32 lifetime; __u8 prefix[0]; }; struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; }; struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0]; }; struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0]; }; struct rps_sock_flow_table { u32 mask; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 ents[0]; }; struct rt_prio_array { long unsigned int bitmap[2]; struct list_head queue[100]; }; struct rt_rq { struct rt_prio_array active; unsigned int rt_nr_running; unsigned int rr_nr_running; struct { int curr; int next; } highest_prio; bool overloaded; struct plist_head pushable_tasks; int rt_queued; }; struct sched_dl_entity; typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); typedef struct task_struct * (*dl_server_pick_f)(struct sched_dl_entity *); struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; u64 dl_density; s64 runtime; u64 deadline; unsigned int flags; unsigned int dl_throttled: 1; unsigned int dl_yielded: 1; unsigned int dl_non_contending: 1; unsigned int dl_overrun: 1; unsigned int dl_server: 1; unsigned int dl_defer: 1; unsigned int dl_defer_armed: 1; unsigned int dl_defer_running: 1; struct hrtimer dl_timer; struct hrtimer inactive_timer; struct rq *rq; dl_server_has_tasks_f server_has_tasks; dl_server_pick_f server_pick_task; struct sched_dl_entity *pi_se; }; struct sched_info { long unsigned int pcount; long long unsigned int run_delay; long long unsigned int last_arrival; long long unsigned int last_queued; }; struct rq { raw_spinlock_t __lock; unsigned int nr_running; unsigned int nr_numa_running; unsigned int nr_preferred_running; unsigned int numa_migrate_on; long unsigned int last_blocked_load_update_tick; unsigned int has_blocked_load; call_single_data_t nohz_csd; unsigned int nohz_tick_stopped; atomic_t nohz_flags; unsigned int ttwu_pending; u64 nr_switches; long: 64; long: 64; long: 64; long: 64; long: 64; struct cfs_rq cfs; struct rt_rq rt; struct dl_rq dl; struct sched_dl_entity fair_server; struct list_head leaf_cfs_rq_list; struct list_head *tmp_alone_branch; unsigned int nr_uninterruptible; struct task_struct *curr; struct sched_dl_entity *dl_server; struct task_struct *idle; struct task_struct *stop; long unsigned int next_balance; struct mm_struct *prev_mm; unsigned int clock_update_flags; u64 clock; u64 clock_task; u64 clock_pelt; long unsigned int lost_idle_time; u64 clock_pelt_idle; u64 clock_idle; atomic_t nr_iowait; u64 last_seen_need_resched_ns; int ticks_without_resched; int membarrier_state; struct root_domain *rd; struct sched_domain *sd; long unsigned int cpu_capacity; struct balance_callback *balance_callback; unsigned char nohz_idle_balance; unsigned char idle_balance; long unsigned int misfit_task_load; int active_balance; int push_cpu; struct cpu_stop_work active_balance_work; int cpu; int online; struct list_head cfs_tasks; struct sched_avg avg_rt; struct sched_avg avg_dl; u64 idle_stamp; u64 avg_idle; u64 max_idle_balance_cost; struct rcuwait hotplug_wait; long unsigned int calc_load_update; long int calc_load_active; long: 64; long: 64; call_single_data_t hrtick_csd; struct hrtimer hrtick_timer; ktime_t hrtick_time; struct sched_info rq_sched_info; long 
long unsigned int rq_cpu_time; unsigned int yld_count; unsigned int sched_count; unsigned int sched_goidle; unsigned int ttwu_count; unsigned int ttwu_local; struct cpuidle_state *idle_state; unsigned int nr_pinned; unsigned int push_busy; struct cpu_stop_work push_work; cpumask_var_t scratch_mask; long: 64; call_single_data_t cfsb_csd; struct list_head cfsb_csd_list; long: 64; long: 64; }; struct rq_depth { unsigned int max_depth; int scale_step; bool scaled_max; unsigned int queue_depth; unsigned int default_depth; }; struct rq_iter_data { struct blk_mq_hw_ctx *hctx; bool has_rq; }; struct rq_map_data { struct page **pages; long unsigned int offset; short unsigned int page_order; short unsigned int nr_entries; bool null_mapped; bool from_user; }; struct rq_qos_ops { void (*throttle)(struct rq_qos *, struct bio *); void (*track)(struct rq_qos *, struct request *, struct bio *); void (*merge)(struct rq_qos *, struct request *, struct bio *); void (*issue)(struct rq_qos *, struct request *); void (*requeue)(struct rq_qos *, struct request *); void (*done)(struct rq_qos *, struct request *); void (*done_bio)(struct rq_qos *, struct bio *); void (*cleanup)(struct rq_qos *, struct bio *); void (*queue_depth_changed)(struct rq_qos *); void (*exit)(struct rq_qos *); const struct blk_mq_debugfs_attr *debugfs_attrs; }; typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); struct rq_qos_wait_data { struct wait_queue_entry wq; struct task_struct *task; struct rq_wait *rqw; acquire_inflight_cb_t *cb; void *private_data; bool got_token; }; struct rs_msg { struct icmp6hdr icmph; __u8 opt[0]; }; struct rsa_asn1_template { const char *name; const u8 *data; size_t size; }; struct rsa_key { const u8 *n; const u8 *e; const u8 *d; const u8 *p; const u8 *q; const u8 *dp; const u8 *dq; const u8 *qinv; size_t n_sz; size_t e_sz; size_t d_sz; size_t p_sz; size_t q_sz; size_t dp_sz; size_t dq_sz; size_t qinv_sz; }; struct rsa_mpi_key { MPI n; MPI e; MPI d; MPI p; MPI q; MPI dp; MPI dq; MPI qinv; }; struct rseq { __u32 cpu_id_start; __u32 cpu_id; __u64 rseq_cs; __u32 flags; __u32 node_id; __u32 mm_cid; char end[0]; }; struct rseq_cs { __u32 version; __u32 flags; __u64 start_ip; __u64 post_commit_offset; __u64 abort_ip; }; struct rss_nl_dump_ctx { long unsigned int ifindex; long unsigned int ctx_idx; unsigned int match_ifindex; unsigned int start_ctx; }; struct rss_reply_data { struct ethnl_reply_data base; bool no_key_fields; u32 indir_size; u32 hkey_size; u32 hfunc; u32 input_xfrm; u32 *indir_table; u8 *hkey; }; struct rss_req_info { struct ethnl_req_info base; u32 rss_context; }; struct rsvd_count { int ndelayed; bool first_do_lblk_found; ext4_lblk_t first_do_lblk; ext4_lblk_t last_do_lblk; struct extent_status *left_es; bool partial; ext4_lblk_t lclu; }; struct rt0_hdr { struct ipv6_rt_hdr rt_hdr; __u32 reserved; struct in6_addr addr[0]; }; struct rt2_hdr { struct ipv6_rt_hdr rt_hdr; __u32 reserved; struct in6_addr addr; }; struct rt6_exception { struct hlist_node hlist; struct rt6_info *rt6i; long unsigned int stamp; struct callback_head rcu; }; struct rt6_exception_bucket { struct hlist_head chain; int depth; }; struct rt6_info { struct dst_entry dst; struct fib6_info *from; int sernum; struct rt6key rt6i_dst; struct rt6key rt6i_src; struct in6_addr rt6i_gateway; struct inet6_dev *rt6i_idev; u32 rt6i_flags; short unsigned int rt6i_nfheader_len; }; struct rt6_mtu_change_arg { struct net_device *dev; unsigned int mtu; struct fib6_info *f6i; }; struct rt6_nh { struct fib6_info *fib6_info; struct 
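/*
 * Illustrative sketch, not part of the generated dump: parsing a
 * DER-encoded RSA public key into struct rsa_key above with the
 * kernel's ASN.1 helper. report_modulus_size is hypothetical.
 */
#include <crypto/internal/rsa.h>

static int report_modulus_size(const void *der, unsigned int len)
{
        struct rsa_key key = {};
        int ret;

        ret = rsa_parse_pub_key(&key, der, len);
        if (ret)
                return ret;

        pr_info("RSA modulus is %zu bytes\n", key.n_sz);
        return 0;
}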
fib6_config r_cfg; struct list_head next; }; struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; struct net *net; struct fib_dump_filter filter; }; struct rt6_statistics { __u32 fib_nodes; __u32 fib_route_nodes; __u32 fib_rt_entries; __u32 fib_rt_cache; __u32 fib_discarded_routes; atomic_t fib_rt_alloc; }; struct rt_cache_stat { unsigned int in_slow_tot; unsigned int in_slow_mc; unsigned int in_no_route; unsigned int in_brd; unsigned int in_martian_dst; unsigned int in_martian_src; unsigned int out_slow_tot; unsigned int out_slow_mc; }; struct rt_waiter_node { struct rb_node entry; int prio; u64 deadline; }; struct rt_mutex_waiter { struct rt_waiter_node tree; struct rt_waiter_node pi_tree; struct task_struct *task; struct rt_mutex_base *lock; unsigned int wake_state; struct ww_acquire_ctx *ww_ctx; }; typedef struct rt_rq *rt_rq_iter_t; struct sigaltstack { void *ss_sp; int ss_flags; __kernel_size_t ss_size; }; typedef struct sigaltstack stack_t; struct sigcontext_64 { __u64 r8; __u64 r9; __u64 r10; __u64 r11; __u64 r12; __u64 r13; __u64 r14; __u64 r15; __u64 di; __u64 si; __u64 bp; __u64 bx; __u64 dx; __u64 ax; __u64 cx; __u64 sp; __u64 ip; __u64 flags; __u16 cs; __u16 gs; __u16 fs; __u16 ss; __u64 err; __u64 trapno; __u64 oldmask; __u64 cr2; __u64 fpstate; __u64 reserved1[8]; }; struct ucontext { long unsigned int uc_flags; struct ucontext *uc_link; stack_t uc_stack; struct sigcontext_64 uc_mcontext; sigset_t uc_sigmask; }; struct rt_sigframe { char *pretcode; struct ucontext uc; struct siginfo info; }; struct wake_q_node; struct wake_q_head { struct wake_q_node *first; struct wake_q_node **lastp; }; struct rt_wake_q_head { struct wake_q_head head; struct task_struct *rtlock_task; }; struct rta_cacheinfo { __u32 rta_clntref; __u32 rta_lastuse; __s32 rta_expires; __u32 rta_error; __u32 rta_used; __u32 rta_id; __u32 rta_ts; __u32 rta_tsage; }; struct rta_mfc_stats { __u64 mfcs_packets; __u64 mfcs_bytes; __u64 mfcs_wrong_if; }; struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; u8 rt_gw_family; union { __be32 rt_gw4; struct in6_addr rt_gw6; }; u32 rt_mtu_locked: 1; u32 rt_pmtu: 31; }; struct rtc_wkalrm; struct rtc_param; struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, long unsigned int); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*alarm_irq_enable)(struct device *, unsigned int); int (*read_offset)(struct device *, long int *); int (*set_offset)(struct device *, long int); int (*param_get)(struct device *, struct rtc_param *); int (*param_set)(struct device *, struct rtc_param *); }; struct rtc_device; struct rtc_timer { struct timerqueue_node node; ktime_t period; void (*func)(struct rtc_device *); struct rtc_device *rtc; int enabled; }; struct rtc_device { struct device dev; struct module *owner; int id; const struct rtc_class_ops *ops; struct mutex ops_lock; struct cdev char_dev; long unsigned int flags; long unsigned int irq_data; spinlock_t irq_lock; wait_queue_head_t irq_queue; struct fasync_struct *async_queue; int irq_freq; int max_user_freq; struct timerqueue_head timerqueue; struct rtc_timer aie_timer; struct rtc_timer uie_rtctimer; struct hrtimer pie_timer; int pie_enabled; struct work_struct irqwork; long unsigned int 
set_offset_nsec; long unsigned int features[1]; time64_t range_min; timeu64_t range_max; timeu64_t alarm_offset_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; }; struct rtc_param { __u64 param; union { __u64 uvalue; __s64 svalue; __u64 ptr; }; __u32 index; __u32 __pad; }; struct rtc_time { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; int tm_year; int tm_wday; int tm_yday; int tm_isdst; }; struct rtc_wkalrm { unsigned char enabled; unsigned char pending; struct rtc_time time; }; struct rtentry { long unsigned int rt_pad1; struct sockaddr rt_dst; struct sockaddr rt_gateway; struct sockaddr rt_genmask; short unsigned int rt_flags; short int rt_pad2; long unsigned int rt_pad3; void *rt_pad4; short int rt_metric; char *rt_dev; long unsigned int rt_mtu; long unsigned int rt_window; short unsigned int rt_irtt; }; struct rtgenmsg { unsigned char rtgen_family; }; struct rtm_dump_res_bucket_ctx; struct rtm_dump_nexthop_bucket_data { struct rtm_dump_res_bucket_ctx *ctx; struct nh_dump_filter filter; }; struct rtm_dump_nh_ctx { u32 idx; }; struct rtm_dump_res_bucket_ctx { struct rtm_dump_nh_ctx nh; u16 bucket_index; }; struct rtmsg { unsigned char rtm_family; unsigned char rtm_dst_len; unsigned char rtm_src_len; unsigned char rtm_tos; unsigned char rtm_table; unsigned char rtm_protocol; unsigned char rtm_scope; unsigned char rtm_type; unsigned int rtm_flags; }; struct rtnexthop { short unsigned int rtnh_len; unsigned char rtnh_flags; unsigned char rtnh_hops; int rtnh_ifindex; }; struct rtnl_af_ops { struct list_head list; int family; int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); size_t (*get_link_af_size)(const struct net_device *, u32); int (*validate_link_af)(const struct net_device *, const struct nlattr *, struct netlink_ext_ack *); int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); int (*fill_stats_af)(struct sk_buff *, const struct net_device *); size_t (*get_stats_af_size)(const struct net_device *); }; typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; struct module *owner; unsigned int flags; struct callback_head rcu; }; struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; struct net_device * (*alloc)(struct nlattr **, const char *, unsigned char, unsigned int, unsigned int); void (*setup)(struct net_device *); bool netns_refund; unsigned int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(void); unsigned int (*get_num_rx_queues)(void); unsigned int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_changelink)(struct net_device *, struct net_device *, struct 
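/*
 * struct rtc_time above is the payload of the RTC character-device
 * ioctls; kernel-side, rtc_class_ops::read_time fills it in. A small
 * userspace sketch (the /dev/rtc0 node name is system-dependent):
 *
 *   #include <fcntl.h>
 *   #include <linux/rtc.h>
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *
 *   int print_rtc_time(void)
 *   {
 *       struct rtc_time tm;
 *       int fd = open("/dev/rtc0", O_RDONLY);
 *
 *       if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
 *           return -1;
 *       // tm_year counts from 1900 and tm_mon from 0, as in struct tm
 *       printf("%04d-%02d-%02d %02d:%02d:%02d\n",
 *              tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *              tm.tm_hour, tm.tm_min, tm.tm_sec);
 *       close(fd);
 *       return 0;
 *   }
 */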
nlattr **, struct nlattr **, struct netlink_ext_ack *); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); }; struct rtnl_link_stats { __u32 rx_packets; __u32 tx_packets; __u32 rx_bytes; __u32 tx_bytes; __u32 rx_errors; __u32 tx_errors; __u32 rx_dropped; __u32 tx_dropped; __u32 multicast; __u32 collisions; __u32 rx_length_errors; __u32 rx_over_errors; __u32 rx_crc_errors; __u32 rx_frame_errors; __u32 rx_fifo_errors; __u32 rx_missed_errors; __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; }; struct rtnl_mdb_dump_ctx { long int idx; }; struct rtnl_net_dump_cb { struct net *tgt_net; struct net *ref_net; struct sk_buff *skb; struct net_fill_args fillargs; int idx; int s_idx; }; struct rtnl_newlink_tbs { struct nlattr *tb[66]; struct nlattr *attr[51]; struct nlattr *slave_attr[45]; }; struct rtnl_offload_xstats_request_used { bool request; bool used; }; struct rtnl_stats_dump_filters { u32 mask[6]; }; struct rtvia { __kernel_sa_family_t rtvia_family; __u8 rtvia_addr[0]; }; struct rusage { struct __kernel_old_timeval ru_utime; struct __kernel_old_timeval ru_stime; __kernel_long_t ru_maxrss; __kernel_long_t ru_ixrss; __kernel_long_t ru_idrss; __kernel_long_t ru_isrss; __kernel_long_t ru_minflt; __kernel_long_t ru_majflt; __kernel_long_t ru_nswap; __kernel_long_t ru_inblock; __kernel_long_t ru_oublock; __kernel_long_t ru_msgsnd; __kernel_long_t ru_msgrcv; __kernel_long_t ru_nsignals; __kernel_long_t ru_nvcsw; __kernel_long_t ru_nivcsw; }; typedef struct rw_semaphore *class_rwsem_read_t; struct rwsem_waiter { struct list_head list; struct task_struct *task; enum rwsem_waiter_type type; long unsigned int timeout; bool handoff_set; }; struct rx_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_rx_queue *, char *); ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); }; struct s_data { struct sched_domain **sd; struct root_domain *rd; }; struct sadb_alg { __u8 sadb_alg_id; __u8 sadb_alg_ivlen; __u16 sadb_alg_minbits; __u16 sadb_alg_maxbits; __u16 sadb_alg_reserved; }; struct saved_alias { struct kmem_cache *s; const char *name; struct saved_alias *next; }; struct saved_cmdlines_buffer { unsigned int map_pid_to_cmdline[32769]; unsigned int *map_cmdline_to_pid; unsigned int cmdline_num; int cmdline_idx; char saved_cmdlines[0]; }; struct saved_msr; struct saved_msrs { unsigned int num; struct saved_msr *array; }; struct saved_context { struct pt_regs regs; u16 ds; u16 es; u16 fs; u16 gs; long unsigned int kernelmode_gs_base; long unsigned int usermode_gs_base; long unsigned int fs_base; long unsigned int cr0; long unsigned int cr2; long unsigned int cr3; long unsigned int cr4; u64 misc_enable; struct saved_msrs saved_msrs; long unsigned int efer; u16 gdt_pad; struct desc_ptr gdt_desc; u16 idt_pad; struct desc_ptr idt; u16 ldt; u16 tss; long unsigned int tr; long unsigned int safety; long unsigned int return_address; bool misc_enable_saved; } __attribute__((packed)); struct saved_msr { bool valid; struct msr_info info; }; struct saved_syn { u32 mac_hdrlen; u32 network_hdrlen; u32 tcp_hdrlen; u8 data[0]; }; struct 
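/*
 * struct rtnl_link_ops above is how a driver exposes an "ip link add ...
 * type <kind>" interface. A kernel-module registration sketch; the demo_*
 * names are hypothetical, rtnl_link_register() is the real API:
 *
 *   static struct rtnl_link_ops demo_link_ops = {
 *       .kind      = "demo",
 *       .priv_size = sizeof(struct demo_priv),
 *       .setup     = demo_setup,     // fills struct net_device defaults
 *       .newlink   = demo_newlink,   // validates attrs, registers the netdev
 *   };
 *
 *   static int __init demo_init(void)
 *   {
 *       // pairs with rtnl_link_unregister() in the module exit path
 *       return rtnl_link_register(&demo_link_ops);
 *   }
 */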
sb_writers { short unsigned int frozen; int freeze_kcount; int freeze_ucount; struct percpu_rw_semaphore rw_sem[3]; }; struct sbitmap_word { long unsigned int word; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long unsigned int cleared; raw_spinlock_t swap_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct sbq_wait_state { wait_queue_head_t wait; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct scan_control { long unsigned int nr_to_reclaim; nodemask_t *nodemask; struct mem_cgroup *target_mem_cgroup; long unsigned int anon_cost; long unsigned int file_cost; int *proactive_swappiness; unsigned int may_deactivate: 2; unsigned int force_deactivate: 1; unsigned int skipped_deactivate: 1; unsigned int may_writepage: 1; unsigned int may_unmap: 1; unsigned int may_swap: 1; unsigned int no_cache_trim_mode: 1; unsigned int cache_trim_mode_failed: 1; unsigned int proactive: 1; unsigned int memcg_low_reclaim: 1; unsigned int memcg_low_skipped: 1; unsigned int memcg_full_walk: 1; unsigned int hibernation_mode: 1; unsigned int compaction_ready: 1; unsigned int cache_trim_mode: 1; unsigned int file_is_tiny: 1; unsigned int no_demotion: 1; s8 order; s8 priority; s8 reclaim_idx; gfp_t gfp_mask; long unsigned int nr_scanned; long unsigned int nr_reclaimed; struct { unsigned int dirty; unsigned int unqueued_dirty; unsigned int congested; unsigned int writeback; unsigned int immediate; unsigned int file_taken; unsigned int taken; } nr; struct reclaim_state reclaim_state; }; struct scatter_walk { struct scatterlist *sg; unsigned int offset; }; struct sch_frag_data { long unsigned int dst; struct qdisc_skb_cb cb; __be16 inner_protocol; u16 vlan_tci; __be16 vlan_proto; unsigned int l2_len; u8 l2_data[18]; int (*xmit)(struct sk_buff *); }; struct sched_attr { __u32 size; __u32 sched_policy; __u64 sched_flags; __s32 sched_nice; __u32 sched_priority; __u64 sched_runtime; __u64 sched_deadline; __u64 sched_period; __u32 sched_util_min; __u32 sched_util_max; }; struct sched_class { void (*enqueue_task)(struct rq *, struct task_struct *, int); bool (*dequeue_task)(struct rq *, struct task_struct *, int); void (*yield_task)(struct rq *); bool (*yield_to_task)(struct rq *, struct task_struct *); void (*wakeup_preempt)(struct rq *, struct task_struct *, int); int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); struct task_struct * (*pick_task)(struct rq *); struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *); void (*put_prev_task)(struct rq *, struct task_struct *, struct task_struct *); void (*set_next_task)(struct rq *, struct task_struct *, bool); int (*select_task_rq)(struct task_struct *, int, int); void (*migrate_task_rq)(struct task_struct *, int); void (*task_woken)(struct rq *, struct task_struct *); void (*set_cpus_allowed)(struct task_struct *, struct affinity_context *); void (*rq_online)(struct rq *); void (*rq_offline)(struct rq *); struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); void (*task_tick)(struct rq *, struct task_struct *, int); void (*task_fork)(struct task_struct *); void (*task_dead)(struct task_struct *); void (*switching_to)(struct rq *, struct task_struct *); void (*switched_from)(struct rq *, struct task_struct *); void (*switched_to)(struct rq *, struct task_struct *); void (*reweight_task)(struct rq *, struct task_struct *, const struct load_weight *); void (*prio_changed)(struct rq *, struct task_struct *, int); unsigned int (*get_rr_interval)(struct rq 
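/*
 * struct sched_attr above is the extended scheduling ABI consumed by
 * sched_setattr(2). A sketch giving the current thread a SCHED_DEADLINE
 * reservation of 10ms runtime every 30ms; glibc has no wrapper, hence
 * the raw syscall:
 *
 *   #include <linux/sched.h>        // SCHED_DEADLINE
 *   #include <linux/sched/types.h>  // uapi struct sched_attr
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   int go_deadline(void)
 *   {
 *       struct sched_attr attr = {
 *           .size           = sizeof(attr),
 *           .sched_policy   = SCHED_DEADLINE,
 *           .sched_runtime  = 10 * 1000 * 1000,  // ns
 *           .sched_deadline = 30 * 1000 * 1000,  // ns
 *           .sched_period   = 30 * 1000 * 1000,  // ns
 *       };
 *       return syscall(SYS_sched_setattr, 0, &attr, 0);
 *   }
 */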
*, struct task_struct *); void (*update_curr)(struct rq *); void (*task_change_group)(struct task_struct *); }; struct sched_clock_data { u64 tick_raw; u64 tick_gtod; u64 clock; }; struct sched_group; struct sched_domain_shared; struct sched_domain { struct sched_domain *parent; struct sched_domain *child; struct sched_group *groups; long unsigned int min_interval; long unsigned int max_interval; unsigned int busy_factor; unsigned int imbalance_pct; unsigned int cache_nice_tries; unsigned int imb_numa_nr; int nohz_idle; int flags; int level; long unsigned int last_balance; unsigned int balance_interval; unsigned int nr_balance_failed; u64 max_newidle_lb_cost; long unsigned int last_decay_max_lb_cost; unsigned int lb_count[3]; unsigned int lb_failed[3]; unsigned int lb_balanced[3]; unsigned int lb_imbalance[3]; unsigned int lb_gained[3]; unsigned int lb_hot_gained[3]; unsigned int lb_nobusyg[3]; unsigned int lb_nobusyq[3]; unsigned int alb_count; unsigned int alb_failed; unsigned int alb_pushed; unsigned int sbe_count; unsigned int sbe_balanced; unsigned int sbe_pushed; unsigned int sbf_count; unsigned int sbf_balanced; unsigned int sbf_pushed; unsigned int ttwu_wake_remote; unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; char *name; union { void *private; struct callback_head rcu; }; struct sched_domain_shared *shared; unsigned int span_weight; long unsigned int span[0]; }; struct sched_domain_attr { int relax_domain_level; }; struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; int nr_idle_scan; }; typedef const struct cpumask * (*sched_domain_mask_f)(int); typedef int (*sched_domain_flags_f)(void); struct sched_group_capacity; struct sd_data { struct sched_domain **sd; struct sched_domain_shared **sds; struct sched_group **sg; struct sched_group_capacity **sgc; }; struct sched_domain_topology_level { sched_domain_mask_f mask; sched_domain_flags_f sd_flags; int flags; int numa_level; struct sd_data data; char *name; }; struct sched_entity { struct load_weight load; struct rb_node run_node; u64 deadline; u64 min_vruntime; u64 min_slice; struct list_head group_node; unsigned char on_rq; unsigned char sched_delayed; unsigned char rel_deadline; unsigned char custom_slice; u64 exec_start; u64 sum_exec_runtime; u64 prev_sum_exec_runtime; u64 vruntime; s64 vlag; u64 slice; u64 nr_migrations; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; long unsigned int runnable_weight; long: 64; struct sched_avg avg; }; struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; s64 sum_block_runtime; s64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; long: 64; long: 64; long: 64; long: 64; }; struct sched_entity_stats { struct sched_entity se; struct sched_statistics stats; }; struct sched_group { struct sched_group *next; atomic_t ref; unsigned int group_weight; unsigned int cores; struct sched_group_capacity *sgc; int asym_prefer_cpu; int flags; long unsigned int cpumask[0]; }; struct sched_group_capacity { atomic_t ref; long unsigned int capacity; long 
unsigned int min_capacity; long unsigned int max_capacity; long unsigned int next_update; int imbalance; int id; long unsigned int cpumask[0]; }; struct sched_param { int sched_priority; }; struct sched_rt_entity { struct list_head run_list; long unsigned int timeout; long unsigned int watchdog_stamp; unsigned int time_slice; short unsigned int on_rq; short unsigned int on_list; struct sched_rt_entity *back; }; struct scm_fp_list; struct scm_cookie { struct pid *pid; struct scm_fp_list *fp; struct scm_creds creds; u32 secid; }; struct unix_edge; struct scm_fp_list { short int count; short int count_unix; short int max; bool inflight; bool dead; struct list_head vertices; struct unix_edge *edges; struct user_struct *user; struct file *fp[253]; }; struct scm_stat { atomic_t nr_fds; long unsigned int nr_unix_fds; }; struct scm_timestamping { struct __kernel_old_timespec ts[3]; }; struct scm_timestamping64 { struct __kernel_timespec ts[3]; }; struct scm_timestamping_internal { struct timespec64 ts[3]; }; struct scm_ts_pktinfo { __u32 if_index; __u32 pkt_length; __u32 reserved[2]; }; struct scomp_alg { void * (*alloc_ctx)(struct crypto_scomp *); void (*free_ctx)(struct crypto_scomp *, void *); int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); union { struct { struct crypto_alg base; }; struct comp_alg_common calg; }; }; struct scomp_scratch { spinlock_t lock; void *src; void *dst; }; struct scratches_to_free { struct callback_head rcu; unsigned int cnt; void *scratches[0]; }; struct sctp_paramhdr { __be16 type; __be16 length; }; struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; }; struct sctp_addip_param { struct sctp_paramhdr param_hdr; __be32 crr_id; }; struct sctp_addiphdr { __be32 serial; }; union sctp_addr { struct sockaddr_in v4; struct sockaddr_in6 v6; struct sockaddr sa; }; struct sctp_ipv4addr_param { struct sctp_paramhdr param_hdr; struct in_addr addr; }; struct sctp_ipv6addr_param { struct sctp_paramhdr param_hdr; struct in6_addr addr; }; union sctp_addr_param { struct sctp_paramhdr p; struct sctp_ipv4addr_param v4; struct sctp_ipv6addr_param v6; }; struct sctp_transport; struct sctp_sock; struct sctp_af { int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *); int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *); void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *); void (*copy_addrlist)(struct list_head *, struct net_device *); int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *); void (*addr_copy)(union sctp_addr *, union sctp_addr *); void (*from_skb)(union sctp_addr *, struct sk_buff *, int); void (*from_sk)(union sctp_addr *, struct sock *); bool (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int); int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *); int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *); enum sctp_scope (*scope)(union sctp_addr *); void (*inaddr_any)(union sctp_addr *, __be16); int (*is_any)(const union sctp_addr *); int (*available)(union sctp_addr *, struct sctp_sock *); int (*skb_iif)(const struct sk_buff *); int (*skb_sdif)(const struct sk_buff *); int (*is_ce)(const struct sk_buff *); void 
(*seq_dump_addr)(struct seq_file *, union sctp_addr *); void (*ecn_capable)(struct sock *); __u16 net_header_len; int sockaddr_len; int (*ip_options_len)(struct sock *); sa_family_t sa_family; struct list_head list; }; struct sctp_chunk; struct sctp_inq { struct list_head in_chunk_list; struct sctp_chunk *in_progress; struct work_struct immediate; }; struct sctp_bind_addr { __u16 port; struct list_head address_list; }; struct sctp_ep_common { enum sctp_endpoint_type type; refcount_t refcnt; bool dead; struct sock *sk; struct net *net; struct sctp_inq inqueue; struct sctp_bind_addr bind_addr; }; struct sctp_cookie { __u32 my_vtag; __u32 peer_vtag; __u32 my_ttag; __u32 peer_ttag; ktime_t expiration; __u16 sinit_num_ostreams; __u16 sinit_max_instreams; __u32 initial_tsn; union sctp_addr peer_addr; __u16 my_port; __u8 prsctp_capable; __u8 padding; __u32 adaptation_ind; __u8 auth_random[36]; __u8 auth_hmacs[10]; __u8 auth_chunks[20]; __u32 raw_addr_list_len; }; struct sctp_tsnmap { long unsigned int *tsn_map; __u32 base_tsn; __u32 cumulative_tsn_ack_point; __u32 max_tsn_seen; __u16 len; __u16 pending_data; __u16 num_dup_tsns; __be32 dup_tsns[16]; }; struct sctp_inithdr_host { __u32 init_tag; __u32 a_rwnd; __u16 num_outbound_streams; __u16 num_inbound_streams; __u32 initial_tsn; }; struct sctp_stream_out_ext; struct sctp_stream_out { union { __u32 mid; __u16 ssn; }; __u32 mid_uo; struct sctp_stream_out_ext *ext; __u8 state; }; struct sctp_stream_in { union { __u32 mid; __u16 ssn; }; __u32 mid_uo; __u32 fsn; __u32 fsn_uo; char pd_mode; char pd_mode_uo; }; struct sctp_stream_interleave; struct sctp_stream { struct { struct __genradix tree; struct sctp_stream_out type[0]; } out; struct { struct __genradix tree; struct sctp_stream_in type[0]; } in; __u16 outcnt; __u16 incnt; struct sctp_stream_out *out_curr; union { struct { struct list_head prio_list; }; struct { struct list_head rr_list; struct sctp_stream_out_ext *rr_next; }; struct { struct list_head fc_list; }; }; struct sctp_stream_interleave *si; }; struct sctp_sched_ops; struct sctp_association; struct sctp_outq { struct sctp_association *asoc; struct list_head out_chunk_list; struct sctp_sched_ops *sched; unsigned int out_qlen; unsigned int error; struct list_head control_chunk_list; struct list_head sacked; struct list_head retransmit; struct list_head abandoned; __u32 outstanding_bytes; char fast_rtx; char cork; }; struct sctp_ulpq { char pd_mode; struct sctp_association *asoc; struct sk_buff_head reasm; struct sk_buff_head reasm_uo; struct sk_buff_head lobby; }; struct sctp_priv_assoc_stats { struct __kernel_sockaddr_storage obs_rto_ipaddr; __u64 max_obs_rto; __u64 isacks; __u64 osacks; __u64 opackets; __u64 ipackets; __u64 rtxchunks; __u64 outofseqtsns; __u64 idupchunks; __u64 gapcnt; __u64 ouodchunks; __u64 iuodchunks; __u64 oodchunks; __u64 iodchunks; __u64 octrlchunks; __u64 ictrlchunks; }; struct sctp_endpoint; struct sctp_random_param; struct sctp_chunks_param; struct sctp_hmac_algo_param; struct sctp_auth_bytes; struct sctp_shared_key; struct sctp_association { struct sctp_ep_common base; struct list_head asocs; sctp_assoc_t assoc_id; struct sctp_endpoint *ep; struct sctp_cookie c; struct { struct list_head transport_addr_list; __u32 rwnd; __u16 transport_count; __u16 port; struct sctp_transport *primary_path; union sctp_addr primary_addr; struct sctp_transport *active_path; struct sctp_transport *retran_path; struct sctp_transport *last_sent_to; struct sctp_transport *last_data_from; struct sctp_tsnmap tsn_map; __be16 
addip_disabled_mask; __u16 ecn_capable: 1; __u16 ipv4_address: 1; __u16 ipv6_address: 1; __u16 asconf_capable: 1; __u16 prsctp_capable: 1; __u16 reconf_capable: 1; __u16 intl_capable: 1; __u16 auth_capable: 1; __u16 sack_needed: 1; __u16 sack_generation: 1; __u16 zero_window_announced: 1; __u32 sack_cnt; __u32 adaptation_ind; struct sctp_inithdr_host i; void *cookie; int cookie_len; __u32 addip_serial; struct sctp_random_param *peer_random; struct sctp_chunks_param *peer_chunks; struct sctp_hmac_algo_param *peer_hmacs; } peer; enum sctp_state state; int overall_error_count; ktime_t cookie_life; long unsigned int rto_initial; long unsigned int rto_max; long unsigned int rto_min; int max_burst; int max_retrans; __u16 pf_retrans; __u16 ps_retrans; __u16 max_init_attempts; __u16 init_retries; long unsigned int max_init_timeo; long unsigned int hbinterval; long unsigned int probe_interval; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u8 pmtu_pending; __u32 pathmtu; __u32 param_flags; __u32 sackfreq; long unsigned int sackdelay; long unsigned int timeouts[12]; struct timer_list timers[12]; struct sctp_transport *shutdown_last_sent_to; struct sctp_transport *init_last_sent_to; int shutdown_retries; __u32 next_tsn; __u32 ctsn_ack_point; __u32 adv_peer_ack_point; __u32 highest_sacked; __u32 fast_recovery_exit; __u8 fast_recovery; __u16 unack_data; __u32 rtx_data_chunks; __u32 rwnd; __u32 a_rwnd; __u32 rwnd_over; __u32 rwnd_press; int sndbuf_used; atomic_t rmem_alloc; wait_queue_head_t wait; __u32 frag_point; __u32 user_frag; int init_err_counter; int init_cycle; __u16 default_stream; __u16 default_flags; __u32 default_ppid; __u32 default_context; __u32 default_timetolive; __u32 default_rcv_context; struct sctp_stream stream; struct sctp_outq outqueue; struct sctp_ulpq ulpq; __u32 last_ecne_tsn; __u32 last_cwr_tsn; int numduptsns; struct sctp_chunk *addip_last_asconf; struct list_head asconf_ack_list; struct list_head addip_chunk_list; __u32 addip_serial; int src_out_of_asoc_ok; union sctp_addr *asconf_addr_del_pending; struct sctp_transport *new_transport; struct list_head endpoint_shared_keys; struct sctp_auth_bytes *asoc_shared_key; struct sctp_shared_key *shkey; __u16 default_hmac_id; __u16 active_key_id; __u8 need_ecne: 1; __u8 temp: 1; __u8 pf_expose: 2; __u8 force_delay: 1; __u8 strreset_enable; __u8 strreset_outstanding; __u32 strreset_outseq; __u32 strreset_inseq; __u32 strreset_result[2]; struct sctp_chunk *strreset_chunk; struct sctp_priv_assoc_stats stats; int sent_cnt_removable; __u16 subscribe; __u64 abandoned_unsent[3]; __u64 abandoned_sent[3]; u32 secid; u32 peer_secid; struct callback_head rcu; }; struct sctp_assocparams { sctp_assoc_t sasoc_assoc_id; __u16 sasoc_asocmaxrxt; __u16 sasoc_number_peer_destinations; __u32 sasoc_peer_rwnd; __u32 sasoc_local_rwnd; __u32 sasoc_cookie_life; }; struct sctp_auth_bytes { refcount_t refcnt; __u32 len; __u8 data[0]; }; struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; }; struct sctp_bind_bucket { short unsigned int port; signed char fastreuse; signed char fastreuseport; kuid_t fastuid; struct hlist_node node; struct hlist_head owner; struct net *net; }; struct sctp_cookie_preserve_param; struct sctp_hostname_param; struct sctp_cookie_param; struct sctp_supported_addrs_param; struct sctp_supported_ext_param; union sctp_params { void *v; struct sctp_paramhdr *p; struct sctp_cookie_preserve_param *life; struct sctp_hostname_param *dns; struct sctp_cookie_param *cookie; struct sctp_supported_addrs_param *sat; struct 
sctp_ipv4addr_param *v4; struct sctp_ipv6addr_param *v6; union sctp_addr_param *addr; struct sctp_adaptation_ind_param *aind; struct sctp_supported_ext_param *ext; struct sctp_random_param *random; struct sctp_chunks_param *chunks; struct sctp_hmac_algo_param *hmac_algo; struct sctp_addip_param *addip; }; struct sctp_sndrcvinfo { __u16 sinfo_stream; __u16 sinfo_ssn; __u16 sinfo_flags; __u32 sinfo_ppid; __u32 sinfo_context; __u32 sinfo_timetolive; __u32 sinfo_tsn; __u32 sinfo_cumtsn; sctp_assoc_t sinfo_assoc_id; }; struct sctp_datahdr; struct sctp_inithdr; struct sctp_sackhdr; struct sctp_heartbeathdr; struct sctp_sender_hb_info; struct sctp_shutdownhdr; struct sctp_signed_cookie; struct sctp_ecnehdr; struct sctp_cwrhdr; struct sctp_errhdr; struct sctp_fwdtsn_hdr; struct sctp_idatahdr; struct sctp_ifwdtsn_hdr; struct sctp_chunkhdr; struct sctphdr; struct sctp_datamsg; struct sctp_chunk { struct list_head list; refcount_t refcnt; int sent_count; union { struct list_head transmitted_list; struct list_head stream_list; }; struct list_head frag_list; struct sk_buff *skb; union { struct sk_buff *head_skb; struct sctp_shared_key *shkey; }; union sctp_params param_hdr; union { __u8 *v; struct sctp_datahdr *data_hdr; struct sctp_inithdr *init_hdr; struct sctp_sackhdr *sack_hdr; struct sctp_heartbeathdr *hb_hdr; struct sctp_sender_hb_info *hbs_hdr; struct sctp_shutdownhdr *shutdown_hdr; struct sctp_signed_cookie *cookie_hdr; struct sctp_ecnehdr *ecne_hdr; struct sctp_cwrhdr *ecn_cwr_hdr; struct sctp_errhdr *err_hdr; struct sctp_addiphdr *addip_hdr; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_authhdr *auth_hdr; struct sctp_idatahdr *idata_hdr; struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; } subh; __u8 *chunk_end; struct sctp_chunkhdr *chunk_hdr; struct sctphdr *sctp_hdr; struct sctp_sndrcvinfo sinfo; struct sctp_association *asoc; struct sctp_ep_common *rcvr; long unsigned int sent_at; union sctp_addr source; union sctp_addr dest; struct sctp_datamsg *msg; struct sctp_transport *transport; struct sk_buff *auth_chunk; __u16 rtt_in_progress: 1; __u16 has_tsn: 1; __u16 has_ssn: 1; __u16 singleton: 1; __u16 end_of_packet: 1; __u16 ecn_ce_done: 1; __u16 pdiscard: 1; __u16 tsn_gap_acked: 1; __u16 data_accepted: 1; __u16 auth: 1; __u16 has_asconf: 1; __u16 pmtu_probe: 1; __u16 tsn_missing_report: 2; __u16 fast_retransmit: 2; }; struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; }; struct sctp_chunks_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; struct sctp_cookie_param { struct sctp_paramhdr p; __u8 body[0]; }; struct sctp_cookie_preserve_param { struct sctp_paramhdr param_hdr; __be32 lifespan_increment; }; struct sctp_cwrhdr { __be32 lowest_tsn; }; struct sctp_datahdr { __be32 tsn; __be16 stream; __be16 ssn; __u32 ppid; }; struct sctp_datamsg { struct list_head chunks; refcount_t refcnt; long unsigned int expires_at; int send_error; u8 send_failed: 1; u8 can_delay: 1; u8 abandoned: 1; }; struct sctp_ecnehdr { __be32 lowest_tsn; }; struct sctp_endpoint { struct sctp_ep_common base; struct hlist_node node; int hashent; struct list_head asocs; __u8 secret_key[32]; __u8 *digest; __u32 sndbuf_policy; __u32 rcvbuf_policy; struct crypto_shash **auth_hmacs; struct sctp_hmac_algo_param *auth_hmacs_list; struct sctp_chunks_param *auth_chunk_list; struct list_head endpoint_shared_keys; __u16 active_key_id; __u8 ecn_enable: 1; __u8 auth_enable: 1; __u8 intl_enable: 1; __u8 prsctp_enable: 1; __u8 asconf_enable: 1; __u8 reconf_enable: 1; __u8 strreset_enable; struct callback_head rcu; }; struct 
sctp_errhdr { __be16 cause; __be16 length; }; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; }; struct sctp_heartbeathdr { struct sctp_paramhdr info; }; struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; __be16 hmac_ids[0]; }; struct sctp_hostname_param { struct sctp_paramhdr param_hdr; uint8_t hostname[0]; }; struct sctp_idatahdr { __be32 tsn; __be16 stream; __be16 reserved; __be32 mid; union { __u32 ppid; __be32 fsn; }; __u8 payload[0]; }; struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; }; struct sctp_inithdr { __be32 init_tag; __be32 a_rwnd; __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; }; struct sctp_initmsg { __u16 sinit_num_ostreams; __u16 sinit_max_instreams; __u16 sinit_max_attempts; __u16 sinit_max_init_timeo; }; struct sctp_packet { __u16 source_port; __u16 destination_port; __u32 vtag; struct list_head chunk_list; size_t overhead; size_t size; size_t max_size; struct sctp_transport *transport; struct sctp_chunk *auth; u8 has_cookie_echo: 1; u8 has_sack: 1; u8 has_auth: 1; u8 has_data: 1; u8 ipfragok: 1; }; struct sctp_paddrparams { sctp_assoc_t spp_assoc_id; struct __kernel_sockaddr_storage spp_address; __u32 spp_hbinterval; __u16 spp_pathmaxrxt; __u32 spp_pathmtu; __u32 spp_sackdelay; __u32 spp_flags; __u32 spp_ipv6_flowlabel; __u8 spp_dscp; int: 0; } __attribute__((packed)); struct sctp_ulpevent; struct sctp_pf { void (*event_msgname)(struct sctp_ulpevent *, char *, int *); void (*skb_msgname)(struct sk_buff *, char *, int *); int (*af_supported)(sa_family_t, struct sctp_sock *); int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *); int (*bind_verify)(struct sctp_sock *, union sctp_addr *); int (*send_verify)(struct sctp_sock *, union sctp_addr *); int (*supported_addrs)(const struct sctp_sock *, __be16 *); struct sock * (*create_accept_sk)(struct sock *, struct sctp_association *, bool); int (*addr_to_user)(struct sctp_sock *, union sctp_addr *); void (*to_sk_saddr)(union sctp_addr *, struct sock *); void (*to_sk_daddr)(union sctp_addr *, struct sock *); void (*copy_ip_options)(struct sock *, struct sock *); struct sctp_af *af; }; struct sctp_random_param { struct sctp_paramhdr param_hdr; __u8 random_val[0]; }; struct sctp_rtoinfo { sctp_assoc_t srto_assoc_id; __u32 srto_initial; __u32 srto_max; __u32 srto_min; }; struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; }; struct sctp_sender_hb_info { struct sctp_paramhdr param_hdr; union sctp_addr daddr; long unsigned int sent_at; __u64 hb_nonce; __u32 probe_size; }; struct sctp_shared_key { struct list_head key_list; struct sctp_auth_bytes *key; refcount_t refcnt; __u16 key_id; __u8 deactivated; }; struct sctp_shutdownhdr { __be32 cum_tsn_ack; }; struct sctp_signed_cookie { __u8 signature[32]; __u32 __pad; struct sctp_cookie c; } __attribute__((packed)); struct sctp_sock { struct inet_sock inet; enum sctp_socket_type type; struct sctp_pf *pf; struct crypto_shash *hmac; char *sctp_hmac_alg; struct sctp_endpoint *ep; struct sctp_bind_bucket *bind_hash; __u16 default_stream; __u32 default_ppid; __u16 default_flags; __u32 default_context; __u32 default_timetolive; __u32 default_rcv_context; int max_burst; __u32 hbinterval; __u32 probe_interval; __be16 udp_port; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u16 pf_retrans; __u16 ps_retrans; __u32 pathmtu; __u32 sackdelay; __u32 sackfreq; __u32 param_flags; __u32 default_ss; struct sctp_rtoinfo rtoinfo; struct sctp_paddrparams paddrparam; 
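/*
 * struct sctp_paddrparams above doubles as the SCTP_PEER_ADDR_PARAMS
 * socket-option payload. A sketch enabling heartbeats with a custom
 * interval as the association default (requires the lksctp-tools
 * <netinet/sctp.h> header):
 *
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   int sctp_set_hb(int sd, unsigned int hb_ms)
 *   {
 *       struct sctp_paddrparams p;
 *
 *       memset(&p, 0, sizeof(p));   // zeroed spp_address => assoc default
 *       p.spp_flags      = SPP_HB_ENABLE;
 *       p.spp_hbinterval = hb_ms;   // milliseconds
 *       return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                         &p, sizeof(p));
 *   }
 */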
struct sctp_assocparams assocparams; __u16 subscribe; struct sctp_initmsg initmsg; int user_frag; __u32 autoclose; __u32 adaptation_ind; __u32 pd_point; __u16 nodelay: 1; __u16 pf_expose: 2; __u16 reuse: 1; __u16 disable_fragments: 1; __u16 v4mapped: 1; __u16 frag_interleave: 1; __u16 recvrcvinfo: 1; __u16 recvnxtinfo: 1; __u16 data_ready_signalled: 1; atomic_t pd_mode; struct sk_buff_head pd_lobby; struct list_head auto_asconf_list; int do_auto_asconf; }; struct sctp_stream_interleave { __u16 data_chunk_len; __u16 ftsn_chunk_len; struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t); void (*assign_number)(struct sctp_chunk *); bool (*validate_data)(struct sctp_chunk *); int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *); void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); void (*start_pd)(struct sctp_ulpq *, gfp_t); void (*abort_pd)(struct sctp_ulpq *, gfp_t); void (*generate_ftsn)(struct sctp_outq *, __u32); bool (*validate_ftsn)(struct sctp_chunk *); void (*report_ftsn)(struct sctp_ulpq *, __u32); void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *); }; struct sctp_stream_priorities; struct sctp_stream_out_ext { __u64 abandoned_unsent[3]; __u64 abandoned_sent[3]; struct list_head outq; union { struct { struct list_head prio_list; struct sctp_stream_priorities *prio_head; }; struct { struct list_head rr_list; }; struct { struct list_head fc_list; __u32 fc_length; __u16 fc_weight; }; }; }; struct sctp_stream_priorities { struct list_head prio_sched; struct list_head active; struct sctp_stream_out_ext *next; __u16 prio; __u16 users; }; struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; __be16 types[0]; }; struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; struct sctp_transport { struct list_head transports; struct rhlist_head node; refcount_t refcnt; __u32 rto_pending: 1; __u32 hb_sent: 1; __u32 pmtu_pending: 1; __u32 dst_pending_confirm: 1; __u32 sack_generation: 1; u32 dst_cookie; struct flowi fl; union sctp_addr ipaddr; struct sctp_af *af_specific; struct sctp_association *asoc; long unsigned int rto; __u32 rtt; __u32 rttvar; __u32 srtt; __u32 cwnd; __u32 ssthresh; __u32 partial_bytes_acked; __u32 flight_size; __u32 burst_limited; struct dst_entry *dst; union sctp_addr saddr; long unsigned int hbinterval; long unsigned int probe_interval; long unsigned int sackdelay; __u32 sackfreq; atomic_t mtu_info; ktime_t last_time_heard; long unsigned int last_time_sent; long unsigned int last_time_ecne_reduced; __be16 encap_port; __u16 pathmaxrxt; __u32 flowlabel; __u8 dscp; __u16 pf_retrans; __u16 ps_retrans; __u32 pathmtu; __u32 param_flags; int init_sent_count; int state; short unsigned int error_count; struct timer_list T3_rtx_timer; struct timer_list hb_timer; struct timer_list proto_unreach_timer; struct timer_list reconf_timer; struct timer_list probe_timer; struct list_head transmitted; struct sctp_packet packet; struct list_head send_ready; struct { __u32 next_tsn_at_change; char changeover_active; char cycling_changeover; char cacc_saw_newack; } cacc; struct { __u16 pmtu; __u16 probe_size; __u16 probe_high; __u8 probe_count; __u8 state; } pl; __u64 hb_nonce; struct callback_head rcu; }; struct sctp_ulpevent { struct sctp_association *asoc; struct sctp_chunk *chunk; unsigned int rmem_len; union { __u32 mid; __u16 ssn; }; union { __u32 ppid; __u32 fsn; }; __u32 tsn; 
__u32 cumtsn; __u16 stream; __u16 flags; __u16 msg_flags; } __attribute__((packed)); struct sctphdr { __be16 source; __be16 dest; __be32 vtag; __le32 checksum; }; struct sd_flag_debug { unsigned int meta_flags; char *name; }; struct sd_flow_limit { u64 count; unsigned int num_buckets; unsigned int history_head; u16 history[128]; u8 buckets[0]; }; struct sg_lb_stats { long unsigned int avg_load; long unsigned int group_load; long unsigned int group_capacity; long unsigned int group_util; long unsigned int group_runnable; unsigned int sum_nr_running; unsigned int sum_h_nr_running; unsigned int idle_cpus; unsigned int group_weight; enum group_type group_type; unsigned int group_asym_packing; unsigned int group_smt_balance; long unsigned int group_misfit_task_load; unsigned int nr_numa_running; unsigned int nr_preferred_running; }; struct sd_lb_stats { struct sched_group *busiest; struct sched_group *local; long unsigned int total_load; long unsigned int total_capacity; long unsigned int avg_load; unsigned int prefer_sibling; struct sg_lb_stats busiest_stat; struct sg_lb_stats local_stat; }; struct shash_desc { struct crypto_shash *tfm; void *__ctx[0]; }; struct sdesc { struct shash_desc shash; char ctx[0]; }; struct xfrm_offload { struct { __u32 low; __u32 hi; } seq; __u32 flags; __u32 status; __u32 orig_mac_len; __u8 proto; __u8 inner_ipproto; }; struct sec_path { int len; int olen; int verified_cnt; struct xfrm_state *xvec[6]; struct xfrm_offload ovec[1]; }; struct seccomp_filter; struct seccomp { int mode; atomic_t filter_count; struct seccomp_filter *filter; }; struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; }; struct seccomp_filter { refcount_t refs; refcount_t users; bool log; bool wait_killable_recv; struct action_cache cache; struct seccomp_filter *prev; struct bpf_prog *prog; struct notification *notif; struct mutex notify_lock; wait_queue_head_t wqh; }; struct seccomp_kaddfd { struct file *file; int fd; unsigned int flags; __u32 ioctl_flags; union { bool setfd; int ret; }; struct completion completion; struct list_head list; }; struct seccomp_knotif { struct task_struct *task; u64 id; const struct seccomp_data *data; enum notify_state state; int error; long int val; u32 flags; struct completion ready; struct list_head list; struct list_head addfd; }; struct seccomp_log_name { u32 log; const char *name; }; struct seccomp_notif { __u64 id; __u32 pid; __u32 flags; struct seccomp_data data; }; struct seccomp_notif_addfd { __u64 id; __u32 flags; __u32 srcfd; __u32 newfd; __u32 newfd_flags; }; struct seccomp_notif_resp { __u64 id; __s64 val; __s32 error; __u32 flags; }; struct seccomp_notif_sizes { __u16 seccomp_notif; __u16 seccomp_notif_resp; __u16 seccomp_data; }; struct security_class_mapping { const char *name; const char *perms[33]; }; struct timezone; struct xattr; struct sembuf; union security_list_options { int (*binder_set_context_mgr)(const struct cred *); int (*binder_transaction)(const struct cred *, const struct cred *); int (*binder_transfer_binder)(const struct cred *, const struct cred *); int (*binder_transfer_file)(const struct cred *, const struct cred *, const struct file *); int (*ptrace_access_check)(struct task_struct *, unsigned int); int (*ptrace_traceme)(struct task_struct *); int (*capget)(const struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *); int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *); int (*capable)(const struct cred *, struct 
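/*
 * struct seccomp_data above is what a SECCOMP_MODE_FILTER program
 * inspects. A minimal classic-BPF sketch denying ptrace(2); a production
 * filter must also validate seccomp_data.arch, omitted here for brevity:
 *
 *   #include <errno.h>
 *   #include <linux/filter.h>
 *   #include <linux/seccomp.h>
 *   #include <stddef.h>
 *   #include <sys/prctl.h>
 *   #include <sys/syscall.h>
 *
 *   static int deny_ptrace(void)
 *   {
 *       struct sock_filter insns[] = {
 *           // load the syscall number from struct seccomp_data
 *           BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *                    offsetof(struct seccomp_data, nr)),
 *           BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_ptrace, 0, 1),
 *           BPF_STMT(BPF_RET | BPF_K,
 *                    SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
 *           BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *       };
 *       struct sock_fprog prog = {
 *           .len = sizeof(insns) / sizeof(insns[0]), .filter = insns,
 *       };
 *
 *       // required to install a filter without CAP_SYS_ADMIN
 *       if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
 *           return -1;
 *       return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *   }
 */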
user_namespace *, int, unsigned int); int (*quotactl)(int, int, int, const struct super_block *); int (*quota_on)(struct dentry *); int (*syslog)(int); int (*settime)(const struct timespec64 *, const struct timezone *); int (*vm_enough_memory)(struct mm_struct *, long int); int (*bprm_creds_for_exec)(struct linux_binprm *); int (*bprm_creds_from_file)(struct linux_binprm *, const struct file *); int (*bprm_check_security)(struct linux_binprm *); void (*bprm_committing_creds)(const struct linux_binprm *); void (*bprm_committed_creds)(const struct linux_binprm *); int (*fs_context_submount)(struct fs_context *, struct super_block *); int (*fs_context_dup)(struct fs_context *, struct fs_context *); int (*fs_context_parse_param)(struct fs_context *, struct fs_parameter *); int (*sb_alloc_security)(struct super_block *); void (*sb_delete)(struct super_block *); void (*sb_free_security)(struct super_block *); void (*sb_free_mnt_opts)(void *); int (*sb_eat_lsm_opts)(char *, void **); int (*sb_mnt_opts_compat)(struct super_block *, void *); int (*sb_remount)(struct super_block *, void *); int (*sb_kern_mount)(const struct super_block *); int (*sb_show_options)(struct seq_file *, struct super_block *); int (*sb_statfs)(struct dentry *); int (*sb_mount)(const char *, const struct path *, const char *, long unsigned int, void *); int (*sb_umount)(struct vfsmount *, int); int (*sb_pivotroot)(const struct path *, const struct path *); int (*sb_set_mnt_opts)(struct super_block *, void *, long unsigned int, long unsigned int *); int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, long unsigned int, long unsigned int *); int (*move_mount)(const struct path *, const struct path *); int (*dentry_init_security)(struct dentry *, int, const struct qstr *, const char **, void **, u32 *); int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *); int (*path_unlink)(const struct path *, struct dentry *); int (*path_mkdir)(const struct path *, struct dentry *, umode_t); int (*path_rmdir)(const struct path *, struct dentry *); int (*path_mknod)(const struct path *, struct dentry *, umode_t, unsigned int); void (*path_post_mknod)(struct mnt_idmap *, struct dentry *); int (*path_truncate)(const struct path *); int (*path_symlink)(const struct path *, struct dentry *, const char *); int (*path_link)(struct dentry *, const struct path *, struct dentry *); int (*path_rename)(const struct path *, struct dentry *, const struct path *, struct dentry *, unsigned int); int (*path_chmod)(const struct path *, umode_t); int (*path_chown)(const struct path *, kuid_t, kgid_t); int (*path_chroot)(const struct path *); int (*path_notify)(const struct path *, u64, unsigned int); int (*inode_alloc_security)(struct inode *); void (*inode_free_security)(struct inode *); void (*inode_free_security_rcu)(void *); int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, struct xattr *, int *); int (*inode_init_security_anon)(struct inode *, const struct qstr *, const struct inode *); int (*inode_create)(struct inode *, struct dentry *, umode_t); void (*inode_post_create_tmpfile)(struct mnt_idmap *, struct inode *); int (*inode_link)(struct dentry *, struct inode *, struct dentry *); int (*inode_unlink)(struct inode *, struct dentry *); int (*inode_symlink)(struct inode *, struct dentry *, const char *); int (*inode_mkdir)(struct inode *, struct dentry *, umode_t); int (*inode_rmdir)(struct inode *, struct dentry *); int (*inode_mknod)(struct inode *, 
struct dentry *, umode_t, dev_t); int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*inode_readlink)(struct dentry *); int (*inode_follow_link)(struct dentry *, struct inode *, bool); int (*inode_permission)(struct inode *, int); int (*inode_setattr)(struct mnt_idmap *, struct dentry *, struct iattr *); void (*inode_post_setattr)(struct mnt_idmap *, struct dentry *, int); int (*inode_getattr)(const struct path *); int (*inode_xattr_skipcap)(const char *); int (*inode_setxattr)(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int); void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int); int (*inode_getxattr)(struct dentry *, const char *); int (*inode_listxattr)(struct dentry *); int (*inode_removexattr)(struct mnt_idmap *, struct dentry *, const char *); void (*inode_post_removexattr)(struct dentry *, const char *); int (*inode_set_acl)(struct mnt_idmap *, struct dentry *, const char *, struct posix_acl *); void (*inode_post_set_acl)(struct dentry *, const char *, struct posix_acl *); int (*inode_get_acl)(struct mnt_idmap *, struct dentry *, const char *); int (*inode_remove_acl)(struct mnt_idmap *, struct dentry *, const char *); void (*inode_post_remove_acl)(struct mnt_idmap *, struct dentry *, const char *); int (*inode_need_killpriv)(struct dentry *); int (*inode_killpriv)(struct mnt_idmap *, struct dentry *); int (*inode_getsecurity)(struct mnt_idmap *, struct inode *, const char *, void **, bool); int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int); int (*inode_listsecurity)(struct inode *, char *, size_t); void (*inode_getsecid)(struct inode *, u32 *); int (*inode_copy_up)(struct dentry *, struct cred **); int (*inode_copy_up_xattr)(struct dentry *, const char *); int (*inode_setintegrity)(const struct inode *, enum lsm_integrity_type, const void *, size_t); int (*kernfs_init_security)(struct kernfs_node *, struct kernfs_node *); int (*file_permission)(struct file *, int); int (*file_alloc_security)(struct file *); void (*file_release)(struct file *); void (*file_free_security)(struct file *); int (*file_ioctl)(struct file *, unsigned int, long unsigned int); int (*file_ioctl_compat)(struct file *, unsigned int, long unsigned int); int (*mmap_addr)(long unsigned int); int (*mmap_file)(struct file *, long unsigned int, long unsigned int, long unsigned int); int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int); int (*file_lock)(struct file *, unsigned int); int (*file_fcntl)(struct file *, unsigned int, long unsigned int); void (*file_set_fowner)(struct file *); int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int); int (*file_receive)(struct file *); int (*file_open)(struct file *); int (*file_post_open)(struct file *, int); int (*file_truncate)(struct file *); int (*task_alloc)(struct task_struct *, long unsigned int); void (*task_free)(struct task_struct *); int (*cred_alloc_blank)(struct cred *, gfp_t); void (*cred_free)(struct cred *); int (*cred_prepare)(struct cred *, const struct cred *, gfp_t); void (*cred_transfer)(struct cred *, const struct cred *); void (*cred_getsecid)(const struct cred *, u32 *); int (*kernel_act_as)(struct cred *, u32); int (*kernel_create_files_as)(struct cred *, struct inode *); int (*kernel_module_request)(char *); int (*kernel_load_data)(enum kernel_load_data_id, bool); int (*kernel_post_load_data)(char *, loff_t, enum kernel_load_data_id, char *); int 
(*kernel_read_file)(struct file *, enum kernel_read_file_id, bool); int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id); int (*task_fix_setuid)(struct cred *, const struct cred *, int); int (*task_fix_setgid)(struct cred *, const struct cred *, int); int (*task_fix_setgroups)(struct cred *, const struct cred *); int (*task_setpgid)(struct task_struct *, pid_t); int (*task_getpgid)(struct task_struct *); int (*task_getsid)(struct task_struct *); void (*current_getsecid_subj)(u32 *); void (*task_getsecid_obj)(struct task_struct *, u32 *); int (*task_setnice)(struct task_struct *, int); int (*task_setioprio)(struct task_struct *, int); int (*task_getioprio)(struct task_struct *); int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int); int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *); int (*task_setscheduler)(struct task_struct *); int (*task_getscheduler)(struct task_struct *); int (*task_movememory)(struct task_struct *); int (*task_kill)(struct task_struct *, struct kernel_siginfo *, int, const struct cred *); int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); void (*task_to_inode)(struct task_struct *, struct inode *); int (*userns_create)(const struct cred *); int (*ipc_permission)(struct kern_ipc_perm *, short int); void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *); int (*msg_msg_alloc_security)(struct msg_msg *); void (*msg_msg_free_security)(struct msg_msg *); int (*msg_queue_alloc_security)(struct kern_ipc_perm *); void (*msg_queue_free_security)(struct kern_ipc_perm *); int (*msg_queue_associate)(struct kern_ipc_perm *, int); int (*msg_queue_msgctl)(struct kern_ipc_perm *, int); int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int); int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, int); int (*shm_alloc_security)(struct kern_ipc_perm *); void (*shm_free_security)(struct kern_ipc_perm *); int (*shm_associate)(struct kern_ipc_perm *, int); int (*shm_shmctl)(struct kern_ipc_perm *, int); int (*shm_shmat)(struct kern_ipc_perm *, char *, int); int (*sem_alloc_security)(struct kern_ipc_perm *); void (*sem_free_security)(struct kern_ipc_perm *); int (*sem_associate)(struct kern_ipc_perm *, int); int (*sem_semctl)(struct kern_ipc_perm *, int); int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int); int (*netlink_send)(struct sock *, struct sk_buff *); void (*d_instantiate)(struct dentry *, struct inode *); int (*getselfattr)(unsigned int, struct lsm_ctx *, u32 *, u32); int (*setselfattr)(unsigned int, struct lsm_ctx *, u32, u32); int (*getprocattr)(struct task_struct *, const char *, char **); int (*setprocattr)(const char *, void *, size_t); int (*ismaclabel)(const char *); int (*secid_to_secctx)(u32, char **, u32 *); int (*secctx_to_secid)(const char *, u32, u32 *); void (*release_secctx)(char *, u32); void (*inode_invalidate_secctx)(struct inode *); int (*inode_notifysecctx)(struct inode *, void *, u32); int (*inode_setsecctx)(struct dentry *, void *, u32); int (*inode_getsecctx)(struct inode *, void **, u32 *); int (*unix_stream_connect)(struct sock *, struct sock *, struct sock *); int (*unix_may_send)(struct socket *, struct socket *); int (*socket_create)(int, int, int, int); int (*socket_post_create)(struct socket *, int, int, int, int); int (*socket_socketpair)(struct socket *, struct socket *); int (*socket_bind)(struct socket *, struct sockaddr *, int); int 
(*socket_connect)(struct socket *, struct sockaddr *, int); int (*socket_listen)(struct socket *, int); int (*socket_accept)(struct socket *, struct socket *); int (*socket_sendmsg)(struct socket *, struct msghdr *, int); int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int); int (*socket_getsockname)(struct socket *); int (*socket_getpeername)(struct socket *); int (*socket_getsockopt)(struct socket *, int, int); int (*socket_setsockopt)(struct socket *, int, int); int (*socket_shutdown)(struct socket *, int); int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff *); int (*socket_getpeersec_stream)(struct socket *, sockptr_t, sockptr_t, unsigned int); int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff *, u32 *); int (*sk_alloc_security)(struct sock *, int, gfp_t); void (*sk_free_security)(struct sock *); void (*sk_clone_security)(const struct sock *, struct sock *); void (*sk_getsecid)(const struct sock *, u32 *); void (*sock_graft)(struct sock *, struct socket *); int (*inet_conn_request)(const struct sock *, struct sk_buff *, struct request_sock *); void (*inet_csk_clone)(struct sock *, const struct request_sock *); void (*inet_conn_established)(struct sock *, struct sk_buff *); int (*secmark_relabel_packet)(u32); void (*secmark_refcount_inc)(void); void (*secmark_refcount_dec)(void); void (*req_classify_flow)(const struct request_sock *, struct flowi_common *); int (*tun_dev_alloc_security)(void *); int (*tun_dev_create)(void); int (*tun_dev_attach_queue)(void *); int (*tun_dev_attach)(struct sock *, void *); int (*tun_dev_open)(void *); int (*sctp_assoc_request)(struct sctp_association *, struct sk_buff *); int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int); void (*sctp_sk_clone)(struct sctp_association *, struct sock *, struct sock *); int (*sctp_assoc_established)(struct sctp_association *, struct sk_buff *); int (*mptcp_add_subflow)(struct sock *, struct sock *); int (*key_alloc)(struct key *, const struct cred *, long unsigned int); int (*key_permission)(key_ref_t, const struct cred *, enum key_need_perm); int (*key_getsecurity)(struct key *, char **); void (*key_post_create_or_update)(struct key *, struct key *, const void *, size_t, long unsigned int, bool); int (*audit_rule_init)(u32, u32, char *, void **, gfp_t); int (*audit_rule_known)(struct audit_krule *); int (*audit_rule_match)(u32, u32, u32, void *); void (*audit_rule_free)(void *); int (*bpf)(int, union bpf_attr *, unsigned int); int (*bpf_map)(struct bpf_map *, fmode_t); int (*bpf_prog)(struct bpf_prog *); int (*bpf_map_create)(struct bpf_map *, union bpf_attr *, struct bpf_token *); void (*bpf_map_free)(struct bpf_map *); int (*bpf_prog_load)(struct bpf_prog *, union bpf_attr *, struct bpf_token *); void (*bpf_prog_free)(struct bpf_prog *); int (*bpf_token_create)(struct bpf_token *, union bpf_attr *, const struct path *); void (*bpf_token_free)(struct bpf_token *); int (*bpf_token_cmd)(const struct bpf_token *, enum bpf_cmd); int (*bpf_token_capable)(const struct bpf_token *, int); int (*locked_down)(enum lockdown_reason); int (*perf_event_open)(struct perf_event_attr *, int); int (*perf_event_alloc)(struct perf_event *); int (*perf_event_read)(struct perf_event *); int (*perf_event_write)(struct perf_event *); int (*uring_override_creds)(const struct cred *); int (*uring_sqpoll)(void); int (*uring_cmd)(struct io_uring_cmd *); void (*initramfs_populated)(void); int (*bdev_alloc_security)(struct block_device *); void (*bdev_free_security)(struct block_device *); int 
(*bdev_setintegrity)(struct block_device *, enum lsm_integrity_type, const void *, size_t); void *lsm_func_addr; }; struct security_hook_list { struct lsm_static_call *scalls; union security_list_options hook; const struct lsm_id *lsmid; }; struct seg6_local_lwt; struct seg6_local_lwtunnel_ops { int (*build_state)(struct seg6_local_lwt *, const void *, struct netlink_ext_ack *); void (*destroy_state)(struct seg6_local_lwt *); }; struct seg6_action_desc { int action; long unsigned int attrs; long unsigned int optattrs; int (*input)(struct sk_buff *, struct seg6_local_lwt *); int static_headroom; struct seg6_local_lwtunnel_ops slwt_ops; }; struct seg6_action_param { int (*parse)(struct nlattr **, struct seg6_local_lwt *, struct netlink_ext_ack *); int (*put)(struct sk_buff *, struct seg6_local_lwt *); int (*cmp)(struct seg6_local_lwt *, struct seg6_local_lwt *); void (*destroy)(struct seg6_local_lwt *); }; struct seg6_bpf_srh_state { local_lock_t bh_lock; struct ipv6_sr_hdr *srh; u16 hdrlen; bool valid; }; struct seg6_end_dt_info { enum seg6_end_dt_mode mode; struct net *net; int vrf_ifindex; int vrf_table; u16 family; }; struct seg6_flavors_info { __u32 flv_ops; __u8 lcblock_bits; __u8 lcnode_func_bits; }; struct seg6_iptunnel_encap { int mode; struct ipv6_sr_hdr srh[0]; }; struct seg6_local_counters { __u64 packets; __u64 bytes; __u64 errors; }; struct seg6_local_lwt { int action; struct ipv6_sr_hdr *srh; int table; struct in_addr nh4; struct in6_addr nh6; int iif; int oif; struct bpf_lwt_prog bpf; struct seg6_end_dt_info dt_info; struct seg6_flavors_info flv_info; struct pcpu_seg6_local_counters *pcpu_counters; int headroom; struct seg6_action_desc *desc; long unsigned int parsed_optattrs; }; struct seg6_lwt { struct dst_cache cache; struct seg6_iptunnel_encap tuninfo[0]; }; struct seg6_pernet_data { struct mutex lock; struct in6_addr *tun_src; }; struct sel_netif { struct list_head list; struct netif_security_struct nsec; struct callback_head callback_head; }; struct sel_netnode { struct netnode_security_struct nsec; struct list_head list; struct callback_head rcu; }; struct sel_netnode_bkt { unsigned int size; struct list_head list; }; struct sel_netport { struct netport_security_struct psec; struct list_head list; struct callback_head rcu; }; struct sel_netport_bkt { int size; struct list_head list; }; struct select_data { struct dentry *start; union { long int found; struct dentry *victim; }; struct list_head dispose; }; struct selinux_audit_data { u32 ssid; u32 tsid; u16 tclass; u32 requested; u32 audited; u32 denied; int result; }; struct selinux_audit_rule { u32 au_seqno; struct context___2 au_ctxt; }; struct selinux_avc { unsigned int avc_cache_threshold; struct avc_cache avc_cache; }; struct selinux_fs_info { struct dentry *bool_dir; unsigned int bool_num; char **bool_pending_names; int *bool_pending_values; struct dentry *class_dir; long unsigned int last_class_ino; bool policy_opened; struct dentry *policycap_dir; long unsigned int last_ino; struct super_block *sb; }; struct selinux_kernel_status { u32 version; u32 sequence; u32 enforcing; u32 policyload; u32 deny_unknown; }; struct selinux_policy; struct selinux_policy_convert_data; struct selinux_load_state { struct selinux_policy *policy; struct selinux_policy_convert_data *convert_data; }; struct selinux_mapping; struct selinux_map { struct selinux_mapping *mapping; u16 size; }; struct selinux_mapping { u16 value; u16 num_perms; u32 perms[32]; }; struct selinux_mnt_opts { u32 fscontext_sid; u32 context_sid; u32 
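/*
 * union security_list_options / struct security_hook_list above are the
 * LSM registration machinery. A kernel-side sketch wiring up a single
 * hook; the demo_* names and lsm_id instance are hypothetical, while
 * LSM_HOOK_INIT(), security_add_hooks() and DEFINE_LSM() are the real
 * helpers (the lsmid parameter matches recent kernels):
 *
 *   static int demo_inode_permission(struct inode *inode, int mask)
 *   {
 *       return 0;   // observe only, never deny
 *   }
 *
 *   static struct security_hook_list demo_hooks[] = {
 *       LSM_HOOK_INIT(inode_permission, demo_inode_permission),
 *   };
 *
 *   static int __init demo_lsm_init(void)
 *   {
 *       security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), &demo_lsmid);
 *       return 0;
 *   }
 *   DEFINE_LSM(demo) = { .name = "demo", .init = demo_lsm_init };
 */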
rootcontext_sid; u32 defcontext_sid; }; struct sidtab; struct selinux_policy { struct sidtab *sidtab; struct policydb policydb; struct selinux_map map; u32 latest_granting; }; struct sidtab_convert_params { struct convert_context_args *args; struct sidtab *target; }; struct selinux_policy_convert_data { struct convert_context_args args; struct sidtab_convert_params sidtab_params; }; struct selinux_state { bool enforcing; bool initialized; bool policycap[9]; struct page *status_page; struct mutex status_lock; struct selinux_policy *policy; struct mutex policy_mutex; }; struct selnl_msg_policyload { __u32 seqno; }; struct selnl_msg_setenforce { __s32 val; }; struct sem { int semval; struct pid *sempid; spinlock_t lock; struct list_head pending_alter; struct list_head pending_const; time64_t sem_otime; long: 64; }; struct sem_array { struct kern_ipc_perm sem_perm; time64_t sem_ctime; struct list_head pending_alter; struct list_head pending_const; struct list_head list_id; int sem_nsems; int complex_count; unsigned int use_global_lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct sem sems[0]; }; struct sem_undo; struct sem_queue { struct list_head list; struct task_struct *sleeper; struct sem_undo *undo; struct pid *pid; int status; struct sembuf *sops; struct sembuf *blocking; int nsops; bool alter; bool dupsop; }; struct sem_undo_list; struct sem_undo { struct list_head list_proc; struct callback_head rcu; struct sem_undo_list *ulp; struct list_head list_id; int semid; short int semadj[0]; }; struct sem_undo_list { refcount_t refcnt; spinlock_t lock; struct list_head list_proc; }; struct semaphore_waiter { struct list_head list; struct task_struct *task; bool up; }; struct sembuf { short unsigned int sem_num; short int sem_op; short int sem_flg; }; struct semid64_ds { struct ipc64_perm sem_perm; __kernel_long_t sem_otime; __kernel_ulong_t __unused1; __kernel_long_t sem_ctime; __kernel_ulong_t __unused2; __kernel_ulong_t sem_nsems; __kernel_ulong_t __unused3; __kernel_ulong_t __unused4; }; struct semid_ds { struct ipc_perm sem_perm; __kernel_old_time_t sem_otime; __kernel_old_time_t sem_ctime; struct sem *sem_base; struct sem_queue *sem_pending; struct sem_queue **sem_pending_last; struct sem_undo *undo; short unsigned int sem_nsems; }; struct seminfo { int semmap; int semmni; int semmns; int semmnu; int semmsl; int semopm; int semume; int semusz; int semvmx; int semaem; }; struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; u32 sig; enum pid_type type; }; struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); }; struct serial8250_config { const char *name; short unsigned int fifo_size; short unsigned int tx_loadsz; unsigned char fcr; unsigned char rxtrig_bytes[4]; unsigned int flags; }; struct serial_ctrl_device { struct device dev; struct ida port_ida; }; struct serial_icounter_struct { int cts; int dsr; int rng; int dcd; int rx; int tx; int frame; int overrun; int parity; int brk; int buf_overrun; int reserved[9]; }; struct serial_port_device { struct device dev; struct uart_port *port; unsigned int tx_enabled: 1; }; struct serial_private { struct pci_dev *dev; unsigned int nr; struct pci_serial_quirk *quirk; const struct pciserial_board *board; int line[0]; }; struct serial_struct { int type; int line; unsigned int port; int irq; int flags; int xmit_fifo_size; int custom_divisor; 
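/*
 Example: struct seq_operations above is the iterator contract behind
 seq_file. A minimal sketch of a read-only /proc file walking a static array
 (the file name "seq_example" is arbitrary):

 #include <linux/module.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>

 static const char *items[] = { "alpha", "beta", "gamma" };

 // start/next hand back an opaque cursor (here: &items[*pos]);
 // returning NULL ends the walk, show emits one record per cursor.
 static void *ex_start(struct seq_file *m, loff_t *pos)
 {
     return *pos < ARRAY_SIZE(items) ? (void *)&items[*pos] : NULL;
 }

 static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
 {
     ++*pos;
     return ex_start(m, pos);
 }

 static void ex_stop(struct seq_file *m, void *v) { }

 static int ex_show(struct seq_file *m, void *v)
 {
     seq_printf(m, "%s\n", *(const char **)v);
     return 0;
 }

 static const struct seq_operations ex_seq_ops = {
     .start = ex_start, .next = ex_next, .stop = ex_stop, .show = ex_show,
 };

 static int __init ex_init(void)
 {
     return proc_create_seq("seq_example", 0444, NULL, &ex_seq_ops)
         ? 0 : -ENOMEM;
 }
 module_init(ex_init);
 MODULE_LICENSE("GPL");
*/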
int baud_base; short unsigned int close_delay; char io_type; char reserved_char[1]; int hub6; short unsigned int closing_wait; short unsigned int closing_wait2; unsigned char *iomem_base; short unsigned int iomem_reg_shift; unsigned int port_high; long unsigned int iomap_base; }; struct serio_device_id { __u8 type; __u8 extra; __u8 id; __u8 proto; }; struct serio_driver; struct serio { void *port_data; char name[32]; char phys[32]; char firmware_id[128]; bool manual_bind; struct serio_device_id id; spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); void (*close)(struct serio *); int (*start)(struct serio *); void (*stop)(struct serio *); struct serio *parent; struct list_head child_node; struct list_head children; unsigned int depth; struct serio_driver *drv; struct mutex drv_mutex; struct device dev; struct list_head node; struct mutex *ps2_cmd_mutex; }; struct serio_driver { const char *description; const struct serio_device_id *id_table; bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *); int (*reconnect)(struct serio *); int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); struct device_driver driver; }; struct serio_event { enum serio_event_type type; void *object; struct module *owner; struct list_head node; }; struct tty_struct; struct serport { struct tty_struct *tty; wait_queue_head_t wait; struct serio *serio; struct serio_device_id id; spinlock_t lock; long unsigned int flags; }; struct set_affinity_pending { refcount_t refs; unsigned int stop_pending; struct completion done; struct cpu_stop_work stop_work; struct migration_arg arg; }; struct set_mtrr_data { long unsigned int smp_base; long unsigned int smp_size; unsigned int smp_reg; mtrr_type smp_type; }; struct setup_indirect { __u32 type; __u32 reserved; __u64 len; __u64 addr; }; struct severity { u64 mask; u64 result; unsigned char sev; short unsigned int mcgmask; short unsigned int mcgres; unsigned char ser; unsigned char context; unsigned char excp; unsigned char covered; unsigned int cpu_vfm; unsigned char cpu_minstepping; unsigned char bank_lo; unsigned char bank_hi; char *msg; }; struct sg { struct ext4_group_info info; ext4_grpblk_t counters[18]; }; struct sg_append_table { struct sg_table sgt; struct scatterlist *prv; unsigned int total_nents; }; struct sg_page_iter { struct scatterlist *sg; unsigned int sg_pgoffset; unsigned int __nents; int __pg_advance; }; struct sg_dma_page_iter { struct sg_page_iter base; }; struct sg_io_v4 { __s32 guard; __u32 protocol; __u32 subprotocol; __u32 request_len; __u64 request; __u64 request_tag; __u32 request_attr; __u32 request_priority; __u32 request_extra; __u32 max_response_len; __u64 response; __u32 dout_iovec_count; __u32 dout_xfer_len; __u32 din_iovec_count; __u32 din_xfer_len; __u64 dout_xferp; __u64 din_xferp; __u32 timeout; __u32 flags; __u64 usr_ptr; __u32 spare_in; __u32 driver_status; __u32 transport_status; __u32 device_status; __u32 retry_delay; __u32 info; __u32 duration; __u32 response_len; __s32 din_resid; __s32 dout_resid; __u64 generated_tag; __u32 spare_out; __u32 padding; }; struct sg_mapping_iter { struct page *page; void *addr; size_t length; size_t consumed; struct sg_page_iter piter; unsigned int __offset; unsigned int __remaining; unsigned int __flags; }; struct sgi_volume { s8 name[8]; __be32 block_num; __be32 num_bytes; }; struct 
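/*
 Example: sg_mapping_iter above is the cursor used by the sg_miter_*()
 helpers to walk a scatterlist through temporarily mapped pages. A sketch
 that zero-fills every segment:

 #include <linux/scatterlist.h>
 #include <linux/string.h>

 static size_t zero_sg(struct scatterlist *sgl, unsigned int nents)
 {
     struct sg_mapping_iter miter;
     size_t done = 0;

     // SG_MITER_TO_SG: we write into the list, so flush as we go.
     sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
     while (sg_miter_next(&miter)) {
         memset(miter.addr, 0, miter.length);
         done += miter.length;
     }
     sg_miter_stop(&miter);
     return done;
 }
*/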
sgi_partition { __be32 num_blocks; __be32 first_block; __be32 type; }; struct sgi_disklabel { __be32 magic_mushroom; __be16 root_part_num; __be16 swap_part_num; s8 boot_file[16]; u8 _unused0[48]; struct sgi_volume volume[15]; struct sgi_partition partitions[16]; __be32 csum; __be32 _unused1; }; struct sha1_state { u32 state[5]; u64 count; u8 buffer[64]; }; struct sha256_state { u32 state[8]; u64 count; u8 buf[64]; }; struct sha3_state { u64 st[25]; unsigned int rsiz; unsigned int rsizw; unsigned int partial; u8 buf[144]; }; struct sha512_state { u64 state[8]; u64 count[2]; u8 buf[128]; }; struct shared_policy { struct rb_root root; rwlock_t lock; }; struct shash_alg { int (*init)(struct shash_desc *); int (*update)(struct shash_desc *, const u8 *, unsigned int); int (*final)(struct shash_desc *, u8 *); int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); int (*export)(struct shash_desc *, void *); int (*import)(struct shash_desc *, const void *); int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); int (*init_tfm)(struct crypto_shash *); void (*exit_tfm)(struct crypto_shash *); int (*clone_tfm)(struct crypto_shash *, struct crypto_shash *); unsigned int descsize; union { struct { unsigned int digestsize; unsigned int statesize; struct crypto_alg base; }; struct hash_alg_common halg; }; }; struct shash_instance { void (*free)(struct shash_instance *); union { struct { char head[104]; struct crypto_instance base; } s; struct shash_alg alg; }; }; struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; struct shm_info { int used_ids; __kernel_ulong_t shm_tot; __kernel_ulong_t shm_rss; __kernel_ulong_t shm_swp; __kernel_ulong_t swap_attempts; __kernel_ulong_t swap_successes; }; struct shmem_falloc { wait_queue_head_t *waitq; long unsigned int start; long unsigned int next; long unsigned int nr_falloced; long unsigned int nr_unswapped; }; struct shmem_inode_info { spinlock_t lock; unsigned int seals; long unsigned int flags; long unsigned int alloced; long unsigned int swapped; union { struct offset_ctx dir_offsets; struct { struct list_head shrinklist; struct list_head swaplist; }; }; struct timespec64 i_crtime; struct shared_policy policy; struct simple_xattrs xattrs; long unsigned int fallocend; unsigned int fsflags; atomic_t stop_eviction; struct inode vfs_inode; }; struct shmem_quota_limits { qsize_t usrquota_bhardlimit; qsize_t usrquota_ihardlimit; qsize_t grpquota_bhardlimit; qsize_t grpquota_ihardlimit; }; struct shmem_options { long long unsigned int blocks; long long unsigned int inodes; struct mempolicy *mpol; kuid_t uid; kgid_t gid; umode_t mode; bool full_inums; int huge; int seen; bool noswap; short unsigned int quota_types; struct shmem_quota_limits qlimits; }; struct shmem_sb_info { long unsigned int max_blocks; struct percpu_counter used_blocks; long unsigned int max_inodes; long unsigned int free_ispace; raw_spinlock_t stat_lock; umode_t mode; unsigned char huge; kuid_t uid; kgid_t gid; bool full_inums; bool noswap; ino_t next_ino; ino_t *ino_batch; struct mempolicy *mpol; spinlock_t shrinklist_lock; struct list_head shrinklist; long unsigned int shrinklist_len; struct shmem_quota_limits qlimits; }; struct shmid64_ds { struct ipc64_perm shm_perm; __kernel_size_t shm_segsz; long int shm_atime; long int shm_dtime; long int shm_ctime; __kernel_pid_t shm_cpid; __kernel_pid_t shm_lpid; long unsigned int shm_nattch; long 
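/*
 Example: shash_alg above is the synchronous hash interface; callers work
 through a crypto_shash handle. A minimal sketch of a one-shot SHA-256
 (the matching sha256_state layout appears above):

 #include <crypto/hash.h>
 #include <linux/err.h>

 static int sha256_oneshot(const u8 *data, unsigned int len, u8 out[32])
 {
     struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
     int ret;

     if (IS_ERR(tfm))
         return PTR_ERR(tfm);
     // Convenience wrapper: allocates the descriptor internally and
     // runs init/update/final in one call.
     ret = crypto_shash_tfm_digest(tfm, data, len, out);
     crypto_free_shash(tfm);
     return ret;
 }
*/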
unsigned int __unused4; long unsigned int __unused5; }; struct shmid_ds { struct ipc_perm shm_perm; int shm_segsz; __kernel_old_time_t shm_atime; __kernel_old_time_t shm_dtime; __kernel_old_time_t shm_ctime; __kernel_ipc_pid_t shm_cpid; __kernel_ipc_pid_t shm_lpid; short unsigned int shm_nattch; short unsigned int shm_unused; void *shm_unused2; void *shm_unused3; }; struct shmid_kernel { struct kern_ipc_perm shm_perm; struct file *shm_file; long unsigned int shm_nattch; long unsigned int shm_segsz; time64_t shm_atim; time64_t shm_dtim; time64_t shm_ctim; struct pid *shm_cprid; struct pid *shm_lprid; struct ucounts *mlock_ucounts; struct task_struct *shm_creator; struct list_head shm_clist; struct ipc_namespace *ns; long: 64; long: 64; long: 64; }; struct shminfo { int shmmax; int shmmin; int shmmni; int shmseg; int shmall; }; struct shminfo64 { long unsigned int shmmax; long unsigned int shmmin; long unsigned int shmmni; long unsigned int shmseg; long unsigned int shmall; long unsigned int __unused1; long unsigned int __unused2; long unsigned int __unused3; long unsigned int __unused4; }; struct show_busy_params { struct seq_file *m; struct blk_mq_hw_ctx *hctx; }; struct shrink_control { gfp_t gfp_mask; int nid; long unsigned int nr_to_scan; long unsigned int nr_scanned; struct mem_cgroup *memcg; }; struct shrinker { long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); long int batch; int seeks; unsigned int flags; refcount_t refcount; struct completion done; struct callback_head rcu; void *private_data; struct list_head list; int id; atomic_long_t *nr_deferred; }; struct shrinker_info_unit; struct shrinker_info { struct callback_head rcu; int map_nr_max; struct shrinker_info_unit *unit[0]; }; struct shrinker_info_unit { atomic_long_t nr_deferred[64]; long unsigned int map[1]; }; struct sidtab_node_inner; struct sidtab_node_leaf; union sidtab_entry_inner { struct sidtab_node_inner *ptr_inner; struct sidtab_node_leaf *ptr_leaf; }; struct sidtab_str_cache; struct sidtab_entry { u32 sid; u32 hash; struct context___2 context; struct sidtab_str_cache *cache; struct hlist_node list; }; struct sidtab_isid_entry { int set; struct sidtab_entry entry; }; struct sidtab { union sidtab_entry_inner roots[4]; u32 count; struct sidtab_convert_params *convert; bool frozen; spinlock_t lock; u32 cache_free_slots; struct list_head cache_lru_list; spinlock_t cache_lock; struct sidtab_isid_entry isids[27]; struct hlist_head context_to_sid[512]; }; struct sidtab_node_inner { union sidtab_entry_inner entries[512]; }; struct sidtab_node_leaf { struct sidtab_entry entries[39]; }; struct sidtab_str_cache { struct callback_head rcu_member; struct list_head lru_member; struct sidtab_entry *parent; u32 len; char str[0]; }; typedef struct sigevent sigevent_t; struct sighand_struct { spinlock_t siglock; refcount_t count; wait_queue_head_t signalfd_wqh; struct k_sigaction action[64]; }; struct sigpending { struct list_head list; sigset_t signal; }; struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; }; struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; }; struct taskstats; struct tty_audit_buf; struct signal_struct { refcount_t sigcnt; atomic_t live; int nr_threads; int quick_threads; struct list_head thread_head; 
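/*
 Example: struct shrinker above (note the refcount/completion members) is the
 dynamically allocated form registered via shrinker_alloc() and
 shrinker_register(). A sketch for a private object cache, where
 ex_count_cached() and ex_drop_cached() are hypothetical helpers:

 #include <linux/shrinker.h>

 static unsigned long ex_count(struct shrinker *s, struct shrink_control *sc)
 {
     // How many objects could be freed right now (0 means nothing to do).
     return ex_count_cached();               // hypothetical
 }

 static unsigned long ex_scan(struct shrinker *s, struct shrink_control *sc)
 {
     // Free up to sc->nr_to_scan objects, report how many went away.
     return ex_drop_cached(sc->nr_to_scan);  // hypothetical
 }

 static struct shrinker *ex_shrinker;

 static int ex_register(void)
 {
     ex_shrinker = shrinker_alloc(0, "example-cache");
     if (!ex_shrinker)
         return -ENOMEM;
     ex_shrinker->count_objects = ex_count;
     ex_shrinker->scan_objects  = ex_scan;
     shrinker_register(ex_shrinker);
     return 0;
 }
*/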
wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; struct hlist_head multiprocess; int group_exit_code; int notify_count; struct task_struct *group_exec_task; int group_stop_count; unsigned int flags; struct core_state *core_state; unsigned int is_child_subreaper: 1; unsigned int has_child_subreaper: 1; unsigned int next_posix_timer_id; struct hlist_head posix_timers; struct hrtimer real_timer; ktime_t it_real_incr; struct cpu_itimer it[2]; struct thread_group_cputimer cputimer; struct posix_cputimers posix_cputimers; struct pid *pids[4]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; seqlock_t stats_lock; u64 utime; u64 stime; u64 cutime; u64 cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; long unsigned int nvcsw; long unsigned int nivcsw; long unsigned int cnvcsw; long unsigned int cnivcsw; long unsigned int min_flt; long unsigned int maj_flt; long unsigned int cmin_flt; long unsigned int cmaj_flt; long unsigned int inblock; long unsigned int oublock; long unsigned int cinblock; long unsigned int coublock; long unsigned int maxrss; long unsigned int cmaxrss; struct task_io_accounting ioac; long long unsigned int sum_sched_runtime; struct rlimit rlim[16]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short int oom_score_adj; short int oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; struct rw_semaphore exec_update_lock; }; struct signalfd_ctx { sigset_t sigmask; }; struct signalfd_siginfo { __u32 ssi_signo; __s32 ssi_errno; __s32 ssi_code; __u32 ssi_pid; __u32 ssi_uid; __s32 ssi_fd; __u32 ssi_tid; __u32 ssi_band; __u32 ssi_overrun; __u32 ssi_trapno; __s32 ssi_status; __s32 ssi_int; __u64 ssi_ptr; __u64 ssi_utime; __u64 ssi_stime; __u64 ssi_addr; __u16 ssi_addr_lsb; __u16 __pad2; __s32 ssi_syscall; __u64 ssi_call_addr; __u32 ssi_arch; __u8 __pad[28]; }; struct sigpool_entry { struct crypto_ahash *hash; const char *alg; struct kref kref; uint16_t needs_key: 1; uint16_t reserved: 15; }; struct sigpool_scratch { local_lock_t bh_lock; void *pad; }; struct sigqueue { struct list_head list; int flags; kernel_siginfo_t info; struct ucounts *ucounts; }; struct sigset_argpack { sigset_t *p; size_t size; }; struct simple_attr { int (*get)(void *, u64 *); int (*set)(void *, u64); char get_buf[24]; char set_buf[24]; void *data; const char *fmt; struct mutex mutex; }; struct simple_transaction_argresp { ssize_t size; char data[0]; }; struct simple_xattr { struct rb_node rb_node; char *name; size_t size; char value[0]; }; struct simplefb_platform_data { u32 width; u32 height; u32 stride; const char *format; }; struct sioc_sg_req { struct in_addr src; struct in_addr grp; long unsigned int pktcnt; long unsigned int bytecnt; long unsigned int wrong_if; }; struct sioc_vif_req { vifi_t vifi; long unsigned int icount; long unsigned int ocount; long unsigned int ibytes; long unsigned int obytes; }; struct sit_net { struct ip_tunnel *tunnels_r_l[16]; struct ip_tunnel *tunnels_r[16]; struct ip_tunnel *tunnels_l[16]; struct ip_tunnel *tunnels_wc[1]; struct ip_tunnel **tunnels[4]; struct net_device *fb_tunnel_dev; }; struct sk_buff__safe_rcu_or_null { struct sock *sk; }; struct sk_buff_fclones { struct sk_buff skb1; struct sk_buff skb2; refcount_t fclone_ref; }; struct sk_filter { refcount_t refcnt; struct callback_head rcu; struct bpf_prog *prog; }; struct strp_stats { long long unsigned int msgs; long long unsigned int 
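/*
 Example: signalfd_siginfo above is the fixed-size record user space reads
 from a signalfd(2) descriptor. A minimal consumer:

 #include <sys/signalfd.h>
 #include <signal.h>
 #include <stdio.h>
 #include <unistd.h>

 int main(void)
 {
     sigset_t mask;
     struct signalfd_siginfo ssi;
     int fd;

     sigemptyset(&mask);
     sigaddset(&mask, SIGINT);
     sigprocmask(SIG_BLOCK, &mask, NULL);  // block normal delivery first

     fd = signalfd(-1, &mask, 0);
     if (fd < 0)
         return 1;

     // Each successful read() returns whole signalfd_siginfo records.
     if (read(fd, &ssi, sizeof(ssi)) == sizeof(ssi))
         printf("signal %u from pid %u\n", ssi.ssi_signo, ssi.ssi_pid);
     close(fd);
     return 0;
 }
*/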
bytes; unsigned int mem_fail; unsigned int need_more_hdr; unsigned int msg_too_big; unsigned int msg_timeouts; unsigned int bad_hdr_len; }; struct strparser; struct strp_callbacks { int (*parse_msg)(struct strparser *, struct sk_buff *); void (*rcv_msg)(struct strparser *, struct sk_buff *); int (*read_sock_done)(struct strparser *, int); void (*abort_parser)(struct strparser *, int); void (*lock)(struct strparser *); void (*unlock)(struct strparser *); }; struct strparser { struct sock *sk; u32 stopped: 1; u32 paused: 1; u32 aborted: 1; u32 interrupted: 1; u32 unrecov_intr: 1; struct sk_buff **skb_nextp; struct sk_buff *skb_head; unsigned int need_bytes; struct delayed_work msg_timer_work; struct work_struct work; struct strp_stats stats; struct strp_callbacks cb; }; struct sk_psock_work_state { u32 len; u32 off; }; struct sk_psock { struct sock *sk; struct sock *sk_redir; u32 apply_bytes; u32 cork_bytes; u32 eval; bool redir_ingress; struct sk_msg *cork; struct sk_psock_progs progs; struct strparser strp; struct sk_buff_head ingress_skb; struct list_head ingress_msg; spinlock_t ingress_lock; long unsigned int state; struct list_head link; spinlock_t link_lock; refcount_t refcnt; void (*saved_unhash)(struct sock *); void (*saved_destroy)(struct sock *); void (*saved_close)(struct sock *, long int); void (*saved_write_space)(struct sock *); void (*saved_data_ready)(struct sock *); int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; }; struct sk_psock_link { struct list_head list; struct bpf_map *map; void *link_raw; }; struct sk_security_struct { enum { NLBL_UNSET = 0, NLBL_REQUIRE = 1, NLBL_LABELED = 2, NLBL_REQSKB = 3, NLBL_CONNLABELED = 4, } nlbl_state; struct netlbl_lsm_secattr *nlbl_secattr; u32 sid; u32 peer_sid; u16 sclass; enum { SCTP_ASSOC_UNSET = 0, SCTP_ASSOC_SET = 1, } sctp_assoc_state; }; struct tls_msg { u8 control; }; struct sk_skb_cb { unsigned char data[20]; unsigned char pad[4]; struct _strp_msg strp; struct tls_msg tls; u64 temp_reg; }; struct skb_checksum_ops { __wsum (*update)(const void *, int, __wsum); __wsum (*combine)(__wsum, __wsum, int, int); }; struct skb_ext { refcount_t refcnt; u8 offset[3]; u8 chunks; char data[0]; }; struct skb_frag { netmem_ref netmem; unsigned int len; unsigned int offset; }; typedef struct skb_frag skb_frag_t; struct skb_free_array { unsigned int skb_count; void *skb_array[16]; }; struct skb_gso_cb { union { int mac_offset; int data_offset; }; int encap_level; __wsum csum; __u16 csum_start; }; struct skb_seq_state { __u32 lower_offset; __u32 upper_offset; __u32 frag_idx; __u32 stepped_offset; struct sk_buff *root_skb; struct sk_buff *cur_skb; __u8 *frag_data; __u32 frag_off; }; struct skb_shared_hwtstamps { union { ktime_t hwtstamp; void *netdev_data; }; }; struct xsk_tx_metadata_compl { __u64 *tx_timestamp; }; struct skb_shared_info { __u8 flags; __u8 meta_len; __u8 nr_frags; __u8 tx_flags; short unsigned int gso_size; short unsigned int gso_segs; struct sk_buff *frag_list; union { struct skb_shared_hwtstamps hwtstamps; struct xsk_tx_metadata_compl xsk_meta; }; unsigned int gso_type; u32 tskey; atomic_t dataref; unsigned int xdp_frags_size; void *destructor_arg; skb_frag_t frags[17]; }; struct skcipher_alg { int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); int (*encrypt)(struct skcipher_request *); int (*decrypt)(struct skcipher_request *); int 
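/*
 Example: strparser/strp_callbacks above frame a byte stream into discrete
 messages. A sketch for a 4-byte big-endian length-prefix protocol; after
 strp_init(&strp, sk, &ex_cb), the socket's data_ready path feeds it via
 strp_data_ready(&strp):

 #include <net/strparser.h>

 // Return the full message length once it is determinable, 0 if more
 // data is needed, or a negative error to abort the parser.
 static int ex_parse(struct strparser *strp, struct sk_buff *skb)
 {
     struct strp_msg *stm = strp_msg(skb);
     __be32 len;

     if (skb_copy_bits(skb, stm->offset, &len, sizeof(len)))
         return 0;
     return sizeof(len) + be32_to_cpu(len);
 }

 // Called once per complete message; owns (and here simply drops) the skb.
 static void ex_rcv(struct strparser *strp, struct sk_buff *skb)
 {
     kfree_skb(skb);
 }

 static const struct strp_callbacks ex_cb = {
     .parse_msg = ex_parse,
     .rcv_msg   = ex_rcv,
 };
*/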
(*export)(struct skcipher_request *, void *); int (*import)(struct skcipher_request *, const void *); int (*init)(struct crypto_skcipher *); void (*exit)(struct crypto_skcipher *); unsigned int walksize; union { struct { unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; unsigned int statesize; struct crypto_alg base; }; struct skcipher_alg_common co; }; }; struct skcipher_ctx_simple { struct crypto_cipher *cipher; }; struct skcipher_instance { void (*free)(struct skcipher_instance *); union { struct { char head[88]; struct crypto_instance base; } s; struct skcipher_alg alg; }; }; struct skcipher_walk { union { struct { struct page *page; long unsigned int offset; } phys; struct { u8 *page; void *addr; } virt; } src; union { struct { struct page *page; long unsigned int offset; } phys; struct { u8 *page; void *addr; } virt; } dst; struct scatter_walk in; unsigned int nbytes; struct scatter_walk out; unsigned int total; struct list_head buffers; u8 *page; u8 *buffer; u8 *oiv; void *iv; unsigned int ivsize; int flags; unsigned int blocksize; unsigned int stride; unsigned int alignmask; }; struct skcipher_walk_buffer { struct list_head entry; struct scatter_walk dst; unsigned int len; u8 *data; u8 buffer[0]; }; struct sku_microcode { u32 vfm; u8 stepping; u32 microcode; }; struct slab { long unsigned int __page_flags; struct kmem_cache *slab_cache; union { struct { union { struct list_head slab_list; struct { struct slab *next; int slabs; }; }; union { struct { void *freelist; union { long unsigned int counters; struct { unsigned int inuse: 16; unsigned int objects: 15; unsigned int frozen: 1; }; }; }; freelist_aba_t freelist_counter; }; }; struct callback_head callback_head; }; unsigned int __page_type; atomic_t __page_refcount; long unsigned int obj_exts; }; struct slab_attribute { struct attribute attr; ssize_t (*show)(struct kmem_cache *, char *); ssize_t (*store)(struct kmem_cache *, const char *, size_t); }; struct slabinfo { long unsigned int active_objs; long unsigned int num_objs; long unsigned int active_slabs; long unsigned int num_slabs; long unsigned int shared_avail; unsigned int limit; unsigned int batchcount; unsigned int shared; unsigned int objects_per_slab; unsigned int cache_order; }; struct slabobj_ext { struct obj_cgroup *objcg; }; struct tlb_slave_info { u32 head; u32 load; }; struct slave { struct net_device *dev; struct bonding *bond; int delay; long unsigned int last_link_up; long unsigned int last_tx; long unsigned int last_rx; long unsigned int target_last_arp_rx[16]; s8 link; s8 link_new_state; u8 backup: 1; u8 inactive: 1; u8 rx_disabled: 1; u8 should_notify: 1; u8 should_notify_link: 1; u8 duplex; u32 original_mtu; u32 link_failure_count; u32 speed; u16 queue_id; u8 perm_hwaddr[32]; int prio; struct ad_slave_info *ad_info; struct tlb_slave_info tlb_info; struct delayed_work notify_work; struct kobject kobj; struct rtnl_link_stats64 slave_stats; }; struct slave_attribute { struct attribute attr; ssize_t (*show)(struct slave *, char *); }; struct slub_flush_work { struct work_struct work; struct kmem_cache *s; bool skip; }; struct smca_hwid; struct smca_bank { const struct smca_hwid *hwid; u32 id; u8 sysfs_id; }; struct smca_hwid { unsigned int bank_type; u32 hwid_mcatype; }; struct smp_alt_module { struct module *mod; char *name; const s32 *locks; const s32 *locks_end; u8 *text; u8 *text_end; struct list_head next; }; struct smp_call_on_cpu_struct { struct work_struct work; struct completion done; int (*func)(void *); 
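/*
 Example: the structure being defined here (smp_call_on_cpu_struct) is the
 work item behind smp_call_on_cpu(), which runs a function in a kthread
 pinned to one CPU and waits for the result. read_node_counter() is a
 hypothetical stand-in:

 #include <linux/smp.h>

 static int read_counter(void *data)
 {
     *(u64 *)data = read_node_counter();  // hypothetical; runs on target CPU
     return 0;
 }

 static int sample_cpu(unsigned int cpu, u64 *val)
 {
     // Last argument false: no requirement to run on the physical CPU.
     return smp_call_on_cpu(cpu, read_counter, val, false);
 }
*/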
void *data; int ret; int cpu; }; struct smp_hotplug_thread { struct task_struct **store; struct list_head list; int (*thread_should_run)(unsigned int); void (*thread_fn)(unsigned int); void (*create)(unsigned int); void (*setup)(unsigned int); void (*cleanup)(unsigned int, bool); void (*park)(unsigned int); void (*unpark)(unsigned int); bool selfparking; const char *thread_comm; }; struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned int); void (*smp_cpus_done)(unsigned int); void (*stop_other_cpus)(int); void (*crash_stop_other_cpus)(void); void (*smp_send_reschedule)(int); void (*cleanup_dead_cpu)(unsigned int); void (*poll_sync_state)(void); int (*kick_ap_alive)(unsigned int, struct task_struct *); int (*cpu_disable)(void); void (*cpu_die)(unsigned int); void (*play_dead)(void); void (*stop_this_cpu)(void); void (*send_call_func_ipi)(const struct cpumask *); void (*send_call_func_single_ipi)(int); }; struct smpboot_thread_data { unsigned int cpu; unsigned int status; struct smp_hotplug_thread *ht; }; struct snmp_mib { const char *name; int entry; }; struct so_timestamping { int flags; int bind_phc; }; struct sock_bh_locked { struct sock *sock; local_lock_t bh_lock; }; struct sock_diag_handler { struct module *owner; __u8 family; int (*dump)(struct sk_buff *, struct nlmsghdr *); int (*get_info)(struct sk_buff *, struct sock *); int (*destroy)(struct sk_buff *, struct nlmsghdr *); }; struct sock_diag_inet_compat { struct module *owner; int (*fn)(struct sk_buff *, struct nlmsghdr *); }; struct sock_diag_req { __u8 sdiag_family; __u8 sdiag_protocol; }; struct sock_ee_data_rfc4884 { __u16 len; __u8 flags; __u8 reserved; }; struct sock_extended_err { __u32 ee_errno; __u8 ee_origin; __u8 ee_type; __u8 ee_code; __u8 ee_pad; __u32 ee_info; union { __u32 ee_data; struct sock_ee_data_rfc4884 ee_rfc4884; }; }; struct sock_exterr_skb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; struct sock_extended_err ee; u16 addr_offset; __be16 port; u8 opt_stats: 1; u8 unused: 7; }; struct sock_fprog { short unsigned int len; struct sock_filter *filter; }; struct sock_fprog_kern { u16 len; struct sock_filter *filter; }; struct sock_hash_seq_info { struct bpf_map *map; struct bpf_shtab *htab; u32 bucket_id; }; struct sock_map_seq_info { struct bpf_map *map; struct sock *sk; u32 index; }; struct sock_reuseport { struct callback_head rcu; u16 max_socks; u16 num_socks; u16 num_closed_socks; u16 incoming_cpu; unsigned int synq_overflow_ts; unsigned int reuseport_id; unsigned int bind_inany: 1; unsigned int has_conns: 1; struct bpf_prog *prog; struct sock *socks[0]; }; struct sock_skb_cb { u32 dropcount; }; struct sock_txtime { __kernel_clockid_t clockid; __u32 flags; }; struct sockaddr_alg_new { __u16 salg_family; __u8 salg_type[14]; __u32 salg_feat; __u32 salg_mask; __u8 salg_name[0]; }; struct sockaddr_nl { __kernel_sa_family_t nl_family; short unsigned int nl_pad; __u32 nl_pid; __u32 nl_groups; }; struct sockaddr_un { __kernel_sa_family_t sun_family; char sun_path[108]; }; struct sockaddr_vm { __kernel_sa_family_t svm_family; short unsigned int svm_reserved1; unsigned int svm_port; unsigned int svm_cid; __u8 svm_flags; unsigned char svm_zero[3]; }; struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; __u32 sxdp_ifindex; __u32 sxdp_queue_id; __u32 sxdp_shared_umem_fd; }; struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; long unsigned int flags; struct callback_head rcu; long: 64; long: 64; }; struct socket { socket_state 
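/*
 Example: sockaddr_xdp above is the bind() address for AF_XDP sockets, the
 kernel side of libxdp's xsk_socket__create(). A bare-bones sketch; a real
 program must first register a UMEM and configure the fill/completion and
 RX/TX rings via setsockopt() before bind() will succeed:

 #include <sys/socket.h>
 #include <linux/if_xdp.h>
 #include <net/if.h>
 #include <unistd.h>

 static int bind_xsk(const char *ifname, __u32 queue)
 {
     struct sockaddr_xdp sxdp = {
         .sxdp_family   = AF_XDP,
         .sxdp_flags    = XDP_USE_NEED_WAKEUP,
         .sxdp_ifindex  = if_nametoindex(ifname),
         .sxdp_queue_id = queue,
     };
     int fd = socket(AF_XDP, SOCK_RAW, 0);

     if (fd < 0)
         return -1;
     if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
         close(fd);
         return -1;
     }
     return fd;
 }
*/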
state; short int type; long unsigned int flags; struct file *file; struct sock *sk; const struct proto_ops *ops; long: 64; long: 64; long: 64; struct socket_wq wq; }; struct socket__safe_trusted_or_null { struct sock *sk; }; struct socket_alloc { struct socket socket; struct inode vfs_inode; long: 64; }; struct sockmap_link { struct bpf_link link; struct bpf_map *map; enum bpf_attach_type attach_type; }; struct softirq_action { void (*action)(void); }; struct softnet_data { struct list_head poll_list; struct sk_buff_head process_queue; local_lock_t process_queue_bh_lock; unsigned int processed; unsigned int time_squeeze; struct softnet_data *rps_ipi_list; unsigned int received_rps; bool in_net_rx_action; bool in_napi_threaded_poll; struct sd_flow_limit *flow_limit; struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; struct netdev_xmit xmit; long: 64; long: 64; long: 64; long: 64; long: 64; unsigned int input_queue_head; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; call_single_data_t csd; struct softnet_data *rps_ipi_next; unsigned int cpu; unsigned int input_queue_tail; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; long: 64; long: 64; long: 64; long: 64; long: 64; atomic_t dropped; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t defer_lock; int defer_count; int defer_ipi_scheduled; struct sk_buff *defer_list; long: 64; long: 64; call_single_data_t defer_csd; }; struct software_node { const char *name; const struct software_node *parent; const struct property_entry *properties; }; struct solaris_x86_slice { __le16 s_tag; __le16 s_flag; __le32 s_start; __le32 s_size; }; struct solaris_x86_vtoc { unsigned int v_bootinfo[3]; __le32 v_sanity; __le32 v_version; char v_volume[8]; __le16 v_sectorsz; __le16 v_nparts; unsigned int v_reserved[10]; struct solaris_x86_slice v_slice[16]; unsigned int timestamp[16]; char v_asciilabel[128]; }; struct sp_node { struct rb_node nd; long unsigned int start; long unsigned int end; struct mempolicy *policy; }; struct space_resv { __s16 l_type; __s16 l_whence; __s64 l_start; __s64 l_len; __s32 l_sysid; __u32 l_pid; __s32 l_pad[4]; }; struct splice_desc { size_t total_len; unsigned int len; unsigned int flags; union { void *userptr; struct file *file; void *data; } u; void (*splice_eof)(struct splice_desc *); loff_t pos; loff_t *opos; size_t num_spliced; bool need_wakeup; }; struct splice_pipe_desc { struct page **pages; struct partial_page *partial; int nr_pages; unsigned int nr_pages_max; const struct pipe_buf_operations *ops; void (*spd_release)(struct splice_pipe_desc *, unsigned int); }; struct squashfs_super_block { __le32 s_magic; __le32 inodes; __le32 mkfs_time; __le32 block_size; __le32 fragments; __le16 compression; __le16 block_log; __le16 flags; __le16 no_ids; __le16 s_major; __le16 s_minor; __le64 root_inode; __le64 bytes_used; __le64 id_table_start; __le64 xattr_id_table_start; __le64 inode_table_start; __le64 directory_table_start; __le64 fragment_table_start; __le64 lookup_table_start; }; struct sr6_tlv { __u8 type; __u8 len; __u8 data[0]; }; struct srcu_node; struct srcu_data { atomic_long_t srcu_lock_count[2]; atomic_long_t srcu_unlock_count[2]; int srcu_nmi_safety; long: 64; long: 64; long: 64; spinlock_t lock; struct rcu_segcblist srcu_cblist; long unsigned int srcu_gp_seq_needed; long unsigned int srcu_gp_seq_needed_exp; bool srcu_cblist_invoking; struct timer_list delay_work; struct work_struct work; struct callback_head 
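/*
 Example: struct socket above is the VFS-facing wrapper around a struct sock.
 In-kernel users create one with sock_create_kern(); a sketch:

 #include <linux/net.h>
 #include <linux/in.h>

 static int make_ksock(struct net *net)
 {
     struct socket *sock;
     int err;

     // Kernel-internal UDP socket; no file descriptor is involved.
     err = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
     if (err)
         return err;
     // ... kernel_bind()/kernel_sendmsg() would go here ...
     sock_release(sock);
     return 0;
 }
*/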
srcu_barrier_head; struct srcu_node *mynode; long unsigned int grpmask; int cpu; struct srcu_struct *ssp; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct srcu_node { spinlock_t lock; long unsigned int srcu_have_cbs[4]; long unsigned int srcu_data_have_cbs[4]; long unsigned int srcu_gp_seq_needed_exp; struct srcu_node *srcu_parent; int grplo; int grphi; }; struct srcu_usage { struct srcu_node *node; struct srcu_node *level[3]; int srcu_size_state; struct mutex srcu_cb_mutex; spinlock_t lock; struct mutex srcu_gp_mutex; long unsigned int srcu_gp_seq; long unsigned int srcu_gp_seq_needed; long unsigned int srcu_gp_seq_needed_exp; long unsigned int srcu_gp_start; long unsigned int srcu_last_gp_end; long unsigned int srcu_size_jiffies; long unsigned int srcu_n_lock_retries; long unsigned int srcu_n_exp_nodelay; bool sda_is_static; long unsigned int srcu_barrier_seq; struct mutex srcu_barrier_mutex; struct completion srcu_barrier_completion; atomic_t srcu_barrier_cpu_cnt; long unsigned int reschedule_jiffies; long unsigned int reschedule_count; struct delayed_work work; struct srcu_struct *srcu_ssp; }; struct srcu_notifier_head { struct mutex mutex; struct srcu_usage srcuu; struct srcu_struct srcu; struct notifier_block *head; }; struct ssb_state { struct ssb_state *shared_state; raw_spinlock_t lock; unsigned int disable_state; long unsigned int local_state; }; struct stack_entry { struct trace_entry ent; int size; long unsigned int caller[0]; }; struct stack_frame { struct stack_frame *next_frame; long unsigned int return_address; }; struct stack_frame_user { const void *next_fp; long unsigned int ret_addr; }; struct stack_info { enum stack_type type; long unsigned int *begin; long unsigned int *end; long unsigned int *next_sp; }; struct stack_map_bucket { struct pcpu_freelist_node fnode; u32 hash; u32 nr; u64 data[0]; }; struct stack_record { struct list_head hash_list; u32 hash; u32 size; union handle_parts handle; refcount_t count; union { long unsigned int entries[64]; struct { struct list_head free_list; long unsigned int rcu_state; }; }; }; struct stacktrace_cookie { long unsigned int *store; unsigned int size; unsigned int skip; unsigned int len; }; struct stashed_operations { void (*put_data)(void *); int (*init_inode)(struct inode *, void *); }; struct stat { __kernel_ulong_t st_dev; __kernel_ulong_t st_ino; __kernel_ulong_t st_nlink; unsigned int st_mode; unsigned int st_uid; unsigned int st_gid; unsigned int __pad0; __kernel_ulong_t st_rdev; __kernel_long_t st_size; __kernel_long_t st_blksize; __kernel_long_t st_blocks; __kernel_ulong_t st_atime; __kernel_ulong_t st_atime_nsec; __kernel_ulong_t st_mtime; __kernel_ulong_t st_mtime_nsec; __kernel_ulong_t st_ctime; __kernel_ulong_t st_ctime_nsec; __kernel_long_t __unused[3]; }; struct stat_node { struct rb_node node; void *stat; }; struct tracer_stat; struct stat_session { struct list_head session_list; struct tracer_stat *ts; struct rb_root stat_root; struct mutex stat_mutex; struct dentry *file; }; struct statfs { __kernel_long_t f_type; __kernel_long_t f_bsize; __kernel_long_t f_blocks; __kernel_long_t f_bfree; __kernel_long_t f_bavail; __kernel_long_t f_files; __kernel_long_t f_ffree; __kernel_fsid_t f_fsid; __kernel_long_t f_namelen; __kernel_long_t f_frsize; __kernel_long_t f_flags; __kernel_long_t f_spare[4]; }; struct statfs64 { __kernel_long_t f_type; __kernel_long_t f_bsize; __u64 f_blocks; __u64 f_bfree; __u64 f_bavail; __u64 f_files; __u64 f_ffree; __kernel_fsid_t f_fsid; __kernel_long_t f_namelen; 
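/*
 Example: srcu_data/srcu_node/srcu_usage above are the per-CPU and per-tree
 pieces of sleepable RCU. Typical usage; struct ex_cfg is a hypothetical
 stand-in for the protected object:

 #include <linux/srcu.h>
 #include <linux/slab.h>

 struct ex_cfg { int value; };

 DEFINE_STATIC_SRCU(ex_srcu);
 static struct ex_cfg __rcu *ex_cfg_ptr;

 static int reader(void)
 {
     int idx = srcu_read_lock(&ex_srcu);  // SRCU readers may sleep
     struct ex_cfg *cfg = srcu_dereference(ex_cfg_ptr, &ex_srcu);
     int v = cfg ? cfg->value : 0;

     srcu_read_unlock(&ex_srcu, idx);
     return v;
 }

 static void writer(struct ex_cfg *newcfg)
 {
     struct ex_cfg *old = rcu_replace_pointer(ex_cfg_ptr, newcfg, true);

     synchronize_srcu(&ex_srcu);          // wait out existing readers
     kfree(old);
 }
*/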
__kernel_long_t f_frsize; __kernel_long_t f_flags; __kernel_long_t f_spare[4]; }; struct static_call_mod; struct static_call_key { void *func; union { long unsigned int type; struct static_call_mod *mods; struct static_call_site *sites; }; }; struct static_call_mod { struct static_call_mod *next; struct module *mod; struct static_call_site *sites; }; struct static_call_site { s32 addr; s32 key; }; struct static_call_tramp_key { s32 tramp; s32 key; }; struct static_key_mod; struct static_key { atomic_t enabled; union { long unsigned int type; struct jump_entry *entries; struct static_key_mod *next; }; }; struct static_key_deferred { struct static_key key; long unsigned int timeout; struct delayed_work work; }; struct static_key_false { struct static_key key; }; struct static_key_false_deferred { struct static_key_false key; long unsigned int timeout; struct delayed_work work; }; struct static_key_mod { struct static_key_mod *next; struct jump_entry *entries; struct module *mod; }; struct static_key_true { struct static_key key; }; struct stats_reply_data { struct ethnl_reply_data base; union { struct { struct ethtool_eth_phy_stats phy_stats; struct ethtool_eth_mac_stats mac_stats; struct ethtool_eth_ctrl_stats ctrl_stats; struct ethtool_rmon_stats rmon_stats; }; struct { struct ethtool_eth_phy_stats phy_stats; struct ethtool_eth_mac_stats mac_stats; struct ethtool_eth_ctrl_stats ctrl_stats; struct ethtool_rmon_stats rmon_stats; } stats; }; const struct ethtool_rmon_hist_range *rmon_ranges; }; struct stats_req_info { struct ethnl_req_info base; long unsigned int stat_mask[1]; enum ethtool_mac_stats_src src; }; struct statx_timestamp { __s64 tv_sec; __u32 tv_nsec; __s32 __reserved; }; struct statx { __u32 stx_mask; __u32 stx_blksize; __u64 stx_attributes; __u32 stx_nlink; __u32 stx_uid; __u32 stx_gid; __u16 stx_mode; __u16 __spare0[1]; __u64 stx_ino; __u64 stx_size; __u64 stx_blocks; __u64 stx_attributes_mask; struct statx_timestamp stx_atime; struct statx_timestamp stx_btime; struct statx_timestamp stx_ctime; struct statx_timestamp stx_mtime; __u32 stx_rdev_major; __u32 stx_rdev_minor; __u32 stx_dev_major; __u32 stx_dev_minor; __u64 stx_mnt_id; __u32 stx_dio_mem_align; __u32 stx_dio_offset_align; __u64 stx_subvol; __u32 stx_atomic_write_unit_min; __u32 stx_atomic_write_unit_max; __u32 stx_atomic_write_segments_max; __u32 __spare1[1]; __u64 __spare3[9]; }; struct stop_event_data { struct perf_event *event; unsigned int restart; }; struct strarray { char **array; size_t n; }; struct strset_info { bool per_dev; bool free_strings; unsigned int count; const char (*strings)[32]; }; struct strset_reply_data { struct ethnl_reply_data base; struct strset_info sets[21]; }; struct strset_req_info { struct ethnl_req_info base; u32 req_ids; bool counts_only; }; struct subflow_send_info { struct sock *ssk; u64 linger_time; }; struct subprocess_info { struct work_struct work; struct completion *complete; const char *path; char **argv; char **envp; int wait; int retval; int (*init)(struct subprocess_info *, struct cred *); void (*cleanup)(struct subprocess_info *); void *data; }; struct subsys_dev_iter { struct klist_iter ki; const struct device_type *type; }; struct subsys_interface { const char *name; const struct bus_type *subsys; struct list_head node; int (*add_dev)(struct device *, struct subsys_interface *); void (*remove_dev)(struct device *, struct subsys_interface *); }; struct subsys_private { struct kset subsys; struct kset *devices_kset; struct list_head interfaces; struct mutex mutex; struct 
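/*
 Example: static_key/static_key_false above back the static-branch API,
 which patches the code at runtime instead of testing a variable.
 do_debug_accounting() is a hypothetical slow path:

 #include <linux/jump_label.h>

 DEFINE_STATIC_KEY_FALSE(ex_feature);

 static inline void hot_path(void)
 {
     // Compiles to a patched NOP/JMP: no load, no compare.
     if (static_branch_unlikely(&ex_feature))
         do_debug_accounting();
 }

 static void set_feature(bool on)
 {
     if (on)
         static_branch_enable(&ex_feature);
     else
         static_branch_disable(&ex_feature);
 }
*/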
kset *drivers_kset; struct klist klist_devices; struct klist klist_drivers; struct blocking_notifier_head bus_notifier; unsigned int drivers_autoprobe: 1; const struct bus_type *bus; struct device *dev_root; struct kset glue_dirs; const struct class *class; struct lock_class_key lock_key; }; struct sugov_policy; struct sugov_cpu { struct update_util_data update_util; struct sugov_policy *sg_policy; unsigned int cpu; bool iowait_boost_pending; unsigned int iowait_boost; u64 last_update; long unsigned int util; long unsigned int bw_min; long unsigned int saved_idle_calls; }; struct sugov_tunables; struct sugov_policy { struct cpufreq_policy *policy; struct sugov_tunables *tunables; struct list_head tunables_hook; raw_spinlock_t update_lock; u64 last_freq_update_time; s64 freq_update_delay_ns; unsigned int next_freq; unsigned int cached_raw_freq; struct irq_work irq_work; struct kthread_work work; struct mutex work_lock; struct kthread_worker worker; struct task_struct *thread; bool work_in_progress; bool limits_changed; bool need_freq_update; }; struct sugov_tunables { struct gov_attr_set attr_set; unsigned int rate_limit_us; }; struct sun_info { __be16 id; __be16 flags; }; struct sun_vtoc { __be32 version; char volume[8]; __be16 nparts; struct sun_info infos[8]; __be16 padding; __be32 bootinfo[3]; __be32 sanity; __be32 reserved[10]; __be32 timestamp[8]; }; struct sun_partition { __be32 start_cylinder; __be32 num_sectors; }; struct sun_disklabel { unsigned char info[128]; struct sun_vtoc vtoc; __be32 write_reinstruct; __be32 read_reinstruct; unsigned char spare[148]; __be16 rspeed; __be16 pcylcount; __be16 sparecyl; __be16 obs1; __be16 obs2; __be16 ilfact; __be16 ncyl; __be16 nacyl; __be16 ntrks; __be16 nsect; __be16 obs3; __be16 obs4; struct sun_partition partitions[8]; __be16 magic; __be16 csum; }; struct mtd_info; struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; long unsigned int s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; long unsigned int s_flags; long unsigned int s_iflags; long unsigned int s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler * const *s_xattr; const struct fsverity_operations *s_vop; struct hlist_bl_head s_roots; struct list_head s_mounts; struct block_device *s_bdev; struct file *s_bdev_file; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; void *s_fs_info; u32 s_time_gran; time64_t s_time_min; time64_t s_time_max; u32 s_fsnotify_mask; struct fsnotify_sb_info *s_fsnotify_info; char s_id[32]; uuid_t s_uuid; u8 s_uuid_len; char s_sysfs_name[37]; unsigned int s_max_links; struct mutex s_vfs_rename_mutex; const char *s_subtype; const struct dentry_operations *s_d_op; struct shrinker *s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; errseq_t s_wb_err; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; spinlock_t s_inode_list_lock; struct list_head s_inodes; 
spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; long: 64; long: 64; long: 64; long: 64; }; struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*free_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *, enum freeze_holder); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *, enum freeze_holder); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); void (*shutdown)(struct super_block *); }; struct superblock_security_struct { u32 sid; u32 def_sid; u32 mntpoint_sid; short unsigned int behavior; short unsigned int flags; struct mutex lock; struct list_head isec_head; spinlock_t isec_lock; }; struct suspend_stats { unsigned int step_failures[8]; unsigned int success; unsigned int fail; int last_failed_dev; char failed_devs[80]; int last_failed_errno; int errno[2]; int last_failed_step; u64 last_hw_sleep; u64 total_hw_sleep; u64 max_hw_sleep; enum suspend_stat_step failed_steps[2]; }; struct swait_queue { struct task_struct *task; struct list_head task_list; }; struct swap_cgroup { short unsigned int id; }; struct swap_cgroup_ctrl { struct page **map; long unsigned int length; spinlock_t lock; }; struct swap_cluster_info { spinlock_t lock; u16 count; u8 flags; u8 order; struct list_head list; }; struct swap_extent { struct rb_node rb_node; long unsigned int start_page; long unsigned int nr_pages; sector_t start_block; }; union swap_header { struct { char reserved[4086]; char magic[10]; } magic; struct { char bootbits[1024]; __u32 version; __u32 last_page; __u32 nr_badpages; unsigned char sws_uuid[16]; unsigned char sws_volume[16]; __u32 padding[117]; __u32 badpages[1]; } info; }; struct swap_info_struct { struct percpu_ref users; long unsigned int flags; short int prio; struct plist_node list; signed char type; unsigned int max; unsigned char *swap_map; long unsigned int *zeromap; struct swap_cluster_info *cluster_info; struct list_head free_clusters; struct list_head full_clusters; struct list_head nonfull_clusters[10]; struct list_head frag_clusters[10]; unsigned int frag_cluster_nr[10]; unsigned int lowest_bit; unsigned int highest_bit; unsigned int pages; unsigned int inuse_pages; unsigned int cluster_next; unsigned int cluster_nr; unsigned int *cluster_next_cpu; struct percpu_cluster *percpu_cluster; struct rb_root swap_extent_root; struct block_device *bdev; struct file *swap_file; struct completion comp; spinlock_t lock; spinlock_t cont_lock; struct work_struct discard_work; struct list_head discard_clusters; struct plist_node avail_lists[0]; }; struct swap_iocb { struct kiocb iocb; struct bio_vec bvec[32]; int pages; int len; }; struct swap_slots_cache { bool lock_initialized; struct mutex 
alloc_lock; swp_entry_t *slots; int nr; int cur; spinlock_t free_lock; swp_entry_t *slots_ret; int n_ret; }; struct swevent_hlist { struct hlist_head heads[256]; struct callback_head callback_head; }; struct swevent_htable { struct swevent_hlist *swevent_hlist; struct mutex hlist_mutex; int hlist_refcount; }; struct switchdev_notifier_info { struct net_device *dev; struct netlink_ext_ack *extack; const void *ctx; }; union vxlan_addr { struct sockaddr_in sin; struct sockaddr_in6 sin6; struct sockaddr sa; }; struct switchdev_notifier_vxlan_fdb_info { struct switchdev_notifier_info info; union vxlan_addr remote_ip; __be16 remote_port; __be32 remote_vni; u32 remote_ifindex; u8 eth_addr[6]; __be32 vni; bool offloaded; bool added_by_user; }; struct swnode { struct kobject kobj; struct fwnode_handle fwnode; const struct software_node *node; int id; struct ida child_ids; struct list_head entry; struct list_head children; struct swnode *parent; unsigned int allocated: 1; unsigned int managed: 1; }; struct sym_count_ctx { unsigned int count; const char *name; }; struct symsearch { const struct kernel_symbol *start; const struct kernel_symbol *stop; const s32 *crcs; enum mod_license license; }; struct synaptics_device_info { u32 model_id; u32 firmware_id; u32 board_id; u32 capabilities; u32 ext_cap; u32 ext_cap_0c; u32 ext_cap_10; u32 identity; u32 x_res; u32 y_res; u32 x_max; u32 y_max; u32 x_min; u32 y_min; }; struct synaptics_hw_state { int x; int y; int z; int w; unsigned int left: 1; unsigned int right: 1; unsigned int middle: 1; unsigned int up: 1; unsigned int down: 1; u8 ext_buttons; s8 scroll; }; struct synaptics_data { struct synaptics_device_info info; enum synaptics_pkt_type pkt_type; u8 mode; int scroll; bool absolute_mode; bool disable_gesture; struct serio *pt_port; struct synaptics_hw_state agm; unsigned int agm_count; long unsigned int press_start; bool press; bool report_press; bool is_forcepad; }; struct sync_fence_info { char obj_name[32]; char driver_name[32]; __s32 status; __u32 flags; __u64 timestamp_ns; }; struct sync_file { struct file *file; char user_name[32]; struct list_head sync_file_list; wait_queue_head_t wq; long unsigned int flags; struct dma_fence *fence; struct dma_fence_cb cb; }; struct sync_file_info { char name[32]; __s32 status; __u32 flags; __u32 num_fences; __u32 pad; __u64 sync_fence_info; }; struct sync_merge_data { char name[32]; __s32 fd2; __s32 fence; __u32 flags; __u32 pad; }; struct sync_set_deadline { __u64 deadline_ns; __u64 pad; }; struct synproxy_stats; struct synproxy_net { struct nf_conn *tmpl; struct synproxy_stats *stats; unsigned int hook_ref4; unsigned int hook_ref6; }; struct synproxy_options { u8 options; u8 wscale; u16 mss_option; u16 mss_encode; u32 tsval; u32 tsecr; }; struct synproxy_stats { unsigned int syn_received; unsigned int cookie_invalid; unsigned int cookie_valid; unsigned int cookie_retrans; unsigned int conn_reopened; }; struct sys_off_data { int mode; void *cb_data; const char *cmd; struct device *dev; }; struct sys_off_handler { struct notifier_block nb; int (*sys_off_cb)(struct sys_off_data *); void *cb_data; enum sys_off_mode mode; bool blocking; void *list; struct device *dev; }; struct syscall_info { __u64 sp; struct seccomp_data data; }; struct syscall_metadata { const char *name; int syscall_nr; int nb_args; const char **types; const char **args; struct list_head enter_fields; struct trace_event_call *enter_event; struct trace_event_call *exit_event; }; struct syscall_tp_t { struct trace_entry ent; int syscall_nr; 
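/*
 Example: sys_off_data/sys_off_handler above belong to the modern
 power-off/restart callback chain. A registration sketch, where
 ex_pmic_shutdown() is hypothetical:

 #include <linux/reboot.h>
 #include <linux/err.h>

 static int ex_power_off(struct sys_off_data *data)
 {
     ex_pmic_shutdown(data->cb_data);  // cb_data was set at registration
     return NOTIFY_DONE;               // let lower-priority handlers run
 }

 static int ex_register_poweroff(void *pmic)
 {
     struct sys_off_handler *h;

     h = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                  SYS_OFF_PRIO_DEFAULT,
                                  ex_power_off, pmic);
     return PTR_ERR_OR_ZERO(h);
 }
*/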
long unsigned int ret; }; struct syscall_tp_t___2 { struct trace_entry ent; int syscall_nr; long unsigned int args[6]; }; struct syscall_trace_enter { struct trace_entry ent; int nr; long unsigned int args[0]; }; struct syscall_trace_exit { struct trace_entry ent; int nr; long int ret; }; struct syscall_user_dispatch { char *selector; long unsigned int offset; long unsigned int len; bool on_dispatch; }; struct syscore_ops { struct list_head node; int (*suspend)(void); void (*resume)(void); void (*shutdown)(void); }; struct sysctl_alias { const char *kernel_param; const char *sysctl_param; }; struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; struct sysinfo { __kernel_long_t uptime; __kernel_ulong_t loads[3]; __kernel_ulong_t totalram; __kernel_ulong_t freeram; __kernel_ulong_t sharedram; __kernel_ulong_t bufferram; __kernel_ulong_t totalswap; __kernel_ulong_t freeswap; __u16 procs; __u16 pad; __kernel_ulong_t totalhigh; __kernel_ulong_t freehigh; __u32 mem_unit; char _f[0]; }; struct sysrq_key_op { void (* const handler)(u8); const char * const help_msg; const char * const action_msg; const int enable_mask; }; struct sysrq_state { struct input_handle handle; struct work_struct reinject_work; long unsigned int key_down[12]; unsigned int alt; unsigned int alt_use; unsigned int shift; unsigned int shift_use; bool active; bool need_reinject; bool reinjecting; bool reset_canceled; bool reset_requested; long unsigned int reset_keybit[12]; int reset_seq_len; int reset_seq_cnt; int reset_seq_version; struct timer_list keyreset_timer; }; struct system_counterval_t { u64 cycles; enum clocksource_ids cs_id; bool use_nsecs; }; struct system_device_crosststamp { ktime_t device; ktime_t sys_realtime; ktime_t sys_monoraw; }; struct system_time_snapshot { u64 cycles; ktime_t real; ktime_t raw; enum clocksource_ids cs_id; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; }; struct sysv_sem { struct sem_undo_list *undo_list; }; struct sysv_shm { struct list_head shm_clist; }; struct taint_flag { char c_true; char c_false; bool module; const char *desc; }; struct tap_filter { unsigned int count; u32 mask[2]; unsigned char addr[48]; }; struct task_delay_info { raw_spinlock_t lock; u64 blkio_start; u64 blkio_delay; u64 swapin_start; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u64 thrashing_start; u64 thrashing_delay; u64 compact_start; u64 compact_delay; u64 wpcopy_start; u64 wpcopy_delay; u64 irq_delay; u32 freepages_count; u32 thrashing_count; u32 compact_count; u32 wpcopy_count; u32 irq_count; }; struct task_group { struct cgroup_subsys_state css; int idle; struct sched_entity **se; struct cfs_rq **cfs_rq; long unsigned int shares; atomic_long_t load_avg; struct callback_head rcu; struct list_head list; struct task_group *parent; struct list_head siblings; struct list_head children; struct cfs_bandwidth cfs_bandwidth; long: 64; }; struct task_numa_env { struct task_struct *p; int src_cpu; int src_nid; int dst_cpu; int dst_nid; int imb_numa_nr; struct numa_stats src_stats; struct numa_stats dst_stats; int imbalance_pct; int dist; struct task_struct *best_task; long int best_imp; int best_cpu; }; struct task_security_struct { u32 osid; u32 sid; u32 exec_sid; u32 create_sid; u32 keycreate_sid; u32 sockcreate_sid; }; typedef struct task_struct *class_find_get_task_t; typedef struct task_struct *class_task_lock_t; struct thread_info { long 
unsigned int flags; long unsigned int syscall_work; u32 status; u32 cpu; }; struct wake_q_node { struct wake_q_node *next; }; struct tlbflush_unmap_batch { struct arch_tlbflush_unmap_batch arch; bool flush_required; bool writable; }; struct thread_struct { struct desc_struct tls_array[3]; long unsigned int sp; short unsigned int es; short unsigned int ds; short unsigned int fsindex; short unsigned int gsindex; long unsigned int fsbase; long unsigned int gsbase; struct perf_event *ptrace_bps[4]; long unsigned int virtual_dr6; long unsigned int ptrace_dr7; long unsigned int cr2; long unsigned int trap_nr; long unsigned int error_code; struct io_bitmap *io_bitmap; long unsigned int iopl_emul; unsigned int iopl_warn: 1; u32 pkru; long: 64; long: 64; long: 64; long: 64; long: 64; struct fpu fpu; }; struct uprobe_task; struct task_struct { struct thread_info thread_info; unsigned int __state; unsigned int saved_state; void *stack; refcount_t usage; unsigned int flags; unsigned int ptrace; int on_cpu; struct __call_single_node wake_entry; unsigned int wakee_flips; long unsigned int wakee_flip_decay_ts; struct task_struct *last_wakee; int recent_used_cpu; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; struct sched_dl_entity *dl_server; const struct sched_class *sched_class; struct task_group *sched_task_group; struct sched_statistics stats; unsigned int btrace_seq; unsigned int policy; long unsigned int max_allowed_capacity; int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; short unsigned int migration_disabled; short unsigned int migration_flags; int rcu_read_lock_nesting; union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; struct rcu_node *rcu_blocked_node; long unsigned int rcu_tasks_nvcsw; u8 rcu_tasks_holdout; u8 rcu_tasks_idx; int rcu_tasks_idle_cpu; struct list_head rcu_tasks_holdout_list; int rcu_tasks_exit_cpu; struct list_head rcu_tasks_exit_list; int trc_reader_nesting; int trc_ipi_to_cpu; union rcu_special trc_reader_special; struct list_head trc_holdout_list; struct list_head trc_blkd_node; int trc_blkd_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; struct address_space *faults_disabled_mapping; int exit_state; int exit_code; int exit_signal; int pdeath_signal; long unsigned int jobctl; unsigned int personality; unsigned int sched_reset_on_fork: 1; unsigned int sched_contributes_to_load: 1; unsigned int sched_migrated: 1; long: 29; unsigned int sched_remote_wakeup: 1; unsigned int sched_rt_mutex: 1; unsigned int in_execve: 1; unsigned int in_iowait: 1; unsigned int restore_sigmask: 1; unsigned int brk_randomized: 1; unsigned int no_cgroup_migration: 1; unsigned int frozen: 1; unsigned int use_memdelay: 1; unsigned int in_eventfd: 1; unsigned int reported_split_lock: 1; unsigned int in_thrashing: 1; long unsigned int atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; long unsigned int stack_canary; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid *thread_pid; struct hlist_node pid_links[4]; struct list_head thread_node; struct completion *vfork_done; int 
*set_child_tid; int *clear_child_tid; void *worker_private; u64 utime; u64 stime; u64 gtime; struct prev_cputime prev_cputime; long unsigned int nvcsw; long unsigned int nivcsw; u64 start_time; u64 start_boottime; long unsigned int min_flt; long unsigned int maj_flt; struct posix_cputimers posix_cputimers; struct posix_cputimers_work posix_cputimers_work; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; struct key *cached_requested_key; char comm[16]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; long unsigned int last_switch_count; long unsigned int last_switch_time; struct fs_struct *fs; struct files_struct *files; struct io_uring_task *io_uring; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; long unsigned int sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; struct syscall_user_dispatch syscall_dispatch; u64 parent_exec_id; u64 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root_cached pi_waiters; struct task_struct *pi_top_task; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; int non_block_count; struct irqtrace_events irqtrace; unsigned int hardirq_threaded; u64 hardirq_chain_key; int softirqs_enabled; int softirq_context; int irq_config; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48]; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct io_context *io_context; struct capture_control *capture_control; long unsigned int ptrace_message; kernel_siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; u64 acct_timexpd; nodemask_t mems_allowed; seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct mutex futex_exit_mutex; unsigned int futex_state; u8 perf_recursion[4]; struct perf_event_context *perf_event_ctxp; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short int il_prev; u8 il_weight; short int pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; long unsigned int numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct numa_group *numa_group; long unsigned int *numa_faults; long unsigned int total_numa_faults; long unsigned int numa_faults_locality[3]; long unsigned int numa_pages_migrated; struct rseq *rseq; u32 rseq_len; u32 rseq_sig; long unsigned int rseq_event_mask; int mm_cid; int last_mm_cid; int migrate_from_cpu; int mm_cid_active; struct callback_head cid_work; struct tlbflush_unmap_batch tlb_ubc; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; unsigned int fail_nth; int nr_dirtied; int nr_dirtied_pause; long unsigned int dirty_paused_when; u64 timer_slack_ns; u64 default_timer_slack_ns; int curr_ret_stack; int curr_ret_depth; long unsigned int *ret_stack; long long unsigned 
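/*
 Example: the task_struct being defined here is reached from anywhere in
 process context through the 'current' macro; comm, pid and tgid are safe
 to read for the running task without extra locking:

 #include <linux/sched.h>
 #include <linux/printk.h>

 static void log_current(void)
 {
     pr_info("running as %s (pid %d, tgid %d)\n",
             current->comm, current->pid, current->tgid);
 }
*/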
int ftrace_timestamp; atomic_t trace_overrun; atomic_t tracing_graph_pause; long unsigned int trace_recursion; unsigned int memcg_nr_pages_over_high; struct mem_cgroup *active_memcg; struct obj_cgroup *objcg; struct gendisk *throttle_disk; struct uprobe_task *utask; struct kmap_ctrl kmap_ctrl; long unsigned int task_state_change; struct callback_head rcu; refcount_t rcu_users; int pagefault_disabled; struct task_struct *oom_reaper_list; struct timer_list oom_reaper_timer; struct vm_struct *stack_vm_area; refcount_t stack_refcount; void *security; struct bpf_local_storage *bpf_storage; struct bpf_run_ctx *bpf_ctx; struct bpf_net_context *bpf_net_context; void *mce_vaddr; __u64 mce_kflags; u64 mce_addr; __u64 mce_ripv: 1; __u64 mce_whole_page: 1; __u64 __mce_reserved: 62; struct callback_head mce_kill_me; int mce_count; struct llist_head kretprobe_instances; struct llist_head rethooks; struct callback_head l1d_flush_kill; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct thread_struct thread; }; struct task_struct__safe_rcu { const cpumask_t *cpus_ptr; struct css_set *cgroups; struct task_struct *real_parent; struct task_struct *group_leader; }; struct tasklet_struct; struct tasklet_head { struct tasklet_struct *head; struct tasklet_struct **tail; }; struct tasklet_struct { struct tasklet_struct *next; long unsigned int state; atomic_t count; bool use_callback; union { void (*func)(long unsigned int); void (*callback)(struct tasklet_struct *); }; long unsigned int data; }; struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32]; __u8 ac_sched; __u8 ac_pad[3]; long: 0; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; __u64 thrashing_count; __u64 thrashing_delay_total; __u64 ac_btime64; __u64 compact_count; __u64 compact_delay_total; __u32 ac_tgid; __u64 ac_tgetime; __u64 ac_exe_dev; __u64 ac_exe_inode; __u64 wpcopy_count; __u64 wpcopy_delay_total; __u64 irq_count; __u64 irq_delay_total; }; struct tc_act_bpf { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; }; struct tc_act_pernet_id { struct list_head list; unsigned int id; }; struct tcf_t { __u64 install; __u64 lastuse; __u64 expires; __u64 firstuse; }; struct tc_action_ops; struct tcf_idrinfo; struct tc_cookie; struct tc_action { const struct tc_action_ops *ops; __u32 type; struct tcf_idrinfo *idrinfo; u32 tcfa_index; refcount_t tcfa_refcnt; atomic_t tcfa_bindcnt; int tcfa_action; struct tcf_t tcfa_tm; long: 64; struct gnet_stats_basic_sync tcfa_bstats; struct gnet_stats_basic_sync tcfa_bstats_hw; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator *tcfa_rate_est; spinlock_t tcfa_lock; struct gnet_stats_basic_sync *cpu_bstats; struct gnet_stats_basic_sync *cpu_bstats_hw; struct gnet_stats_queue *cpu_qstats; struct tc_cookie *user_cookie; struct tcf_chain *goto_chain; u32 tcfa_flags; u8 hw_stats; u8 
used_hw_stats; bool used_hw_stats_valid; u32 in_hw_count; long: 64; }; struct tc_action_net { struct tcf_idrinfo *idrinfo; const struct tc_action_ops *ops; }; typedef void (*tc_action_priv_destructor)(void *); struct tc_action_ops { struct list_head head; char kind[16]; enum tca_id id; unsigned int net_id; size_t size; struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *); int (*lookup)(struct net *, struct tc_action **, u32); int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, struct tcf_proto *, u32, struct netlink_ext_ack *); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); size_t (*get_fill_size)(const struct tc_action *); struct net_device * (*get_dev)(const struct tc_action *, tc_action_priv_destructor *); struct psample_group * (*get_psample_group)(const struct tc_action *, tc_action_priv_destructor *); int (*offload_act_setup)(struct tc_action *, void *, u32 *, bool, struct netlink_ext_ack *); }; struct tc_bind_class_args { struct qdisc_walker w; long unsigned int new_cl; u32 portid; u32 clid; }; struct tc_cls_bpf_offload { struct flow_cls_common_offload common; enum tc_clsbpf_command command; struct tcf_exts *exts; struct bpf_prog *prog; struct bpf_prog *oldprog; const char *name; bool exts_integrated; }; struct tc_cls_matchall_offload { struct flow_cls_common_offload common; enum tc_matchall_command command; struct flow_rule *rule; struct flow_stats stats; bool use_act_stats; long unsigned int cookie; }; struct tc_cookie { u8 *data; u32 len; struct callback_head rcu; }; struct tc_fifo_qopt { __u32 limit; }; struct tc_qopt_offload_stats { struct gnet_stats_basic_sync *bstats; struct gnet_stats_queue *qstats; }; struct tc_fifo_qopt_offload { enum tc_fifo_command command; u32 handle; u32 parent; union { struct tc_qopt_offload_stats stats; }; }; struct tc_fq_codel_cl_stats { __s32 deficit; __u32 ldelay; __u32 count; __u32 lastcount; __u32 dropping; __s32 drop_next; }; struct tc_fq_codel_qd_stats { __u32 maxpacket; __u32 drop_overlimit; __u32 ecn_mark; __u32 new_flow_count; __u32 new_flows_len; __u32 old_flows_len; __u32 ce_mark; __u32 memory_usage; __u32 drop_overmemory; }; struct tc_fq_codel_xstats { __u32 type; union { struct tc_fq_codel_qd_stats qdisc_stats; struct tc_fq_codel_cl_stats class_stats; }; }; struct tc_fq_qd_stats { __u64 gc_flows; __u64 highprio_packets; __u64 tcp_retrans; __u64 throttled; __u64 flows_plimit; __u64 pkts_too_long; __u64 allocation_errors; __s64 time_next_delayed_flow; __u32 flows; __u32 inactive_flows; __u32 throttled_flows; __u32 unthrottle_latency_ns; __u64 ce_mark; __u64 horizon_drops; __u64 horizon_caps; __u64 fastpath_packets; __u64 band_drops[3]; __u32 band_pkt_count[3]; __u32 pad; }; struct tc_matchall_pcnt { __u64 rhit; }; struct tc_mq_opt_offload_graft_params { long unsigned int queue; u32 child_handle; }; struct tc_mq_qopt_offload { enum tc_mq_command command; u32 handle; union { struct tc_qopt_offload_stats stats; struct tc_mq_opt_offload_graft_params graft_params; }; }; struct tc_mqprio_qopt { __u8 num_tc; __u8 prio_tc_map[16]; __u8 hw; __u16 count[16]; __u16 offset[16]; }; struct tc_mqprio_qopt_offload { struct tc_mqprio_qopt qopt; struct netlink_ext_ack *extack; u16 mode; u16 shaper; u32 flags; u64 min_rate[16]; u64 
max_rate[16]; long unsigned int preemptible_tcs; }; struct tc_pedit_key { __u32 mask; __u32 val; __u32 off; __u32 at; __u32 offmask; __u32 shift; }; struct tc_prio_qopt { int bands; __u8 priomap[16]; }; struct tc_query_caps_base { enum tc_setup_type type; void *caps; }; struct tc_root_qopt_offload { enum tc_root_command command; u32 handle; bool ingress; }; struct tc_skb_cb { struct qdisc_skb_cb qdisc_cb; u32 drop_reason; u16 zone; u16 mru; u8 post_ct: 1; u8 post_ct_snat: 1; u8 post_ct_dnat: 1; }; struct tc_skb_ext { union { u64 act_miss_cookie; __u32 chain; }; __u16 mru; __u16 zone; u8 post_ct: 1; u8 post_ct_snat: 1; u8 post_ct_dnat: 1; u8 act_miss: 1; u8 l2_miss: 1; }; struct tc_skbmod { __u32 index; __u32 capab; int action; int refcnt; int bindcnt; __u64 flags; }; struct tc_taprio_qopt_stats { u64 window_drops; u64 tx_overruns; }; struct tc_taprio_qopt_queue_stats { int queue; struct tc_taprio_qopt_stats stats; }; struct tc_taprio_sched_entry { u8 command; u32 gate_mask; u32 interval; }; struct tc_taprio_qopt_offload { enum tc_taprio_qopt_cmd cmd; union { struct tc_taprio_qopt_stats stats; struct tc_taprio_qopt_queue_stats queue_stats; struct { struct tc_mqprio_qopt_offload mqprio; struct netlink_ext_ack *extack; ktime_t base_time; u64 cycle_time; u64 cycle_time_extension; u32 max_sdu[16]; size_t num_entries; struct tc_taprio_sched_entry entries[0]; }; }; }; struct tcamsg { unsigned char tca_family; unsigned char tca__pad1; short unsigned int tca__pad2; }; struct tcf_walker { int stop; int skip; int count; bool nonempty; long unsigned int cookie; int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); }; struct tcf_bind_args { struct tcf_walker w; long unsigned int base; long unsigned int cl; u32 classid; }; struct tcf_block { struct xarray ports; struct mutex lock; struct list_head chain_list; u32 index; u32 classid; refcount_t refcnt; struct net *net; struct Qdisc *q; struct rw_semaphore cb_lock; struct flow_block flow_block; struct list_head owner_list; bool keep_dst; bool bypass_wanted; atomic_t filtercnt; atomic_t skipswcnt; atomic_t offloadcnt; unsigned int nooffloaddevcnt; unsigned int lockeddevcnt; struct { struct tcf_chain *chain; struct list_head filter_chain_list; } chain0; struct callback_head rcu; struct hlist_head proto_destroy_ht[128]; struct mutex proto_destroy_lock; }; struct tcf_block_owner_item { struct list_head list; struct Qdisc *q; enum flow_block_binder_type binder_type; }; struct tcf_bpf { struct tc_action common; struct bpf_prog *filter; union { u32 bpf_fd; u16 bpf_num_ops; }; struct sock_filter *bpf_ops; const char *bpf_name; }; struct tcf_bpf_cfg { struct bpf_prog *filter; struct sock_filter *bpf_ops; const char *bpf_name; u16 bpf_num_ops; bool is_ebpf; }; struct tcf_proto_ops; struct tcf_chain { struct mutex filter_chain_lock; struct tcf_proto *filter_chain; struct list_head list; struct tcf_block *block; u32 index; unsigned int refcnt; unsigned int action_refcnt; bool explicitly_created; bool flushing; const struct tcf_proto_ops *tmplt_ops; void *tmplt_priv; struct callback_head rcu; }; struct tcf_chain_info { struct tcf_proto **pprev; struct tcf_proto *next; }; struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; struct netlink_callback *cb; struct tcf_block *block; struct Qdisc *q; u32 parent; bool terse_dump; }; struct tcf_ematch_ops; struct tcf_ematch { struct tcf_ematch_ops *ops; long unsigned int data; unsigned int datalen; u16 matchid; u16 flags; struct net *net; }; struct tcf_ematch_hdr { __u16 matchid; __u16 kind; __u16 flags; __u16 
pad; }; struct tcf_pkt_info; struct tcf_ematch_ops { int kind; int datalen; int (*change)(struct net *, void *, int, struct tcf_ematch *); int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); void (*destroy)(struct tcf_ematch *); int (*dump)(struct sk_buff *, struct tcf_ematch *); struct module *owner; struct list_head link; }; union tcf_exts_miss_cookie { struct { u32 miss_cookie_base; u32 act_index; }; u64 miss_cookie; }; struct tcf_exts_miss_cookie_node { const struct tcf_chain *chain; const struct tcf_proto *tp; const struct tcf_exts *exts; u32 chain_index; u32 tp_prio; u32 handle; u32 miss_cookie_base; struct callback_head rcu; }; struct tcf_filter_chain_list_item { struct list_head list; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; }; struct tcf_idrinfo { struct mutex lock; struct idr action_idr; struct net *net; }; struct tcf_net { spinlock_t idr_lock; struct idr idr; }; struct tcf_pedit_parms; struct tcf_pedit { struct tc_action common; struct tcf_pedit_parms *parms; long: 64; }; struct tcf_pedit_key_ex { enum pedit_header_type htype; enum pedit_cmd cmd; }; struct tcf_pedit_parms { struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; u32 tcfp_off_max_hint; unsigned char tcfp_nkeys; unsigned char tcfp_flags; struct callback_head rcu; }; struct tcf_pkt_info { unsigned char *ptr; int nexthdr; }; struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; void *data; const struct tcf_proto_ops *ops; struct tcf_chain *chain; spinlock_t lock; bool deleting; bool counted; refcount_t refcnt; struct callback_head rcu; struct hlist_node destroy_ht_node; }; struct tcf_proto_ops { struct list_head head; char kind[16]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); void * (*get)(struct tcf_proto *, u32); void (*put)(struct tcf_proto *, void *); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, u32, struct netlink_ext_ack *); int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); bool (*delete_empty)(struct tcf_proto *); void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); void (*hw_add)(struct tcf_proto *, void *); void (*hw_del)(struct tcf_proto *, void *); void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); void (*tmplt_destroy)(void *); void (*tmplt_reoffload)(struct tcf_chain *, bool, flow_setup_cb_t *, void *); struct tcf_exts * (*get_exts)(const struct tcf_proto *, u32); int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); int (*tmplt_dump)(struct sk_buff *, struct net *, void *); struct module *owner; int flags; }; struct tcf_qevent { struct tcf_block *block; struct tcf_block_ext_info info; struct tcf_proto *filter_chain; }; struct tcf_skbmod_params; struct tcf_skbmod { struct tc_action common; struct tcf_skbmod_params *skbmod_p; long: 64; }; struct tcf_skbmod_params { struct callback_head rcu; u64 flags; u8 eth_dst[6]; 
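/* act_skbmod rewrite values: destination MAC above, EtherType and source
 * MAC below; which of them the action applies is selected by the flags
 * word of tcf_skbmod_params. */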
u16 eth_type; u8 eth_src[6]; }; struct tcg_efi_specid_event_algs { u16 alg_id; u16 digest_size; }; struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; u8 spec_version_minor; u8 spec_version_major; u8 spec_errata; u8 uintnsize; u32 num_algs; struct tcg_efi_specid_event_algs digest_sizes[0]; }; struct tcg_event_field { u32 event_size; u8 event[0]; }; struct tcg_pcr_event { u32 pcr_idx; u32 event_type; u8 digest[20]; u32 event_size; u8 event[0]; }; struct tpm_digest { u16 alg_id; u8 digest[64]; }; struct tcg_pcr_event2_head { u32 pcr_idx; u32 event_type; u32 count; struct tpm_digest digests[0]; }; struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; short unsigned int tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; }; struct tcp4_pseudohdr { __be32 saddr; __be32 daddr; __u8 pad; __u8 protocol; __be16 len; }; struct tcp6_pseudohdr { struct in6_addr saddr; struct in6_addr daddr; __be32 len; __be32 protocol; }; struct tcp_options_received { int ts_recent_stamp; u32 ts_recent; u32 rcv_tsval; u32 rcv_tsecr; u16 saw_tstamp: 1; u16 tstamp_ok: 1; u16 dsack: 1; u16 wscale_ok: 1; u16 sack_ok: 3; u16 smc_ok: 1; u16 snd_wscale: 4; u16 rcv_wscale: 4; u8 saw_unknown: 1; u8 unused: 7; u8 num_sacks; u16 user_mss; u16 mss_clamp; }; struct tcp_rack { u64 mstamp; u32 rtt_us; u32 end_seq; u32 last_delivered; u8 reo_wnd_steps; u8 reo_wnd_persist: 5; u8 dsack_seen: 1; u8 advanced: 1; }; struct tcp_sack_block { u32 start_seq; u32 end_seq; }; struct tcp_sock_af_ops; struct tcp_md5sig_info; struct tcp_fastopen_request; struct tcp_sock { struct inet_connection_sock inet_conn; __u8 __cacheline_group_begin__tcp_sock_read_tx[0]; u32 max_window; u32 rcv_ssthresh; u32 reordering; u32 notsent_lowat; u16 gso_segs; struct sk_buff *lost_skb_hint; struct sk_buff *retransmit_skb_hint; __u8 __cacheline_group_end__tcp_sock_read_tx[0]; __u8 __cacheline_group_begin__tcp_sock_read_txrx[0]; u32 tsoffset; u32 snd_wnd; u32 mss_cache; u32 snd_cwnd; u32 prr_out; u32 lost_out; u32 sacked_out; u16 tcp_header_len; u8 scaling_ratio; u8 chrono_type: 2; u8 repair: 1; u8 tcp_usec_ts: 1; u8 is_sack_reneg: 1; u8 is_cwnd_limited: 1; __u8 __cacheline_group_end__tcp_sock_read_txrx[0]; __u8 __cacheline_group_begin__tcp_sock_read_rx[0]; u32 copied_seq; u32 rcv_tstamp; u32 snd_wl1; u32 tlp_high_seq; u32 rttvar_us; u32 retrans_out; u16 advmss; u16 urg_data; u32 lost; struct minmax rtt_min; struct rb_root out_of_order_queue; u32 snd_ssthresh; u8 recvmsg_inq: 1; __u8 __cacheline_group_end__tcp_sock_read_rx[0]; long: 0; __u8 __cacheline_group_begin__tcp_sock_write_tx[0]; u32 segs_out; u32 data_segs_out; u64 bytes_sent; u32 snd_sml; u32 chrono_start; u32 chrono_stat[3]; u32 write_seq; u32 pushed_seq; u32 lsndtime; u32 mdev_us; u32 rtt_seq; u64 tcp_wstamp_ns; struct list_head tsorted_sent_queue; struct sk_buff *highest_sack; u8 ecn_flags; __u8 __cacheline_group_end__tcp_sock_write_tx[0]; __u8 __cacheline_group_begin__tcp_sock_write_txrx[0]; __be32 pred_flags; u64 tcp_clock_cache; u64 tcp_mstamp; u32 rcv_nxt; u32 snd_nxt; u32 snd_una; u32 window_clamp; u32 srtt_us; u32 packets_out; u32 snd_up; u32 delivered; u32 delivered_ce; u32 app_limited; u32 rcv_wnd; struct tcp_options_received rx_opt; u8 nonagle: 4; u8 rate_app_limited: 1; __u8 __cacheline_group_end__tcp_sock_write_txrx[0]; long: 0; __u8 __cacheline_group_begin__tcp_sock_write_rx[0]; u64 bytes_received; u32 segs_in; u32 data_segs_in; u32 rcv_wup; u32 max_packets_out; u32 cwnd_usage_seq; u32 rate_delivered; u32 rate_interval_us; u32 
rcv_rtt_last_tsecr; u64 first_tx_mstamp; u64 delivered_mstamp; u64 bytes_acked; struct { u32 rtt_us; u32 seq; u64 time; } rcv_rtt_est; struct { u32 space; u32 seq; u64 time; } rcvq_space; __u8 __cacheline_group_end__tcp_sock_write_rx[0]; u32 dsack_dups; u32 compressed_ack_rcv_nxt; struct list_head tsq_node; struct tcp_rack rack; u8 compressed_ack; u8 dup_ack_counter: 2; u8 tlp_retrans: 1; u8 unused: 5; u8 thin_lto: 1; u8 fastopen_connect: 1; u8 fastopen_no_cookie: 1; u8 fastopen_client_fail: 2; u8 frto: 1; u8 repair_queue; u8 save_syn: 2; u8 syn_data: 1; u8 syn_fastopen: 1; u8 syn_fastopen_exp: 1; u8 syn_fastopen_ch: 1; u8 syn_data_acked: 1; u8 keepalive_probes; u32 tcp_tx_delay; u32 mdev_max_us; u32 reord_seen; u32 snd_cwnd_cnt; u32 snd_cwnd_clamp; u32 snd_cwnd_used; u32 snd_cwnd_stamp; u32 prior_cwnd; u32 prr_delivered; u32 last_oow_ack_time; struct hrtimer pacing_timer; struct hrtimer compressed_ack_timer; struct sk_buff *ooo_last_skb; struct tcp_sack_block duplicate_sack[1]; struct tcp_sack_block selective_acks[4]; struct tcp_sack_block recv_sack_cache[4]; int lost_cnt_hint; u32 prior_ssthresh; u32 high_seq; u32 retrans_stamp; u32 undo_marker; int undo_retrans; u64 bytes_retrans; u32 total_retrans; u32 rto_stamp; u16 total_rto; u16 total_rto_recoveries; u32 total_rto_time; u32 urg_seq; unsigned int keepalive_time; unsigned int keepalive_intvl; int linger2; u8 bpf_sock_ops_cb_flags; u8 bpf_chg_cc_inprogress: 1; u16 timeout_rehash; u32 rcv_ooopack; struct { u32 probe_seq_start; u32 probe_seq_end; } mtu_probe; u32 plb_rehash; u32 mtu_info; bool is_mptcp; const struct tcp_sock_af_ops *af_specific; struct tcp_md5sig_info *md5sig_info; struct tcp_fastopen_request *fastopen_req; struct request_sock *fastopen_rsk; struct saved_syn *saved_syn; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct tcp6_sock { struct tcp_sock tcp; struct ipv6_pinfo inet6; long: 64; long: 64; long: 64; long: 64; }; union tcp_ao_addr { struct in_addr a4; struct in6_addr a6; }; struct tcp_ao_hdr { u8 kind; u8 length; u8 keyid; u8 rnext_keyid; }; struct tcp_ao_key { struct hlist_node node; union tcp_ao_addr addr; u8 key[80]; unsigned int tcp_sigpool_id; unsigned int digest_size; int l3index; u8 prefixlen; u8 family; u8 keylen; u8 keyflags; u8 sndid; u8 rcvid; u8 maclen; struct callback_head rcu; atomic64_t pkt_good; atomic64_t pkt_bad; u8 traffic_keys[0]; }; struct tcp_bbr_info { __u32 bbr_bw_lo; __u32 bbr_bw_hi; __u32 bbr_min_rtt; __u32 bbr_pacing_gain; __u32 bbr_cwnd_gain; }; struct tcpvegas_info { __u32 tcpv_enabled; __u32 tcpv_rttcnt; __u32 tcpv_rtt; __u32 tcpv_minrtt; }; struct tcp_dctcp_info { __u16 dctcp_enabled; __u16 dctcp_ce_state; __u32 dctcp_alpha; __u32 dctcp_ab_ecn; __u32 dctcp_ab_tot; }; union tcp_cc_info { struct tcpvegas_info vegas; struct tcp_dctcp_info dctcp; struct tcp_bbr_info bbr; }; struct tcp_diag_md5sig { __u8 tcpm_family; __u8 tcpm_prefixlen; __u16 tcpm_keylen; __be32 tcpm_addr[4]; __u8 tcpm_key[80]; }; struct tcp_fastopen_context { siphash_key_t key[2]; int num; struct callback_head rcu; }; struct tcp_fastopen_cookie { __le64 val[2]; s8 len; bool exp; }; struct tcp_fastopen_metrics { u16 mss; u16 syn_loss: 10; u16 try_exp: 2; long unsigned int last_syn_loss; struct tcp_fastopen_cookie cookie; }; struct tcp_fastopen_request { struct tcp_fastopen_cookie cookie; struct msghdr *data; size_t size; int copied; struct ubuf_info *uarg; }; struct tcp_info { __u8 tcpi_state; __u8 tcpi_ca_state; __u8 tcpi_retransmits; __u8 tcpi_probes; __u8 tcpi_backoff; __u8 tcpi_options; __u8 
tcpi_snd_wscale: 4; __u8 tcpi_rcv_wscale: 4; __u8 tcpi_delivery_rate_app_limited: 1; __u8 tcpi_fastopen_client_fail: 2; __u32 tcpi_rto; __u32 tcpi_ato; __u32 tcpi_snd_mss; __u32 tcpi_rcv_mss; __u32 tcpi_unacked; __u32 tcpi_sacked; __u32 tcpi_lost; __u32 tcpi_retrans; __u32 tcpi_fackets; __u32 tcpi_last_data_sent; __u32 tcpi_last_ack_sent; __u32 tcpi_last_data_recv; __u32 tcpi_last_ack_recv; __u32 tcpi_pmtu; __u32 tcpi_rcv_ssthresh; __u32 tcpi_rtt; __u32 tcpi_rttvar; __u32 tcpi_snd_ssthresh; __u32 tcpi_snd_cwnd; __u32 tcpi_advmss; __u32 tcpi_reordering; __u32 tcpi_rcv_rtt; __u32 tcpi_rcv_space; __u32 tcpi_total_retrans; __u64 tcpi_pacing_rate; __u64 tcpi_max_pacing_rate; __u64 tcpi_bytes_acked; __u64 tcpi_bytes_received; __u32 tcpi_segs_out; __u32 tcpi_segs_in; __u32 tcpi_notsent_bytes; __u32 tcpi_min_rtt; __u32 tcpi_data_segs_in; __u32 tcpi_data_segs_out; __u64 tcpi_delivery_rate; __u64 tcpi_busy_time; __u64 tcpi_rwnd_limited; __u64 tcpi_sndbuf_limited; __u32 tcpi_delivered; __u32 tcpi_delivered_ce; __u64 tcpi_bytes_sent; __u64 tcpi_bytes_retrans; __u32 tcpi_dsack_dups; __u32 tcpi_reord_seen; __u32 tcpi_rcv_ooopack; __u32 tcpi_snd_wnd; __u32 tcpi_rcv_wnd; __u32 tcpi_rehash; __u16 tcpi_total_rto; __u16 tcpi_total_rto_recoveries; __u32 tcpi_total_rto_time; }; struct tcp_md5sig_key; struct tcp_key { union { struct { struct tcp_ao_key *ao_key; char *traffic_key; u32 sne; u8 rcv_next; }; struct tcp_md5sig_key *md5_key; }; enum { TCP_KEY_NONE = 0, TCP_KEY_MD5 = 1, TCP_KEY_AO = 2, } type; }; struct tcp_md5sig { struct __kernel_sockaddr_storage tcpm_addr; __u8 tcpm_flags; __u8 tcpm_prefixlen; __u16 tcpm_keylen; int tcpm_ifindex; __u8 tcpm_key[80]; }; struct tcp_md5sig_info { struct hlist_head head; struct callback_head rcu; }; struct tcp_md5sig_key { struct hlist_node node; u8 keylen; u8 family; u8 prefixlen; u8 flags; union tcp_ao_addr addr; int l3index; u8 key[80]; struct callback_head rcu; }; struct tcp_metrics_block { struct tcp_metrics_block *tcpm_next; struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; long unsigned int tcpm_stamp; u32 tcpm_lock; u32 tcpm_vals[5]; struct tcp_fastopen_metrics tcpm_fastopen; struct callback_head callback_head; }; struct tcp_mib { long unsigned int mibs[16]; }; struct tcp_out_options { u16 options; u16 mss; u8 ws; u8 num_sack_blocks; u8 hash_size; u8 bpf_opt_len; __u8 *hash_location; __u32 tsval; __u32 tsecr; struct tcp_fastopen_cookie *fastopen_cookie; struct mptcp_out_options mptcp; }; struct tcp_repair_opt { __u32 opt_code; __u32 opt_val; }; struct tcp_repair_window { __u32 snd_wl1; __u32 snd_wnd; __u32 max_window; __u32 rcv_wnd; __u32 rcv_wup; }; struct tcp_request_sock_ops { u16 mss_clamp; struct tcp_md5sig_key * (*req_md5_lookup)(const struct sock *, const struct sock *); int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *, u32); u32 (*init_seq)(const struct sk_buff *); u32 (*init_ts_off)(const struct net *, const struct sk_buff *); int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); }; struct tcp_sack_block_wire { __be32 start_seq; __be32 end_seq; }; struct tcp_sacktag_state { u64 first_sackt; u64 last_sackt; u32 reord; u32 sack_delivered; int flag; unsigned int mss_now; struct 
rate_sample *rate; }; struct tcp_seq_afinfo { sa_family_t family; }; struct tcp_sigpool { void *scratch; struct ahash_request *req; }; struct tcp_skb_cb { __u32 seq; __u32 end_seq; union { struct { u16 tcp_gso_segs; u16 tcp_gso_size; }; }; __u8 tcp_flags; __u8 sacked; __u8 ip_dsfield; __u8 txstamp_ack: 1; __u8 eor: 1; __u8 has_rxtstamp: 1; __u8 unused: 5; __u32 ack_seq; union { struct { __u32 is_app_limited: 1; __u32 delivered_ce: 20; __u32 unused: 11; __u32 delivered; u64 first_tx_mstamp; u64 delivered_mstamp; } tx; union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; }; }; struct tcp_sock_af_ops { struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *); int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); int (*md5_parse)(struct sock *, int, sockptr_t, int); }; struct tcp_splice_state { struct pipe_inode_info *pipe; size_t len; unsigned int flags; }; struct tcp_timewait_sock { struct inet_timewait_sock tw_sk; u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; u32 tw_last_oow_ack_time; int tw_ts_recent_stamp; u32 tw_tx_delay; struct tcp_md5sig_key *tw_md5_key; }; struct tcp_ulp_ops { struct list_head list; int (*init)(struct sock *); void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); void (*release)(struct sock *); int (*get_info)(struct sock *, struct sk_buff *); size_t (*get_info_size)(const struct sock *); void (*clone)(const struct request_sock *, struct sock *, const gfp_t); char name[16]; struct module *owner; }; struct tcphdr { __be16 source; __be16 dest; __be32 seq; __be32 ack_seq; __u16 res1: 4; __u16 doff: 4; __u16 fin: 1; __u16 syn: 1; __u16 rst: 1; __u16 psh: 1; __u16 ack: 1; __u16 urg: 1; __u16 ece: 1; __u16 cwr: 1; __be16 window; __sum16 check; __be16 urg_ptr; }; union tcp_word_hdr { struct tcphdr hdr; __be32 words[5]; }; struct tcp_xa_pool { u8 max; u8 idx; __u32 tokens[17]; netmem_ref netmems[17]; }; struct tcp_zerocopy_receive { __u64 address; __u32 length; __u32 recv_skip_hint; __u32 inq; __s32 err; __u64 copybuf_address; __s32 copybuf_len; __u32 flags; __u64 msg_control; __u64 msg_controllen; __u32 msg_flags; __u32 reserved; }; struct tcpa_event { u32 pcr_index; u32 event_type; u8 pcr_value[20]; u32 event_size; u8 event_data[0]; }; struct tcpa_pc_event { u32 event_id; u32 event_size; u8 event_data[0]; }; struct tcpm_hash_bucket { struct tcp_metrics_block *chain; }; struct tcx_entry { struct mini_Qdisc *miniq; struct bpf_mprog_bundle bundle; u32 miniq_active; struct callback_head rcu; }; struct tcx_link { struct bpf_link link; struct net_device *dev; u32 location; }; struct temp_masks { u32 tcc_offset; u32 digital_readout; u32 pkg_digital_readout; }; struct termio { short unsigned int c_iflag; short unsigned int c_oflag; short unsigned int c_cflag; short unsigned int c_lflag; unsigned char c_line; unsigned char c_cc[8]; }; struct termios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; }; struct termios2 { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19]; speed_t c_ispeed; speed_t c_ospeed; }; union text_poke_insn { u8 text[5]; struct { u8 opcode; s32 disp; } __attribute__((packed)); }; struct text_poke_loc { s32 rel_addr; s32 disp; u8 len; u8 opcode; const u8 text[5]; u8 old; }; struct tgid_iter { unsigned int tgid; struct task_struct *task; }; struct thermal_attr { struct device_attribute attr; char name[20]; }; struct thermal_cooling_device_ops; struct 
thermal_cooling_device { int id; const char *type; long unsigned int max_state; struct device device; struct device_node *np; void *devdata; void *stats; const struct thermal_cooling_device_ops *ops; bool updated; struct mutex lock; struct list_head thermal_instances; struct list_head node; }; struct thermal_cooling_device_ops { int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); int (*get_requested_power)(struct thermal_cooling_device *, u32 *); int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); }; struct thermal_trip; struct thermal_governor { const char *name; int (*bind_to_tz)(struct thermal_zone_device *); void (*unbind_from_tz)(struct thermal_zone_device *); void (*trip_crossed)(struct thermal_zone_device *, const struct thermal_trip *, bool); void (*manage)(struct thermal_zone_device *); void (*update_tz)(struct thermal_zone_device *, enum thermal_notify_event); struct list_head governor_list; }; struct thermal_hwmon_attr { struct device_attribute attr; char name[16]; }; struct thermal_hwmon_device { char type[20]; struct device *device; int count; struct list_head tz_list; struct list_head node; }; struct thermal_hwmon_temp { struct list_head hwmon_node; struct thermal_zone_device *tz; struct thermal_hwmon_attr temp_input; struct thermal_hwmon_attr temp_crit; }; struct thermal_instance { int id; char name[20]; struct thermal_cooling_device *cdev; const struct thermal_trip *trip; bool initialized; long unsigned int upper; long unsigned int lower; long unsigned int target; char attr_name[20]; struct device_attribute attr; char weight_attr_name[20]; struct device_attribute weight_attr; struct list_head tz_node; struct list_head cdev_node; unsigned int weight; bool upper_no_limit; }; struct thermal_state { struct _thermal_state core_throttle; struct _thermal_state core_power_limit; struct _thermal_state package_throttle; struct _thermal_state package_power_limit; struct _thermal_state core_thresh0; struct _thermal_state core_thresh1; struct _thermal_state pkg_thresh0; struct _thermal_state pkg_thresh1; }; struct thermal_trip { int temperature; int hysteresis; enum thermal_trip_type type; u8 flags; void *priv; }; struct thermal_trip_attrs { struct thermal_attr type; struct thermal_attr temp; struct thermal_attr hyst; }; struct thermal_trip_desc { struct thermal_trip trip; struct thermal_trip_attrs trip_attrs; struct list_head notify_list_node; int notify_temp; int threshold; }; struct thermal_zone_device_ops { bool (*should_bind)(struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); int (*get_temp)(struct thermal_zone_device *, int *); int (*set_trips)(struct thermal_zone_device *, int, int); int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); int (*set_trip_temp)(struct thermal_zone_device *, const struct thermal_trip *, int); int (*get_crit_temp)(struct thermal_zone_device *, int *); int (*set_emul_temp)(struct thermal_zone_device *, int); int (*get_trend)(struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); }; struct thermal_zone_params; struct thermal_zone_device { int id; char type[20]; struct device 
device; struct completion removal; struct completion resume; struct attribute_group trips_attribute_group; enum thermal_device_mode mode; void *devdata; int num_trips; long unsigned int passive_delay_jiffies; long unsigned int polling_delay_jiffies; long unsigned int recheck_delay_jiffies; int temperature; int last_temperature; int emul_temperature; int passive; int prev_low_trip; int prev_high_trip; atomic_t need_update; struct thermal_zone_device_ops ops; struct thermal_zone_params *tzp; struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; enum thermal_notify_event notify_event; bool suspended; bool resuming; struct thermal_trip_desc trips[0]; }; struct thermal_zone_params { const char *governor_name; bool no_hwmon; u32 sustainable_power; s32 k_po; s32 k_pu; s32 k_i; s32 k_d; s32 integral_cutoff; int slope; int offset; }; struct thpsize { struct kobject kobj; struct list_head node; int order; }; struct threshold_block; struct thresh_restart { struct threshold_block *b; int reset; int set_lvt_off; int lvt_off; u16 old_limit; }; struct threshold_attr { struct attribute attr; ssize_t (*show)(struct threshold_block *, char *); ssize_t (*store)(struct threshold_block *, const char *, size_t); }; struct threshold_bank { struct kobject *kobj; struct threshold_block *blocks; refcount_t cpus; unsigned int shared; }; struct threshold_block { unsigned int block; unsigned int bank; unsigned int cpu; u32 address; u16 interrupt_enable; bool interrupt_capable; u16 threshold_limit; struct kobject kobj; struct list_head miscj; }; struct throtl_service_queue { struct throtl_service_queue *parent_sq; struct list_head queued[2]; unsigned int nr_queued[2]; struct rb_root_cached pending_tree; unsigned int nr_pending; long unsigned int first_pending_disptime; struct timer_list pending_timer; }; struct throtl_data { struct throtl_service_queue service_queue; struct request_queue *queue; unsigned int nr_queued[2]; unsigned int throtl_slice; struct work_struct dispatch_work; bool track_bio_latency; }; struct throtl_grp; struct throtl_qnode { struct list_head node; struct bio_list bios; struct throtl_grp *tg; }; struct throtl_grp { struct blkg_policy_data pd; struct rb_node rb_node; struct throtl_data *td; struct throtl_service_queue service_queue; struct throtl_qnode qnode_on_self[2]; struct throtl_qnode qnode_on_parent[2]; long unsigned int disptime; unsigned int flags; bool has_rules_bps[2]; bool has_rules_iops[2]; uint64_t bps[2]; unsigned int iops[2]; uint64_t bytes_disp[2]; unsigned int io_disp[2]; uint64_t last_bytes_disp[2]; unsigned int last_io_disp[2]; long long int carryover_bytes[2]; int carryover_ios[2]; long unsigned int last_check_time; long unsigned int slice_start[2]; long unsigned int slice_end[2]; struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; }; struct throttling_tstate { unsigned int cpu; int target_state; }; struct tick_device { struct clock_event_device *evtdev; enum tick_device_mode mode; }; struct tick_sched { long unsigned int flags; unsigned int stalled_jiffies; long unsigned int last_tick_jiffies; struct hrtimer sched_timer; ktime_t last_tick; ktime_t next_tick; long unsigned int idle_jiffies; ktime_t idle_waketime; unsigned int got_idle_tick; seqcount_t idle_sleeptime_seq; ktime_t idle_entrytime; long unsigned int last_jiffies; u64 timer_expires_base; u64 timer_expires; u64 next_timer; ktime_t idle_expires; long unsigned int idle_calls; long 
unsigned int idle_sleeps; ktime_t idle_exittime; ktime_t idle_sleeptime; ktime_t iowait_sleeptime; atomic_t tick_dep_mask; long unsigned int check_clocks; }; struct timens_offsets { struct timespec64 monotonic; struct timespec64 boottime; }; struct time_namespace { struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; struct timens_offsets offsets; struct page *vvar_page; bool frozen_offsets; }; struct timedia_struct { int num; const short unsigned int *ids; }; struct tk_read_base { struct clocksource *clock; u64 mask; u64 cycle_last; u32 mult; u32 shift; u64 xtime_nsec; ktime_t base; u64 base_real; }; struct timekeeper { struct tk_read_base tkr_mono; struct tk_read_base tkr_raw; u64 xtime_sec; long unsigned int ktime_sec; struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; s32 tai_offset; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; struct timespec64 monotonic_to_boot; u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u64 raw_interval; u64 ntp_tick; s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; u32 skip_second_overflow; }; struct timens_offset { s64 sec; u64 nsec; }; struct timer_base { raw_spinlock_t lock; struct timer_list *running_timer; long unsigned int clk; long unsigned int next_expiry; unsigned int cpu; bool next_expiry_recalc; bool is_idle; bool timers_pending; long unsigned int pending_map[9]; struct hlist_head vectors[576]; long: 64; long: 64; long: 64; }; struct timer_events { u64 local; u64 global; }; struct timer_list_iter { int cpu; bool second_pass; u64 now; }; struct timer_rand_state { long unsigned int last_time; long int last_delta; long int last_delta2; }; struct timerfd_ctx { union { struct hrtimer tmr; struct alarm alarm; } t; ktime_t tintv; ktime_t moffs; wait_queue_head_t wqh; u64 ticks; int clockid; short unsigned int expired; short unsigned int settime_flags; struct callback_head rcu; struct list_head clist; spinlock_t cancel_lock; bool might_cancel; }; struct timerlat_entry { struct trace_entry ent; unsigned int seqnum; int context; u64 timer_latency; }; struct timestamp_event_queue { struct ptp_extts_event buf[128]; int head; int tail; spinlock_t lock; struct list_head qlist; long unsigned int *mask; struct dentry *debugfs_instance; struct debugfs_u32_array dfs_bitmap; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; void (*twsk_destructor)(struct sock *); }; struct timezone { int tz_minuteswest; int tz_dsttime; }; struct tiocl_selection { short unsigned int xs; short unsigned int ys; short unsigned int xe; short unsigned int ye; short unsigned int sel_mode; }; struct tipc_basic_hdr { __be32 w[4]; }; struct tis_vendor_durations_override { u32 did_vid; struct tpm1_version version; long unsigned int durations[3]; }; struct tis_vendor_timeout_override { u32 did_vid; long unsigned int timeout_us[4]; }; struct tk_fast { seqcount_latch_t seq; struct tk_read_base base[2]; }; struct tlb_client_info { struct slave *tx_slave; u32 tx_bytes; u32 load_history; u32 next; u32 prev; }; struct tlb_context { u64 ctx_id; u64 tlb_gen; }; struct tlb_state { struct mm_struct *loaded_mm; union { struct mm_struct *last_user_mm; long unsigned int last_user_mm_spec; }; u16 loaded_mm_asid; u16 next_asid; bool invalidate_other; short unsigned int user_pcid_flush_mask; long unsigned int cr4; struct tlb_context ctxs[6]; }; struct tlb_state_shared { bool is_lazy; }; struct tls_crypto_info { __u16 
version; __u16 cipher_type; }; struct tls12_crypto_info_aes_ccm_128 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_aes_gcm_128 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_aes_gcm_256 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[32]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_aria_gcm_128 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_aria_gcm_256 { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[32]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_chacha20_poly1305 { struct tls_crypto_info info; unsigned char iv[12]; unsigned char key[32]; unsigned char salt[0]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_sm4_ccm { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls12_crypto_info_sm4_gcm { struct tls_crypto_info info; unsigned char iv[8]; unsigned char key[16]; unsigned char salt[4]; unsigned char rec_seq[8]; }; struct tls_cipher_desc { unsigned int nonce; unsigned int iv; unsigned int key; unsigned int salt; unsigned int tag; unsigned int rec_seq; unsigned int iv_offset; unsigned int key_offset; unsigned int salt_offset; unsigned int rec_seq_offset; char *cipher_name; bool offloadable; size_t crypto_info; }; struct tls_prot_info { u16 version; u16 cipher_type; u16 prepend_size; u16 tag_size; u16 overhead_size; u16 iv_size; u16 salt_size; u16 rec_seq_size; u16 aad_size; u16 tail_size; }; union tls_crypto_context { struct tls_crypto_info info; union { struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; struct tls12_crypto_info_sm4_gcm sm4_gcm; struct tls12_crypto_info_sm4_ccm sm4_ccm; }; }; struct tls_context { struct tls_prot_info prot_info; u8 tx_conf: 3; u8 rx_conf: 3; u8 zerocopy_sendfile: 1; u8 rx_no_pad: 1; int (*push_pending_record)(struct sock *, int); void (*sk_write_space)(struct sock *); void *priv_ctx_tx; void *priv_ctx_rx; struct net_device *netdev; struct cipher_context tx; struct cipher_context rx; struct scatterlist *partially_sent_record; u16 partially_sent_offset; bool splicing_pages; bool pending_open_record_frags; struct mutex tx_lock; long unsigned int flags; struct proto *sk_proto; struct sock *sk; void (*sk_destruct)(struct sock *); union tls_crypto_context crypto_send; union tls_crypto_context crypto_recv; struct list_head list; refcount_t refcount; struct callback_head rcu; }; struct tls_decrypt_arg { union { struct { bool zc; bool async; bool async_done; u8 tail; }; struct { bool zc; bool async; bool async_done; u8 tail; } inargs; }; struct sk_buff *skb; }; struct tls_decrypt_ctx { struct sock *sk; u8 iv[16]; u8 aad[13]; u8 tail; bool free_sgout; struct scatterlist sg[0]; }; struct tls_rec { struct list_head list; int tx_ready; int tx_flags; struct sk_msg msg_plaintext; struct sk_msg msg_encrypted; struct scatterlist sg_aead_in[2]; struct scatterlist sg_aead_out[2]; char content_type; struct scatterlist sg_content_type; struct sock *sk; char aad_space[13]; u8 iv_data[16]; struct aead_request aead_req; u8 aead_req_ctx[0]; }; struct tls_strparser { 
struct sock *sk; u32 mark: 8; u32 stopped: 1; u32 copy_mode: 1; u32 mixed_decrypted: 1; bool msg_ready; struct strp_msg stm; struct sk_buff *anchor; struct work_struct work; }; struct tls_sw_context_rx { struct crypto_aead *aead_recv; struct crypto_wait async_wait; struct sk_buff_head rx_list; void (*saved_data_ready)(struct sock *); u8 reader_present; u8 async_capable: 1; u8 zc_capable: 1; u8 reader_contended: 1; struct tls_strparser strp; atomic_t decrypt_pending; struct sk_buff_head async_hold; struct wait_queue_head wq; }; struct tx_work { struct delayed_work work; struct sock *sk; }; struct tls_sw_context_tx { struct crypto_aead *aead_send; struct crypto_wait async_wait; struct tx_work tx_work; struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; u8 async_capable: 1; long unsigned int tx_bitmask; }; struct tm { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; long int tm_year; int tm_wday; int tm_yday; }; struct tmigr_event { struct timerqueue_node nextevt; unsigned int cpu; bool ignore; }; struct tmigr_group; struct tmigr_cpu { raw_spinlock_t lock; bool online; bool idle; bool remote; struct tmigr_group *tmgroup; u8 groupmask; u64 wakeup; struct tmigr_event cpuevt; }; struct tmigr_group { raw_spinlock_t lock; struct tmigr_group *parent; struct tmigr_event groupevt; u64 next_expiry; struct timerqueue_head events; atomic_t migr_state; unsigned int level; int numa_node; unsigned int num_children; u8 groupmask; struct list_head list; }; union tmigr_state { u32 state; struct { u8 active; u8 migrator; u16 seq; }; }; struct tmigr_walk { u64 nextexp; u64 firstexp; struct tmigr_event *evt; u8 childmask; bool remote; long unsigned int basej; u64 now; bool check; bool tmc_active; }; struct tmpmasks { cpumask_var_t addmask; cpumask_var_t delmask; cpumask_var_t new_cpus; }; struct tms { __kernel_clock_t tms_utime; __kernel_clock_t tms_stime; __kernel_clock_t tms_cutime; __kernel_clock_t tms_cstime; }; struct tnl_ptk_info { long unsigned int flags[1]; __be16 proto; __be32 key; __be32 seq; int hdr_len; }; struct tnode { struct callback_head rcu; t_key empty_children; t_key full_children; struct key_vector *parent; struct key_vector kv[1]; }; struct token_bucket { spinlock_t lock; int chain_len; struct hlist_nulls_head req_chain; struct hlist_nulls_head msk_chain; }; struct topa { struct list_head list; u64 offset; size_t size; int last; unsigned int z_count; }; struct topa_entry { u64 end: 1; u64 rsvd0: 1; u64 intr: 1; u64 rsvd1: 1; u64 stop: 1; u64 rsvd2: 1; u64 size: 4; u64 rsvd3: 2; u64 base: 40; u64 rsvd4: 12; }; struct topa_page { struct topa_entry table[507]; struct topa topa; }; struct topo_scan { struct cpuinfo_x86 *c; unsigned int dom_shifts[7]; unsigned int dom_ncpus[7]; unsigned int ebx1_nproc_shift; u16 amd_nodes_per_pkg; u16 amd_node_id; }; struct touchscreen_properties { unsigned int max_x; unsigned int max_y; bool invert_x; bool invert_y; bool swap_x_y; }; struct tp_module { struct list_head list; struct module *mod; }; struct tracepoint_func { void *func; void *data; int prio; }; struct tp_probes { struct callback_head rcu; struct tracepoint_func probes[0]; }; struct tp_transition_snapshot { long unsigned int rcu; long unsigned int srcu; bool ongoing; }; struct tpacket2_hdr { __u32 tp_status; __u32 tp_len; __u32 tp_snaplen; __u16 tp_mac; __u16 tp_net; __u32 tp_sec; __u32 tp_nsec; __u16 tp_vlan_tci; __u16 tp_vlan_tpid; __u8 tp_padding[4]; }; struct tpacket_hdr_variant1 { __u32 tp_rxhash; __u32 tp_vlan_tci; __u16 tp_vlan_tpid; __u16 tp_padding; }; 
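/*
 * A minimal usage sketch for the struct tcphdr layout above, following
 * the conventional XDP bounds-check idiom: the BPF verifier rejects any
 * dereference that is not preceded by an explicit comparison against
 * data_end. tcp_hdr_ok() is a hypothetical helper name (not part of
 * this generated dump) and relies only on struct tcphdr as defined
 * earlier in this header.
 */
static inline struct tcphdr *tcp_hdr_ok(void *data, void *data_end)
{
	struct tcphdr *tcp = data;

	/* The fixed 20-byte header must fit within the packet buffer. */
	if ((void *)(tcp + 1) > data_end)
		return (void *)0;
	/* doff counts 32-bit words including options; below 5 is malformed. */
	if (tcp->doff < 5)
		return (void *)0;
	return tcp;
}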
struct tpacket3_hdr { __u32 tp_next_offset; __u32 tp_sec; __u32 tp_nsec; __u32 tp_snaplen; __u32 tp_len; __u32 tp_status; __u16 tp_mac; __u16 tp_net; union { struct tpacket_hdr_variant1 hv1; }; __u8 tp_padding[8]; }; struct tpacket_auxdata { __u32 tp_status; __u32 tp_len; __u32 tp_snaplen; __u16 tp_mac; __u16 tp_net; __u16 tp_vlan_tci; __u16 tp_vlan_tpid; }; struct tpacket_bd_ts { unsigned int ts_sec; union { unsigned int ts_usec; unsigned int ts_nsec; }; }; struct tpacket_hdr_v1 { __u32 block_status; __u32 num_pkts; __u32 offset_to_first_pkt; __u32 blk_len; __u64 seq_num; struct tpacket_bd_ts ts_first_pkt; struct tpacket_bd_ts ts_last_pkt; }; union tpacket_bd_header_u { struct tpacket_hdr_v1 bh1; }; struct tpacket_block_desc { __u32 version; __u32 offset_to_priv; union tpacket_bd_header_u hdr; }; struct tpacket_hdr { long unsigned int tp_status; unsigned int tp_len; unsigned int tp_snaplen; short unsigned int tp_mac; short unsigned int tp_net; unsigned int tp_sec; unsigned int tp_usec; }; struct tpacket_req { unsigned int tp_block_size; unsigned int tp_block_nr; unsigned int tp_frame_size; unsigned int tp_frame_nr; }; struct tpacket_req3 { unsigned int tp_block_size; unsigned int tp_block_nr; unsigned int tp_frame_size; unsigned int tp_frame_nr; unsigned int tp_retire_blk_tov; unsigned int tp_sizeof_priv; unsigned int tp_feature_req_word; }; union tpacket_req_u { struct tpacket_req req; struct tpacket_req3 req3; }; struct tpacket_rollover_stats { __u64 tp_all; __u64 tp_huge; __u64 tp_failed; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; struct tpm1_get_random_out { __be32 rng_data_len; u8 rng_data[128]; }; struct tpm2_auth { u32 handle; u32 session; u8 our_nonce[32]; u8 tpm_nonce[32]; union { u8 salt[32]; u8 scratch[32]; }; u8 session_key[32]; u8 passphrase[32]; int passphrase_len; struct crypto_aes_ctx aes_ctx; u8 attrs; __be32 ordinal; u32 name_h[3]; u8 name[198]; }; struct tpm2_cap_handles { u8 more_data; __be32 capability; __be32 count; __be32 handles[0]; } __attribute__((packed)); struct tpm2_context { __be64 sequence; __be32 saved_handle; __be32 hierarchy; __be16 blob_size; } __attribute__((packed)); struct tpm2_crb_pluton { u64 start_addr; u64 reply_addr; }; struct tpm2_crb_smc { u32 interrupt; u8 interrupt_flags; u8 op_flags; u16 reserved2; u32 smc_func_id; }; struct tpm2_get_cap_out { u8 more_data; __be32 subcap_id; __be32 property_cnt; __be32 property_id; __be32 value; } __attribute__((packed)); struct tpm2_get_random_out { __be16 size; u8 buffer[128]; }; struct tpm2_hash { unsigned int crypto_id; unsigned int tpm_id; }; struct tpm2_pcr_read_out { __be32 update_cnt; __be32 pcr_selects_cnt; __be16 hash_alg; u8 pcr_select_size; u8 pcr_select[3]; __be32 digests_cnt; __be16 digest_size; u8 digest[0]; } __attribute__((packed)); struct tpm2_pcr_selection { __be16 hash_alg; u8 size_of_select; u8 pcr_select[3]; }; struct tpm_bank_info { u16 alg_id; u16 digest_size; u16 crypto_id; }; struct tpm_bios_log { void *bios_event_log; void *bios_event_log_end; }; struct tpm_buf { u32 flags; u32 length; u8 *data; u8 handles; }; struct tpm_chip_seqops { struct tpm_chip *chip; const struct seq_operations *seqops; }; struct tpm_space { u32 context_tbl[3]; u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; u32 buf_size; }; struct tpm_class_ops; struct tpm_chip { struct device dev; struct device devs; struct cdev cdev; struct cdev cdevs; struct rw_semaphore ops_sem; const struct tpm_class_ops *ops; struct tpm_bios_log log; struct 
tpm_chip_seqops bin_log_seqops; struct tpm_chip_seqops ascii_log_seqops; unsigned int flags; int dev_num; long unsigned int is_open; char hwrng_name[64]; struct hwrng hwrng; struct mutex tpm_mutex; long unsigned int timeout_a; long unsigned int timeout_b; long unsigned int timeout_c; long unsigned int timeout_d; bool timeout_adjusted; long unsigned int duration[4]; bool duration_adjusted; struct dentry *bios_dir[3]; const struct attribute_group *groups[8]; unsigned int groups_cnt; u32 nr_allocated_banks; struct tpm_bank_info *allocated_banks; acpi_handle acpi_dev_handle; char ppi_version[4]; struct tpm_space work_space; u32 last_cc; u32 nr_commands; u32 *cc_attrs_tbl; int locality; u8 null_key_context[4096]; u8 null_key_name[34]; u8 null_ec_key_x[32]; u8 null_ec_key_y[32]; struct tpm2_auth *auth; }; struct tpm_class_ops { unsigned int flags; const u8 req_complete_mask; const u8 req_complete_val; bool (*req_canceled)(struct tpm_chip *, u8); int (*recv)(struct tpm_chip *, u8 *, size_t); int (*send)(struct tpm_chip *, u8 *, size_t); void (*cancel)(struct tpm_chip *); u8 (*status)(struct tpm_chip *); void (*update_timeouts)(struct tpm_chip *, long unsigned int *); void (*update_durations)(struct tpm_chip *, long unsigned int *); int (*go_idle)(struct tpm_chip *); int (*cmd_ready)(struct tpm_chip *); int (*request_locality)(struct tpm_chip *, int); int (*relinquish_locality)(struct tpm_chip *, int); void (*clk_enable)(struct tpm_chip *, bool); }; struct tpm_header { __be16 tag; __be32 length; union { __be32 ordinal; __be32 return_code; }; } __attribute__((packed)); struct tpm_info { struct resource res; int irq; }; struct tpm_pcr_attr { int alg_id; int pcr; struct device_attribute attr; }; struct tpm_readpubek_out { u8 algorithm[4]; u8 encscheme[2]; u8 sigscheme[2]; __be32 paramsize; u8 parameters[12]; __be32 keysize; u8 modulus[256]; u8 checksum[20]; }; struct tpm_tis_phy_ops; struct tpm_tis_data { struct tpm_chip *chip; u16 manufacturer_id; struct mutex locality_count_mutex; unsigned int locality_count; int locality; int irq; struct work_struct free_irq_work; long unsigned int last_unhandled_irq; unsigned int unhandled_irqs; unsigned int int_mask; long unsigned int flags; void *ilb_base_addr; u16 clkrun_enabled; wait_queue_head_t int_queue; wait_queue_head_t read_queue; const struct tpm_tis_phy_ops *phy_ops; short unsigned int rng_quality; unsigned int timeout_min; unsigned int timeout_max; }; struct tpm_tis_phy_ops { int (*read_bytes)(struct tpm_tis_data *, u32, u16, u8 *, enum tpm_tis_io_mode); int (*write_bytes)(struct tpm_tis_data *, u32, u16, const u8 *, enum tpm_tis_io_mode); int (*verify_crc)(struct tpm_tis_data *, size_t, const u8 *); }; struct tpm_tis_tcg_phy { struct tpm_tis_data priv; void *iobase; }; struct tpmrm_priv { struct file_priv priv; struct tpm_space space; }; struct trace_pid_list; struct trace_options; struct trace_func_repeats; struct trace_array { struct list_head list; char *name; struct array_buffer array_buffer; unsigned int mapped; long unsigned int range_addr_start; long unsigned int range_addr_size; long int text_delta; long int data_delta; struct trace_pid_list *filtered_pids; struct trace_pid_list *filtered_no_pids; arch_spinlock_t max_lock; int buffer_disabled; int sys_refcount_enter; int sys_refcount_exit; struct trace_event_file *enter_syscall_files[463]; struct trace_event_file *exit_syscall_files[463]; int stop_count; int clock_id; int nr_topts; bool clear_trace; int buffer_percent; unsigned int n_err_log_entries; struct tracer *current_trace; unsigned 
int trace_flags; unsigned char trace_flags_index[32]; unsigned int flags; raw_spinlock_t start_lock; const char *system_names; struct list_head err_log; struct dentry *dir; struct dentry *options; struct dentry *percpu_dir; struct eventfs_inode *event_dir; struct trace_options *topts; struct list_head systems; struct list_head events; struct trace_event_file *trace_marker_file; cpumask_var_t tracing_cpumask; cpumask_var_t pipe_cpumask; int ref; int trace_ref; struct ftrace_ops *ops; struct trace_pid_list *function_pids; struct trace_pid_list *function_no_pids; struct fgraph_ops *gops; struct list_head func_probes; struct list_head mod_trace; struct list_head mod_notrace; int function_enabled; int no_filter_buffering_ref; struct list_head hist_vars; struct trace_func_repeats *last_func_repeats; bool ring_buffer_expanded; }; struct trace_array_cpu { atomic_t disabled; void *buffer_page; long unsigned int entries; long unsigned int saved_latency; long unsigned int critical_start; long unsigned int critical_end; long unsigned int critical_sequence; long unsigned int nice; long unsigned int policy; long unsigned int rt_priority; long unsigned int skipped_entries; u64 preempt_timestamp; pid_t pid; kuid_t uid; char comm[16]; int ftrace_ignore_pid; bool ignore_pid; }; struct trace_bprintk_fmt { struct list_head list; const char *fmt; }; struct trace_buffer { unsigned int flags; int cpus; atomic_t record_disabled; atomic_t resizing; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; struct mutex mutex; struct ring_buffer_per_cpu **buffers; struct hlist_node node; u64 (*clock)(void); struct rb_irq_work irq_work; bool time_stamp_abs; long unsigned int range_addr_start; long unsigned int range_addr_end; long int last_text_delta; long int last_data_delta; unsigned int subbuf_size; unsigned int subbuf_order; unsigned int max_data_size; }; struct trace_buffer_meta { __u32 meta_page_size; __u32 meta_struct_len; __u32 subbuf_size; __u32 nr_subbufs; struct { __u64 lost_events; __u32 id; __u32 read; } reader; __u64 flags; __u64 entries; __u64 overrun; __u64 read; __u64 Reserved1; __u64 Reserved2; }; struct trace_buffer_struct { int nesting; char buffer[4096]; }; struct trace_probe_event; struct trace_probe { struct list_head list; struct trace_probe_event *event; ssize_t size; unsigned int nr_args; struct probe_entry_arg *entry_arg; struct probe_arg args[0]; }; struct trace_eprobe { const char *event_system; const char *event_name; char *filter_str; struct trace_event_call *event; struct dyn_event devent; struct trace_probe tp; }; struct trace_eval_map { const char *system; const char *eval_string; long unsigned int eval_value; }; struct trace_event_functions; struct trace_event { struct hlist_node node; int type; struct trace_event_functions *funcs; }; struct trace_event_buffer { struct trace_buffer *buffer; struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; unsigned int trace_ctx; struct pt_regs *regs; }; struct trace_event_class; struct trace_event_call { struct list_head list; struct trace_event_class *class; union { char *name; struct tracepoint *tp; }; struct trace_event event; char *print_fmt; struct event_filter *filter; union { void *module; atomic_t refcnt; }; void *data; int flags; int perf_refcount; struct hlist_head *perf_events; struct bpf_prog_array *prog_array; int (*perf_perm)(struct trace_event_call *, struct perf_event *); }; struct trace_event_fields; struct trace_event_class { const char *system; void *probe; void *perf_probe; int (*reg)(struct 
trace_event_call *, enum trace_reg, void *); struct trace_event_fields *fields_array; struct list_head * (*get_fields)(struct trace_event_call *); struct list_head fields; int (*raw_init)(struct trace_event_call *); }; struct trace_event_data_offsets_ack_update_msk {}; struct trace_event_data_offsets_aer_event { u32 dev_name; const void *dev_name_ptr_; }; struct trace_event_data_offsets_alarm_class {}; struct trace_event_data_offsets_alarmtimer_suspend {}; struct trace_event_data_offsets_alloc_vmap_area {}; struct trace_event_data_offsets_amd_pstate_perf {}; struct trace_event_data_offsets_arm_event {}; struct trace_event_data_offsets_balance_dirty_pages {}; struct trace_event_data_offsets_bdi_dirty_ratelimit {}; struct trace_event_data_offsets_block_bio {}; struct trace_event_data_offsets_block_bio_complete {}; struct trace_event_data_offsets_block_bio_remap {}; struct trace_event_data_offsets_block_buffer {}; struct trace_event_data_offsets_block_plug {}; struct trace_event_data_offsets_block_rq { u32 cmd; const void *cmd_ptr_; }; struct trace_event_data_offsets_block_rq_completion { u32 cmd; const void *cmd_ptr_; }; struct trace_event_data_offsets_block_rq_remap {}; struct trace_event_data_offsets_block_rq_requeue { u32 cmd; const void *cmd_ptr_; }; struct trace_event_data_offsets_block_split {}; struct trace_event_data_offsets_block_unplug {}; struct trace_event_data_offsets_bpf_test_finish {}; struct trace_event_data_offsets_bpf_trace_printk { u32 bpf_string; const void *bpf_string_ptr_; }; struct trace_event_data_offsets_bpf_trigger_tp {}; struct trace_event_data_offsets_bpf_xdp_link_attach_failed { u32 msg; const void *msg_ptr_; }; struct trace_event_data_offsets_cdev_update { u32 type; const void *type_ptr_; }; struct trace_event_data_offsets_cgroup { u32 path; const void *path_ptr_; }; struct trace_event_data_offsets_cgroup_event { u32 path; const void *path_ptr_; }; struct trace_event_data_offsets_cgroup_migrate { u32 dst_path; const void *dst_path_ptr_; u32 comm; const void *comm_ptr_; }; struct trace_event_data_offsets_cgroup_root { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_cgroup_rstat {}; struct trace_event_data_offsets_clock { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_cma_alloc_busy_retry { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_cma_alloc_finish { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_cma_alloc_start { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_cma_release { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_compact_retry {}; struct trace_event_data_offsets_console { u32 msg; const void *msg_ptr_; }; struct trace_event_data_offsets_consume_skb {}; struct trace_event_data_offsets_contention_begin {}; struct trace_event_data_offsets_contention_end {}; struct trace_event_data_offsets_cpu {}; struct trace_event_data_offsets_cpu_frequency_limits {}; struct trace_event_data_offsets_cpu_idle_miss {}; struct trace_event_data_offsets_cpu_latency_qos_request {}; struct trace_event_data_offsets_cpuhp_enter {}; struct trace_event_data_offsets_cpuhp_exit {}; struct trace_event_data_offsets_cpuhp_multi_enter {}; struct trace_event_data_offsets_csd_function {}; struct trace_event_data_offsets_csd_queue_cpu {}; struct trace_event_data_offsets_dev_pm_qos_request { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_device_pm_callback_end { u32 device; const void *device_ptr_; u32 driver; const void *driver_ptr_; }; struct 
trace_event_data_offsets_device_pm_callback_start { u32 device; const void *device_ptr_; u32 driver; const void *driver_ptr_; u32 parent; const void *parent_ptr_; u32 pm_ops; const void *pm_ops_ptr_; }; struct trace_event_data_offsets_devlink_health_recover_aborted { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 reporter_name; const void *reporter_name_ptr_; }; struct trace_event_data_offsets_devlink_health_report { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 reporter_name; const void *reporter_name_ptr_; u32 msg; const void *msg_ptr_; }; struct trace_event_data_offsets_devlink_health_reporter_state_update { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 reporter_name; const void *reporter_name_ptr_; }; struct trace_event_data_offsets_devlink_hwerr { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 msg; const void *msg_ptr_; }; struct trace_event_data_offsets_devlink_hwmsg { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 buf; const void *buf_ptr_; }; struct trace_event_data_offsets_devlink_trap_report { u32 bus_name; const void *bus_name_ptr_; u32 dev_name; const void *dev_name_ptr_; u32 driver_name; const void *driver_name_ptr_; u32 trap_name; const void *trap_name_ptr_; u32 trap_group_name; const void *trap_group_name_ptr_; }; struct trace_event_data_offsets_devres { u32 devname; const void *devname_ptr_; }; struct trace_event_data_offsets_dma_alloc { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_dma_fence { u32 driver; const void *driver_ptr_; u32 timeline; const void *timeline_ptr_; }; struct trace_event_data_offsets_dma_free { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_dma_map { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_dma_map_sg { u32 device; const void *device_ptr_; u32 phys_addrs; const void *phys_addrs_ptr_; u32 dma_addrs; const void *dma_addrs_ptr_; u32 lengths; const void *lengths_ptr_; }; struct trace_event_data_offsets_dma_sync_sg { u32 device; const void *device_ptr_; u32 dma_addrs; const void *dma_addrs_ptr_; u32 lengths; const void *lengths_ptr_; }; struct trace_event_data_offsets_dma_sync_single { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_dma_unmap { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_dma_unmap_sg { u32 device; const void *device_ptr_; u32 addrs; const void *addrs_ptr_; }; struct trace_event_data_offsets_dql_stall_detected {}; struct trace_event_data_offsets_emulate_vsyscall {}; struct trace_event_data_offsets_error_report_template {}; struct trace_event_data_offsets_exit_mmap {}; struct trace_event_data_offsets_ext4__bitmap_load {}; struct trace_event_data_offsets_ext4__es_extent {}; struct trace_event_data_offsets_ext4__es_shrink_enter {}; struct trace_event_data_offsets_ext4__fallocate_mode {}; struct trace_event_data_offsets_ext4__folio_op {}; struct trace_event_data_offsets_ext4__map_blocks_enter {}; struct trace_event_data_offsets_ext4__map_blocks_exit {}; struct trace_event_data_offsets_ext4__mb_new_pa {}; struct trace_event_data_offsets_ext4__mballoc {}; struct 
trace_event_data_offsets_ext4__trim {}; struct trace_event_data_offsets_ext4__truncate {}; struct trace_event_data_offsets_ext4__write_begin {}; struct trace_event_data_offsets_ext4__write_end {}; struct trace_event_data_offsets_ext4_alloc_da_blocks {}; struct trace_event_data_offsets_ext4_allocate_blocks {}; struct trace_event_data_offsets_ext4_allocate_inode {}; struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; struct trace_event_data_offsets_ext4_collapse_range {}; struct trace_event_data_offsets_ext4_da_release_space {}; struct trace_event_data_offsets_ext4_da_reserve_space {}; struct trace_event_data_offsets_ext4_da_update_reserve_space {}; struct trace_event_data_offsets_ext4_da_write_pages {}; struct trace_event_data_offsets_ext4_da_write_pages_extent {}; struct trace_event_data_offsets_ext4_discard_blocks {}; struct trace_event_data_offsets_ext4_discard_preallocations {}; struct trace_event_data_offsets_ext4_drop_inode {}; struct trace_event_data_offsets_ext4_error {}; struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; struct trace_event_data_offsets_ext4_es_insert_delayed_extent {}; struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; struct trace_event_data_offsets_ext4_es_remove_extent {}; struct trace_event_data_offsets_ext4_es_shrink {}; struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; struct trace_event_data_offsets_ext4_evict_inode {}; struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; struct trace_event_data_offsets_ext4_ext_load_extent {}; struct trace_event_data_offsets_ext4_ext_remove_space {}; struct trace_event_data_offsets_ext4_ext_remove_space_done {}; struct trace_event_data_offsets_ext4_ext_rm_idx {}; struct trace_event_data_offsets_ext4_ext_rm_leaf {}; struct trace_event_data_offsets_ext4_ext_show_extent {}; struct trace_event_data_offsets_ext4_fallocate_exit {}; struct trace_event_data_offsets_ext4_fc_cleanup {}; struct trace_event_data_offsets_ext4_fc_commit_start {}; struct trace_event_data_offsets_ext4_fc_commit_stop {}; struct trace_event_data_offsets_ext4_fc_replay {}; struct trace_event_data_offsets_ext4_fc_replay_scan {}; struct trace_event_data_offsets_ext4_fc_stats {}; struct trace_event_data_offsets_ext4_fc_track_dentry {}; struct trace_event_data_offsets_ext4_fc_track_inode {}; struct trace_event_data_offsets_ext4_fc_track_range {}; struct trace_event_data_offsets_ext4_forget {}; struct trace_event_data_offsets_ext4_free_blocks {}; struct trace_event_data_offsets_ext4_free_inode {}; struct trace_event_data_offsets_ext4_fsmap_class {}; struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; struct trace_event_data_offsets_ext4_getfsmap_class {}; struct trace_event_data_offsets_ext4_insert_range {}; struct trace_event_data_offsets_ext4_invalidate_folio_op {}; struct trace_event_data_offsets_ext4_journal_start_inode {}; struct trace_event_data_offsets_ext4_journal_start_reserved {}; struct trace_event_data_offsets_ext4_journal_start_sb {}; struct trace_event_data_offsets_ext4_lazy_itable_init {}; struct trace_event_data_offsets_ext4_load_inode {}; struct trace_event_data_offsets_ext4_mark_inode_dirty {}; struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; struct 
trace_event_data_offsets_ext4_mb_release_group_pa {}; struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; struct trace_event_data_offsets_ext4_mballoc_alloc {}; struct trace_event_data_offsets_ext4_mballoc_prealloc {}; struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; struct trace_event_data_offsets_ext4_other_inode_update_time {}; struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; struct trace_event_data_offsets_ext4_remove_blocks {}; struct trace_event_data_offsets_ext4_request_blocks {}; struct trace_event_data_offsets_ext4_request_inode {}; struct trace_event_data_offsets_ext4_shutdown {}; struct trace_event_data_offsets_ext4_sync_file_enter {}; struct trace_event_data_offsets_ext4_sync_file_exit {}; struct trace_event_data_offsets_ext4_sync_fs {}; struct trace_event_data_offsets_ext4_unlink_enter {}; struct trace_event_data_offsets_ext4_unlink_exit {}; struct trace_event_data_offsets_ext4_update_sb {}; struct trace_event_data_offsets_ext4_writepages {}; struct trace_event_data_offsets_ext4_writepages_result {}; struct trace_event_data_offsets_fib6_table_lookup {}; struct trace_event_data_offsets_fib_table_lookup {}; struct trace_event_data_offsets_file_check_and_advance_wb_err {}; struct trace_event_data_offsets_filelock_lease {}; struct trace_event_data_offsets_filelock_lock {}; struct trace_event_data_offsets_filemap_set_wb_err {}; struct trace_event_data_offsets_finish_task_reaping {}; struct trace_event_data_offsets_flush_foreign {}; struct trace_event_data_offsets_free_vmap_area_noflush {}; struct trace_event_data_offsets_generic_add_lease {}; struct trace_event_data_offsets_global_dirty_state {}; struct trace_event_data_offsets_guest_halt_poll_ns {}; struct trace_event_data_offsets_hrtimer_class {}; struct trace_event_data_offsets_hrtimer_expire_entry {}; struct trace_event_data_offsets_hrtimer_init {}; struct trace_event_data_offsets_hrtimer_start {}; struct trace_event_data_offsets_hugepage_set {}; struct trace_event_data_offsets_hugepage_update {}; struct trace_event_data_offsets_hwmon_attr_class { u32 attr_name; const void *attr_name_ptr_; }; struct trace_event_data_offsets_hwmon_attr_show_string { u32 attr_name; const void *attr_name_ptr_; u32 label; const void *label_ptr_; }; struct trace_event_data_offsets_icmp_send {}; struct trace_event_data_offsets_inet_sk_error_report {}; struct trace_event_data_offsets_inet_sock_set_state {}; struct trace_event_data_offsets_initcall_finish {}; struct trace_event_data_offsets_initcall_level { u32 level; const void *level_ptr_; }; struct trace_event_data_offsets_initcall_start {}; struct trace_event_data_offsets_inode_foreign_history {}; struct trace_event_data_offsets_inode_switch_wbs {}; struct trace_event_data_offsets_io_uring_complete {}; struct trace_event_data_offsets_io_uring_cqe_overflow {}; struct trace_event_data_offsets_io_uring_cqring_wait {}; struct trace_event_data_offsets_io_uring_create {}; struct trace_event_data_offsets_io_uring_defer { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_fail_link { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_file_get {}; struct trace_event_data_offsets_io_uring_link {}; struct trace_event_data_offsets_io_uring_local_work_run {}; struct trace_event_data_offsets_io_uring_poll_arm { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_queue_async_work { u32 op_str; const void *op_str_ptr_; }; struct 
trace_event_data_offsets_io_uring_register {}; struct trace_event_data_offsets_io_uring_req_failed { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_short_write {}; struct trace_event_data_offsets_io_uring_submit_req { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_task_add { u32 op_str; const void *op_str_ptr_; }; struct trace_event_data_offsets_io_uring_task_work_run {}; struct trace_event_data_offsets_iomap_class {}; struct trace_event_data_offsets_iomap_dio_complete {}; struct trace_event_data_offsets_iomap_dio_rw_begin {}; struct trace_event_data_offsets_iomap_iter {}; struct trace_event_data_offsets_iomap_range_class {}; struct trace_event_data_offsets_iomap_readpage_class {}; struct trace_event_data_offsets_iomap_writepage_map {}; struct trace_event_data_offsets_iommu_device_event { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_iommu_error { u32 device; const void *device_ptr_; u32 driver; const void *driver_ptr_; }; struct trace_event_data_offsets_iommu_group_event { u32 device; const void *device_ptr_; }; struct trace_event_data_offsets_ipi_handler {}; struct trace_event_data_offsets_ipi_raise { u32 target_cpus; const void *target_cpus_ptr_; }; struct trace_event_data_offsets_ipi_send_cpu {}; struct trace_event_data_offsets_ipi_send_cpumask { u32 cpumask; const void *cpumask_ptr_; }; struct trace_event_data_offsets_irq_handler_entry { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_irq_handler_exit {}; struct trace_event_data_offsets_irq_matrix_cpu {}; struct trace_event_data_offsets_irq_matrix_global {}; struct trace_event_data_offsets_irq_matrix_global_update {}; struct trace_event_data_offsets_itimer_expire {}; struct trace_event_data_offsets_itimer_state {}; struct trace_event_data_offsets_jbd2_checkpoint {}; struct trace_event_data_offsets_jbd2_checkpoint_stats {}; struct trace_event_data_offsets_jbd2_commit {}; struct trace_event_data_offsets_jbd2_end_commit {}; struct trace_event_data_offsets_jbd2_handle_extend {}; struct trace_event_data_offsets_jbd2_handle_start_class {}; struct trace_event_data_offsets_jbd2_handle_stats {}; struct trace_event_data_offsets_jbd2_journal_shrink {}; struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; struct trace_event_data_offsets_jbd2_run_stats {}; struct trace_event_data_offsets_jbd2_shrink_checkpoint_list {}; struct trace_event_data_offsets_jbd2_shrink_scan_exit {}; struct trace_event_data_offsets_jbd2_submit_inode_data {}; struct trace_event_data_offsets_jbd2_update_log_tail {}; struct trace_event_data_offsets_jbd2_write_superblock {}; struct trace_event_data_offsets_kcompactd_wake_template {}; struct trace_event_data_offsets_kfree {}; struct trace_event_data_offsets_kfree_skb {}; struct trace_event_data_offsets_kmalloc {}; struct trace_event_data_offsets_kmem_cache_alloc {}; struct trace_event_data_offsets_kmem_cache_free { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_ksm_advisor {}; struct trace_event_data_offsets_ksm_enter_exit_template {}; struct trace_event_data_offsets_ksm_merge_one_page {}; struct trace_event_data_offsets_ksm_merge_with_ksm_page {}; struct trace_event_data_offsets_ksm_remove_ksm_page {}; struct trace_event_data_offsets_ksm_remove_rmap_item {}; struct trace_event_data_offsets_ksm_scan_template {}; struct trace_event_data_offsets_kyber_adjust {}; struct trace_event_data_offsets_kyber_latency {}; struct trace_event_data_offsets_kyber_throttled {}; struct 
trace_event_data_offsets_leases_conflict {}; struct trace_event_data_offsets_lock { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_lock_acquire { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_locks_get_lock_context {}; struct trace_event_data_offsets_ma_op {}; struct trace_event_data_offsets_ma_read {}; struct trace_event_data_offsets_ma_write {}; struct trace_event_data_offsets_map {}; struct trace_event_data_offsets_mark_victim { u32 comm; const void *comm_ptr_; }; struct trace_event_data_offsets_mc_event { u32 msg; const void *msg_ptr_; u32 label; const void *label_ptr_; u32 driver_detail; const void *driver_detail_ptr_; }; struct trace_event_data_offsets_mce_record {}; struct trace_event_data_offsets_mei_pci_cfg_read { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_mei_reg_read { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_mei_reg_write { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_mem_connect {}; struct trace_event_data_offsets_mem_disconnect {}; struct trace_event_data_offsets_mem_return_failed {}; struct trace_event_data_offsets_memory_failure_event {}; struct trace_event_data_offsets_migration_pmd {}; struct trace_event_data_offsets_migration_pte {}; struct trace_event_data_offsets_mm_alloc_contig_migrate_range_info {}; struct trace_event_data_offsets_mm_collapse_huge_page {}; struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; struct trace_event_data_offsets_mm_compaction_begin {}; struct trace_event_data_offsets_mm_compaction_defer_template {}; struct trace_event_data_offsets_mm_compaction_end {}; struct trace_event_data_offsets_mm_compaction_isolate_template {}; struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; struct trace_event_data_offsets_mm_compaction_migratepages {}; struct trace_event_data_offsets_mm_compaction_suitable_template {}; struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; struct trace_event_data_offsets_mm_filemap_fault {}; struct trace_event_data_offsets_mm_filemap_op_page_cache {}; struct trace_event_data_offsets_mm_filemap_op_page_cache_range {}; struct trace_event_data_offsets_mm_khugepaged_collapse_file { u32 filename; const void *filename_ptr_; }; struct trace_event_data_offsets_mm_khugepaged_scan_file { u32 filename; const void *filename_ptr_; }; struct trace_event_data_offsets_mm_khugepaged_scan_pmd {}; struct trace_event_data_offsets_mm_lru_activate {}; struct trace_event_data_offsets_mm_lru_insertion {}; struct trace_event_data_offsets_mm_migrate_pages {}; struct trace_event_data_offsets_mm_migrate_pages_start {}; struct trace_event_data_offsets_mm_page {}; struct trace_event_data_offsets_mm_page_alloc {}; struct trace_event_data_offsets_mm_page_alloc_extfrag {}; struct trace_event_data_offsets_mm_page_free {}; struct trace_event_data_offsets_mm_page_free_batched {}; struct trace_event_data_offsets_mm_page_pcpu_drain {}; struct trace_event_data_offsets_mm_shrink_slab_end {}; struct trace_event_data_offsets_mm_shrink_slab_start {}; struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; struct 
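/*
 * Many of these trace_event_data_offsets_* structs are empty ({}): those
 * events record only fixed-size fields, so there are no dynamic-array
 * offsets to keep. The type still exists because the event-generation
 * macros appear to emit one such struct per event unconditionally.
 */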
trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; struct trace_event_data_offsets_mm_vmscan_throttled {}; struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; struct trace_event_data_offsets_mm_vmscan_write_folio {}; struct trace_event_data_offsets_mmap_lock { u32 memcg_path; const void *memcg_path_ptr_; }; struct trace_event_data_offsets_mmap_lock_acquire_returned { u32 memcg_path; const void *memcg_path_ptr_; }; struct trace_event_data_offsets_module_free { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_module_load { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_module_refcnt { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_module_request { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_mptcp_dump_mpext {}; struct trace_event_data_offsets_mptcp_subflow_get_send {}; struct trace_event_data_offsets_msr_trace_class {}; struct trace_event_data_offsets_napi_poll { u32 dev_name; const void *dev_name_ptr_; }; struct trace_event_data_offsets_neigh__update { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_neigh_create { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_neigh_update { u32 dev; const void *dev_ptr_; }; struct trace_event_data_offsets_net_dev_rx_exit_template {}; struct trace_event_data_offsets_net_dev_rx_verbose_template { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_net_dev_start_xmit { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_net_dev_template { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_net_dev_xmit { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_net_dev_xmit_timeout { u32 name; const void *name_ptr_; u32 driver; const void *driver_ptr_; }; struct trace_event_data_offsets_netlink_extack { u32 msg; const void *msg_ptr_; }; struct trace_event_data_offsets_nmi_handler {}; struct trace_event_data_offsets_non_standard_event { u32 fru_text; const void *fru_text_ptr_; u32 buf; const void *buf_ptr_; }; struct trace_event_data_offsets_notifier_info {}; struct trace_event_data_offsets_oom_score_adj_update {}; struct trace_event_data_offsets_page_pool_release {}; struct trace_event_data_offsets_page_pool_state_hold {}; struct trace_event_data_offsets_page_pool_state_release {}; struct trace_event_data_offsets_page_pool_update_nid {}; struct trace_event_data_offsets_percpu_alloc_percpu {}; struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; struct trace_event_data_offsets_percpu_create_chunk {}; struct trace_event_data_offsets_percpu_destroy_chunk {}; struct trace_event_data_offsets_percpu_free_percpu {}; struct trace_event_data_offsets_pm_qos_update {}; struct trace_event_data_offsets_power_domain { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_powernv_throttle { u32 reason; const void *reason_ptr_; }; struct trace_event_data_offsets_preemptirq_template {}; struct trace_event_data_offsets_pstate_sample {}; struct trace_event_data_offsets_purge_vmap_area_lazy {}; struct trace_event_data_offsets_qdisc_create { u32 dev; const void *dev_ptr_; u32 kind; const void *kind_ptr_; }; struct trace_event_data_offsets_qdisc_dequeue {}; struct trace_event_data_offsets_qdisc_destroy { u32 dev; const void *dev_ptr_; u32 kind; const void *kind_ptr_; }; struct trace_event_data_offsets_qdisc_enqueue {}; struct trace_event_data_offsets_qdisc_reset { u32 dev; const void *dev_ptr_; u32 
kind; const void *kind_ptr_; }; struct trace_event_data_offsets_rcu_barrier {}; struct trace_event_data_offsets_rcu_batch_end {}; struct trace_event_data_offsets_rcu_batch_start {}; struct trace_event_data_offsets_rcu_callback {}; struct trace_event_data_offsets_rcu_exp_funnel_lock {}; struct trace_event_data_offsets_rcu_exp_grace_period {}; struct trace_event_data_offsets_rcu_fqs {}; struct trace_event_data_offsets_rcu_future_grace_period {}; struct trace_event_data_offsets_rcu_grace_period {}; struct trace_event_data_offsets_rcu_grace_period_init {}; struct trace_event_data_offsets_rcu_invoke_callback {}; struct trace_event_data_offsets_rcu_invoke_kfree_bulk_callback {}; struct trace_event_data_offsets_rcu_invoke_kvfree_callback {}; struct trace_event_data_offsets_rcu_kvfree_callback {}; struct trace_event_data_offsets_rcu_preempt_task {}; struct trace_event_data_offsets_rcu_quiescent_state_report {}; struct trace_event_data_offsets_rcu_segcb_stats {}; struct trace_event_data_offsets_rcu_sr_normal {}; struct trace_event_data_offsets_rcu_stall_warning {}; struct trace_event_data_offsets_rcu_torture_read {}; struct trace_event_data_offsets_rcu_unlock_preempted_task {}; struct trace_event_data_offsets_rcu_utilization {}; struct trace_event_data_offsets_rcu_watching {}; struct trace_event_data_offsets_reclaim_retry_zone {}; struct trace_event_data_offsets_rpm_internal { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_rpm_return_int { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_rpm_status { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_rseq_ip_fixup {}; struct trace_event_data_offsets_rseq_update {}; struct trace_event_data_offsets_rss_stat {}; struct trace_event_data_offsets_sched_kthread_stop {}; struct trace_event_data_offsets_sched_kthread_stop_ret {}; struct trace_event_data_offsets_sched_kthread_work_execute_end {}; struct trace_event_data_offsets_sched_kthread_work_execute_start {}; struct trace_event_data_offsets_sched_kthread_work_queue_work {}; struct trace_event_data_offsets_sched_migrate_task {}; struct trace_event_data_offsets_sched_move_numa {}; struct trace_event_data_offsets_sched_numa_pair_template {}; struct trace_event_data_offsets_sched_pi_setprio {}; struct trace_event_data_offsets_sched_prepare_exec { u32 interp; const void *interp_ptr_; u32 filename; const void *filename_ptr_; u32 comm; const void *comm_ptr_; }; struct trace_event_data_offsets_sched_process_exec { u32 filename; const void *filename_ptr_; }; struct trace_event_data_offsets_sched_process_fork {}; struct trace_event_data_offsets_sched_process_hang {}; struct trace_event_data_offsets_sched_process_template {}; struct trace_event_data_offsets_sched_process_wait {}; struct trace_event_data_offsets_sched_skip_vma_numa {}; struct trace_event_data_offsets_sched_stat_runtime {}; struct trace_event_data_offsets_sched_stat_template {}; struct trace_event_data_offsets_sched_switch {}; struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; struct trace_event_data_offsets_sched_wakeup_template {}; struct trace_event_data_offsets_selinux_audited { u32 scontext; const void *scontext_ptr_; u32 tcontext; const void *tcontext_ptr_; u32 tclass; const void *tclass_ptr_; }; struct trace_event_data_offsets_signal_deliver {}; struct trace_event_data_offsets_signal_generate {}; struct trace_event_data_offsets_sk_data_ready {}; struct trace_event_data_offsets_skb_copy_datagram_iovec {}; struct trace_event_data_offsets_skip_task_reaping {}; struct 
trace_event_data_offsets_sock_exceed_buf_limit {}; struct trace_event_data_offsets_sock_msg_length {}; struct trace_event_data_offsets_sock_rcvqueue_full {}; struct trace_event_data_offsets_softirq {}; struct trace_event_data_offsets_start_task_reaping {}; struct trace_event_data_offsets_subflow_check_data_avail {}; struct trace_event_data_offsets_suspend_resume {}; struct trace_event_data_offsets_swiotlb_bounced { u32 dev_name; const void *dev_name_ptr_; }; struct trace_event_data_offsets_sys_enter {}; struct trace_event_data_offsets_sys_exit {}; struct trace_event_data_offsets_task_newtask {}; struct trace_event_data_offsets_task_rename {}; struct trace_event_data_offsets_tasklet {}; struct trace_event_data_offsets_tcp_ao_event {}; struct trace_event_data_offsets_tcp_ao_event_sk {}; struct trace_event_data_offsets_tcp_ao_event_sne {}; struct trace_event_data_offsets_tcp_cong_state_set {}; struct trace_event_data_offsets_tcp_event_sk {}; struct trace_event_data_offsets_tcp_event_sk_skb {}; struct trace_event_data_offsets_tcp_event_skb {}; struct trace_event_data_offsets_tcp_hash_event {}; struct trace_event_data_offsets_tcp_probe {}; struct trace_event_data_offsets_tcp_retransmit_synack {}; struct trace_event_data_offsets_tcp_send_reset {}; struct trace_event_data_offsets_test_pages_isolated {}; struct trace_event_data_offsets_thermal_temperature { u32 thermal_zone; const void *thermal_zone_ptr_; }; struct trace_event_data_offsets_thermal_zone_trip { u32 thermal_zone; const void *thermal_zone_ptr_; }; struct trace_event_data_offsets_tick_stop {}; struct trace_event_data_offsets_timer_base_idle {}; struct trace_event_data_offsets_timer_class {}; struct trace_event_data_offsets_timer_expire_entry {}; struct trace_event_data_offsets_timer_start {}; struct trace_event_data_offsets_tlb_flush {}; struct trace_event_data_offsets_tls_device_decrypted {}; struct trace_event_data_offsets_tls_device_offload_set {}; struct trace_event_data_offsets_tls_device_rx_resync_nh_delay {}; struct trace_event_data_offsets_tls_device_rx_resync_nh_schedule {}; struct trace_event_data_offsets_tls_device_rx_resync_send {}; struct trace_event_data_offsets_tls_device_tx_resync_req {}; struct trace_event_data_offsets_tls_device_tx_resync_send {}; struct trace_event_data_offsets_tmigr_connect_child_parent {}; struct trace_event_data_offsets_tmigr_connect_cpu_parent {}; struct trace_event_data_offsets_tmigr_cpugroup {}; struct trace_event_data_offsets_tmigr_group_and_cpu {}; struct trace_event_data_offsets_tmigr_group_set {}; struct trace_event_data_offsets_tmigr_handle_remote {}; struct trace_event_data_offsets_tmigr_idle {}; struct trace_event_data_offsets_tmigr_update_events {}; struct trace_event_data_offsets_track_foreign_dirty {}; struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; struct trace_event_data_offsets_unmap {}; struct trace_event_data_offsets_vector_activate {}; struct trace_event_data_offsets_vector_alloc {}; struct trace_event_data_offsets_vector_alloc_managed {}; struct trace_event_data_offsets_vector_config {}; struct trace_event_data_offsets_vector_free_moved {}; struct trace_event_data_offsets_vector_mod {}; struct trace_event_data_offsets_vector_reserve {}; struct trace_event_data_offsets_vector_setup {}; struct trace_event_data_offsets_vector_teardown {}; struct trace_event_data_offsets_virtio_transport_alloc_pkt {}; struct trace_event_data_offsets_virtio_transport_recv_pkt {}; struct trace_event_data_offsets_vm_unmapped_area {}; struct trace_event_data_offsets_vma_mas_szero {}; struct 
trace_event_data_offsets_vma_store {}; struct trace_event_data_offsets_wake_reaper {}; struct trace_event_data_offsets_wakeup_source { u32 name; const void *name_ptr_; }; struct trace_event_data_offsets_wbc_class {}; struct trace_event_data_offsets_workqueue_activate_work {}; struct trace_event_data_offsets_workqueue_execute_end {}; struct trace_event_data_offsets_workqueue_execute_start {}; struct trace_event_data_offsets_workqueue_queue_work { u32 workqueue; const void *workqueue_ptr_; }; struct trace_event_data_offsets_writeback_bdi_register {}; struct trace_event_data_offsets_writeback_class {}; struct trace_event_data_offsets_writeback_dirty_inode_template {}; struct trace_event_data_offsets_writeback_folio_template {}; struct trace_event_data_offsets_writeback_inode_template {}; struct trace_event_data_offsets_writeback_pages_written {}; struct trace_event_data_offsets_writeback_queue_io {}; struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; struct trace_event_data_offsets_writeback_single_inode_template {}; struct trace_event_data_offsets_writeback_work_class {}; struct trace_event_data_offsets_writeback_write_inode_template {}; struct trace_event_data_offsets_x86_exceptions {}; struct trace_event_data_offsets_x86_fpu {}; struct trace_event_data_offsets_x86_irq_vector {}; struct trace_event_data_offsets_xdp_bulk_tx {}; struct trace_event_data_offsets_xdp_cpumap_enqueue {}; struct trace_event_data_offsets_xdp_cpumap_kthread {}; struct trace_event_data_offsets_xdp_devmap_xmit {}; struct trace_event_data_offsets_xdp_exception {}; struct trace_event_data_offsets_xdp_redirect_template {}; struct trace_event_fields { const char *type; union { struct { const char *name; const int size; const int align; const int is_signed; const int filter_type; const int len; }; int (*define_fields)(struct trace_event_call *); }; }; struct trace_subsystem_dir; struct trace_event_file { struct list_head list; struct trace_event_call *event_call; struct event_filter *filter; struct eventfs_inode *ei; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; long unsigned int flags; refcount_t ref; atomic_t sm_ref; atomic_t tm_ref; }; typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); struct trace_event_functions { trace_print_func trace; trace_print_func raw; trace_print_func hex; trace_print_func binary; }; struct trace_event_raw_ack_update_msk { struct trace_entry ent; u64 data_ack; u64 old_snd_una; u64 new_snd_una; u64 new_wnd_end; u64 msk_wnd_end; char __data[0]; }; struct trace_event_raw_aer_event { struct trace_entry ent; u32 __data_loc_dev_name; u32 status; u8 severity; u8 tlp_header_valid; u32 tlp_header[4]; char __data[0]; }; struct trace_event_raw_alarm_class { struct trace_entry ent; void *alarm; unsigned char alarm_type; s64 expires; s64 now; char __data[0]; }; struct trace_event_raw_alarmtimer_suspend { struct trace_entry ent; s64 expires; unsigned char alarm_type; char __data[0]; }; struct trace_event_raw_alloc_vmap_area { struct trace_entry ent; long unsigned int addr; long unsigned int size; long unsigned int align; long unsigned int vstart; long unsigned int vend; int failed; char __data[0]; }; struct trace_event_raw_amd_pstate_perf { struct trace_entry ent; long unsigned int min_perf; long unsigned int target_perf; long unsigned int capacity; long long unsigned int freq; long long unsigned int mperf; long long unsigned int aperf; long long unsigned int tsc; unsigned int cpu_id; bool changed; bool 
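/*
 * struct trace_event_fields (defined just above) describes one field of an
 * event or, through the define_fields() union member, a callback that
 * registers the fields programmatically. Generated events provide a
 * fields_array terminated by an all-zero sentinel entry, roughly like the
 * following sketch (the event name and field values are illustrative, not
 * taken from this file):
 *
 *      static struct trace_event_fields demo_event_fields[] = {
 *              { .type = "int", .name = "err", .size = sizeof(int),
 *                .align = __alignof__(int), .is_signed = 1 },
 *              {}      // sentinel terminating the array
 *      };
 */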
fast_switch; char __data[0]; }; struct trace_event_raw_arm_event { struct trace_entry ent; u64 mpidr; u64 midr; u32 running_state; u32 psci_state; u8 affinity; char __data[0]; }; struct trace_event_raw_balance_dirty_pages { struct trace_entry ent; char bdi[32]; long unsigned int limit; long unsigned int setpoint; long unsigned int dirty; long unsigned int bdi_setpoint; long unsigned int bdi_dirty; long unsigned int dirty_ratelimit; long unsigned int task_ratelimit; unsigned int dirtied; unsigned int dirtied_pause; long unsigned int paused; long int pause; long unsigned int period; long int think; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_bdi_dirty_ratelimit { struct trace_entry ent; char bdi[32]; long unsigned int write_bw; long unsigned int avg_write_bw; long unsigned int dirty_rate; long unsigned int dirty_ratelimit; long unsigned int task_ratelimit; long unsigned int balanced_dirty_ratelimit; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_block_bio { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; char rwbs[8]; char comm[16]; char __data[0]; }; struct trace_event_raw_block_bio_complete { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; int error; char rwbs[8]; char __data[0]; }; struct trace_event_raw_block_bio_remap { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; dev_t old_dev; sector_t old_sector; char rwbs[8]; char __data[0]; }; struct trace_event_raw_block_buffer { struct trace_entry ent; dev_t dev; sector_t sector; size_t size; char __data[0]; }; struct trace_event_raw_block_plug { struct trace_entry ent; char comm[16]; char __data[0]; }; struct trace_event_raw_block_rq { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; unsigned int bytes; short unsigned int ioprio; char rwbs[8]; char comm[16]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_rq_completion { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; int error; short unsigned int ioprio; char rwbs[8]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_rq_remap { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; dev_t old_dev; sector_t old_sector; unsigned int nr_bios; char rwbs[8]; char __data[0]; }; struct trace_event_raw_block_rq_requeue { struct trace_entry ent; dev_t dev; sector_t sector; unsigned int nr_sector; short unsigned int ioprio; char rwbs[8]; u32 __data_loc_cmd; char __data[0]; }; struct trace_event_raw_block_split { struct trace_entry ent; dev_t dev; sector_t sector; sector_t new_sector; char rwbs[8]; char comm[16]; char __data[0]; }; struct trace_event_raw_block_unplug { struct trace_entry ent; int nr_rq; char comm[16]; char __data[0]; }; struct trace_event_raw_bpf_test_finish { struct trace_entry ent; int err; char __data[0]; }; struct trace_event_raw_bpf_trace_printk { struct trace_entry ent; u32 __data_loc_bpf_string; char __data[0]; }; struct trace_event_raw_bpf_trigger_tp { struct trace_entry ent; int nonce; char __data[0]; }; struct trace_event_raw_bpf_xdp_link_attach_failed { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_cdev_update { struct trace_entry ent; u32 __data_loc_type; long unsigned int target; char __data[0]; }; struct trace_event_raw_cgroup { struct trace_entry ent; int root; int level; u64 id; u32 __data_loc_path; char __data[0]; }; struct trace_event_raw_cgroup_event { struct trace_entry ent; int root; int level; u64 id; 
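/*
 * Every trace_event_raw_* record begins with the common struct trace_entry
 * header, followed by the event's fixed-size fields; variable-size
 * payloads are appended after the struct and reached through the
 * char __data[0] flexible tail. As a sketch, reusing the __data_loc
 * encoding described earlier, the message carried by
 * trace_event_raw_bpf_xdp_link_attach_failed (defined above) can be
 * located like this (helper name hypothetical):
 *
 *      static inline const char *
 *      demo_xdp_attach_failed_msg(const struct trace_event_raw_bpf_xdp_link_attach_failed *rec)
 *      {
 *              // low 16 bits of __data_loc_msg: offset of the string
 *              return (const char *)rec + (rec->__data_loc_msg & 0xffff);
 *      }
 */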
u32 __data_loc_path; int val; char __data[0]; }; struct trace_event_raw_cgroup_migrate { struct trace_entry ent; int dst_root; int dst_level; u64 dst_id; int pid; u32 __data_loc_dst_path; u32 __data_loc_comm; char __data[0]; }; struct trace_event_raw_cgroup_root { struct trace_entry ent; int root; u16 ss_mask; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_cgroup_rstat { struct trace_entry ent; int root; int level; u64 id; int cpu; bool contended; char __data[0]; }; struct trace_event_raw_clock { struct trace_entry ent; u32 __data_loc_name; u64 state; u64 cpu_id; char __data[0]; }; struct trace_event_raw_cma_alloc_busy_retry { struct trace_entry ent; u32 __data_loc_name; long unsigned int pfn; const struct page *page; long unsigned int count; unsigned int align; char __data[0]; }; struct trace_event_raw_cma_alloc_finish { struct trace_entry ent; u32 __data_loc_name; long unsigned int pfn; const struct page *page; long unsigned int count; unsigned int align; int errorno; char __data[0]; }; struct trace_event_raw_cma_alloc_start { struct trace_entry ent; u32 __data_loc_name; long unsigned int count; unsigned int align; char __data[0]; }; struct trace_event_raw_cma_release { struct trace_entry ent; u32 __data_loc_name; long unsigned int pfn; const struct page *page; long unsigned int count; char __data[0]; }; struct trace_event_raw_compact_retry { struct trace_entry ent; int order; int priority; int result; int retries; int max_retries; bool ret; char __data[0]; }; struct trace_event_raw_console { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_consume_skb { struct trace_entry ent; void *skbaddr; void *location; char __data[0]; }; struct trace_event_raw_contention_begin { struct trace_entry ent; void *lock_addr; unsigned int flags; char __data[0]; }; struct trace_event_raw_contention_end { struct trace_entry ent; void *lock_addr; int ret; char __data[0]; }; struct trace_event_raw_cpu { struct trace_entry ent; u32 state; u32 cpu_id; char __data[0]; }; struct trace_event_raw_cpu_frequency_limits { struct trace_entry ent; u32 min_freq; u32 max_freq; u32 cpu_id; char __data[0]; }; struct trace_event_raw_cpu_idle_miss { struct trace_entry ent; u32 cpu_id; u32 state; bool below; char __data[0]; }; struct trace_event_raw_cpu_latency_qos_request { struct trace_entry ent; s32 value; char __data[0]; }; struct trace_event_raw_cpuhp_enter { struct trace_entry ent; unsigned int cpu; int target; int idx; void *fun; char __data[0]; }; struct trace_event_raw_cpuhp_exit { struct trace_entry ent; unsigned int cpu; int state; int idx; int ret; char __data[0]; }; struct trace_event_raw_cpuhp_multi_enter { struct trace_entry ent; unsigned int cpu; int target; int idx; void *fun; char __data[0]; }; struct trace_event_raw_csd_function { struct trace_entry ent; void *func; void *csd; char __data[0]; }; struct trace_event_raw_csd_queue_cpu { struct trace_entry ent; unsigned int cpu; void *callsite; void *func; void *csd; char __data[0]; }; struct trace_event_raw_dev_pm_qos_request { struct trace_entry ent; u32 __data_loc_name; enum dev_pm_qos_req_type type; s32 new_value; char __data[0]; }; struct trace_event_raw_device_pm_callback_end { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; int error; char __data[0]; }; struct trace_event_raw_device_pm_callback_start { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; u32 __data_loc_parent; u32 __data_loc_pm_ops; int event; char __data[0]; }; struct 
trace_event_raw_devlink_health_recover_aborted { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; bool health_state; u64 time_since_last_recover; char __data[0]; }; struct trace_event_raw_devlink_health_report { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_devlink_health_reporter_state_update { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_reporter_name; u8 new_state; char __data[0]; }; struct trace_event_raw_devlink_hwerr { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; int err; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_devlink_hwmsg { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; bool incoming; long unsigned int type; u32 __data_loc_buf; size_t len; char __data[0]; }; struct trace_event_raw_devlink_trap_report { struct trace_entry ent; u32 __data_loc_bus_name; u32 __data_loc_dev_name; u32 __data_loc_driver_name; u32 __data_loc_trap_name; u32 __data_loc_trap_group_name; char input_dev_name[16]; char __data[0]; }; struct trace_event_raw_devres { struct trace_entry ent; u32 __data_loc_devname; struct device *dev; const char *op; void *node; const char *name; size_t size; char __data[0]; }; struct trace_event_raw_dma_alloc { struct trace_entry ent; u32 __data_loc_device; u64 phys_addr; u64 dma_addr; size_t size; gfp_t flags; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dma_fence { struct trace_entry ent; u32 __data_loc_driver; u32 __data_loc_timeline; unsigned int context; unsigned int seqno; char __data[0]; }; struct trace_event_raw_dma_free { struct trace_entry ent; u32 __data_loc_device; u64 phys_addr; u64 dma_addr; size_t size; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dma_map { struct trace_entry ent; u32 __data_loc_device; u64 phys_addr; u64 dma_addr; size_t size; enum dma_data_direction dir; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dma_map_sg { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_phys_addrs; u32 __data_loc_dma_addrs; u32 __data_loc_lengths; enum dma_data_direction dir; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dma_sync_sg { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_dma_addrs; u32 __data_loc_lengths; enum dma_data_direction dir; char __data[0]; }; struct trace_event_raw_dma_sync_single { struct trace_entry ent; u32 __data_loc_device; u64 dma_addr; size_t size; enum dma_data_direction dir; char __data[0]; }; struct trace_event_raw_dma_unmap { struct trace_entry ent; u32 __data_loc_device; u64 addr; size_t size; enum dma_data_direction dir; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dma_unmap_sg { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_addrs; enum dma_data_direction dir; long unsigned int attrs; char __data[0]; }; struct trace_event_raw_dql_stall_detected { struct trace_entry ent; short unsigned int thrs; unsigned int len; long unsigned int last_reap; long unsigned int hist_head; long unsigned int now; long unsigned int hist[4]; char __data[0]; }; struct trace_event_raw_emulate_vsyscall { struct trace_entry ent; int nr; char __data[0]; }; struct 
trace_event_raw_error_report_template { struct trace_entry ent; enum error_detector error_detector; long unsigned int id; char __data[0]; }; struct trace_event_raw_exit_mmap { struct trace_entry ent; struct mm_struct *mm; struct maple_tree *mt; char __data[0]; }; struct trace_event_raw_ext4__bitmap_load { struct trace_entry ent; dev_t dev; __u32 group; char __data[0]; }; struct trace_event_raw_ext4__es_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; char __data[0]; }; struct trace_event_raw_ext4__es_shrink_enter { struct trace_entry ent; dev_t dev; int nr_to_scan; int cache_cnt; char __data[0]; }; struct trace_event_raw_ext4__fallocate_mode { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; int mode; char __data[0]; }; struct trace_event_raw_ext4__folio_op { struct trace_entry ent; dev_t dev; ino_t ino; long unsigned int index; char __data[0]; }; struct trace_event_raw_ext4__map_blocks_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; unsigned int len; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4__map_blocks_exit { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int flags; ext4_fsblk_t pblk; ext4_lblk_t lblk; unsigned int len; unsigned int mflags; int ret; char __data[0]; }; struct trace_event_raw_ext4__mb_new_pa { struct trace_entry ent; dev_t dev; ino_t ino; __u64 pa_pstart; __u64 pa_lstart; __u32 pa_len; char __data[0]; }; struct trace_event_raw_ext4__mballoc { struct trace_entry ent; dev_t dev; ino_t ino; int result_start; __u32 result_group; int result_len; char __data[0]; }; struct trace_event_raw_ext4__trim { struct trace_entry ent; int dev_major; int dev_minor; __u32 group; int start; int len; char __data[0]; }; struct trace_event_raw_ext4__truncate { struct trace_entry ent; dev_t dev; ino_t ino; __u64 blocks; char __data[0]; }; struct trace_event_raw_ext4__write_begin { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; char __data[0]; }; struct trace_event_raw_ext4__write_end { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int len; unsigned int copied; char __data[0]; }; struct trace_event_raw_ext4_alloc_da_blocks { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int data_blocks; char __data[0]; }; struct trace_event_raw_ext4_allocate_blocks { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; unsigned int len; __u32 logical; __u32 lleft; __u32 lright; __u64 goal; __u64 pleft; __u64 pright; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_allocate_inode { struct trace_entry ent; dev_t dev; ino_t ino; ino_t dir; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_begin_ordered_truncate { struct trace_entry ent; dev_t dev; ino_t ino; loff_t new_size; char __data[0]; }; struct trace_event_raw_ext4_collapse_range { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_da_release_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int freed_blocks; int reserved_data_blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_da_reserve_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int reserve_blocks; int reserved_data_blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_da_update_reserve_space { struct trace_entry ent; dev_t dev; ino_t ino; __u64 i_blocks; int used_blocks; int reserved_data_blocks; int quota_claim; __u16 mode; 
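/*
 * The struct trace_entry ent member that opens each of these records is
 * the common header; in recent kernels it carries the event type id plus
 * the pid, irq flags and preempt count captured when the event fired. The
 * type field is what ties a binary record back to the struct trace_event
 * (and its trace_event_functions) used to decode and print it.
 */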
char __data[0]; }; struct trace_event_raw_ext4_da_write_pages { struct trace_entry ent; dev_t dev; ino_t ino; long unsigned int first_page; long int nr_to_write; int sync_mode; char __data[0]; }; struct trace_event_raw_ext4_da_write_pages_extent { struct trace_entry ent; dev_t dev; ino_t ino; __u64 lblk; __u32 len; __u32 flags; char __data[0]; }; struct trace_event_raw_ext4_discard_blocks { struct trace_entry ent; dev_t dev; __u64 blk; __u64 count; char __data[0]; }; struct trace_event_raw_ext4_discard_preallocations { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int len; char __data[0]; }; struct trace_event_raw_ext4_drop_inode { struct trace_entry ent; dev_t dev; ino_t ino; int drop; char __data[0]; }; struct trace_event_raw_ext4_error { struct trace_entry ent; dev_t dev; const char *function; unsigned int line; char __data[0]; }; struct trace_event_raw_ext4_es_find_extent_range_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_es_find_extent_range_exit { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; char __data[0]; }; struct trace_event_raw_ext4_es_insert_delayed_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; bool lclu_allocated; bool end_allocated; char __data[0]; }; struct trace_event_raw_ext4_es_lookup_extent_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_es_lookup_extent_exit { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t lblk; ext4_lblk_t len; ext4_fsblk_t pblk; char status; int found; char __data[0]; }; struct trace_event_raw_ext4_es_remove_extent { struct trace_entry ent; dev_t dev; ino_t ino; loff_t lblk; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_es_shrink { struct trace_entry ent; dev_t dev; int nr_shrunk; long long unsigned int scan_time; int nr_skipped; int retried; char __data[0]; }; struct trace_event_raw_ext4_es_shrink_scan_exit { struct trace_entry ent; dev_t dev; int nr_shrunk; int cache_cnt; char __data[0]; }; struct trace_event_raw_ext4_evict_inode { struct trace_entry ent; dev_t dev; ino_t ino; int nlink; char __data[0]; }; struct trace_event_raw_ext4_ext_convert_to_initialized_enter { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t m_lblk; unsigned int m_len; ext4_lblk_t u_lblk; unsigned int u_len; ext4_fsblk_t u_pblk; char __data[0]; }; struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t m_lblk; unsigned int m_len; ext4_lblk_t u_lblk; unsigned int u_len; ext4_fsblk_t u_pblk; ext4_lblk_t i_lblk; unsigned int i_len; ext4_fsblk_t i_pblk; char __data[0]; }; struct trace_event_raw_ext4_ext_handle_unwritten_extents { struct trace_entry ent; dev_t dev; ino_t ino; int flags; ext4_lblk_t lblk; ext4_fsblk_t pblk; unsigned int len; unsigned int allocated; ext4_fsblk_t newblk; char __data[0]; }; struct trace_event_raw_ext4_ext_load_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; ext4_lblk_t lblk; char __data[0]; }; struct trace_event_raw_ext4_ext_remove_space { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t end; int depth; char __data[0]; }; struct trace_event_raw_ext4_ext_remove_space_done { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t end; int depth; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; 
int pc_state; short unsigned int eh_entries; char __data[0]; }; struct trace_event_raw_ext4_ext_rm_idx { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; char __data[0]; }; struct trace_event_raw_ext4_ext_rm_leaf { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t start; ext4_lblk_t ee_lblk; ext4_fsblk_t ee_pblk; short int ee_len; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; int pc_state; char __data[0]; }; struct trace_event_raw_ext4_ext_show_extent { struct trace_entry ent; dev_t dev; ino_t ino; ext4_fsblk_t pblk; ext4_lblk_t lblk; short unsigned int len; char __data[0]; }; struct trace_event_raw_ext4_fallocate_exit { struct trace_entry ent; dev_t dev; ino_t ino; loff_t pos; unsigned int blocks; int ret; char __data[0]; }; struct trace_event_raw_ext4_fc_cleanup { struct trace_entry ent; dev_t dev; int j_fc_off; int full; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_fc_commit_start { struct trace_entry ent; dev_t dev; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_fc_commit_stop { struct trace_entry ent; dev_t dev; int nblks; int reason; int num_fc; int num_fc_ineligible; int nblks_agg; tid_t tid; char __data[0]; }; struct trace_event_raw_ext4_fc_replay { struct trace_entry ent; dev_t dev; int tag; int ino; int priv1; int priv2; char __data[0]; }; struct trace_event_raw_ext4_fc_replay_scan { struct trace_entry ent; dev_t dev; int error; int off; char __data[0]; }; struct trace_event_raw_ext4_fc_stats { struct trace_entry ent; dev_t dev; unsigned int fc_ineligible_rc[10]; long unsigned int fc_commits; long unsigned int fc_ineligible_commits; long unsigned int fc_numblks; char __data[0]; }; struct trace_event_raw_ext4_fc_track_dentry { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; int error; char __data[0]; }; struct trace_event_raw_ext4_fc_track_inode { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; int error; char __data[0]; }; struct trace_event_raw_ext4_fc_track_range { struct trace_entry ent; dev_t dev; tid_t t_tid; ino_t i_ino; tid_t i_sync_tid; long int start; long int end; int error; char __data[0]; }; struct trace_event_raw_ext4_forget { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; int is_metadata; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_free_blocks { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; long unsigned int count; int flags; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_free_inode { struct trace_entry ent; dev_t dev; ino_t ino; uid_t uid; gid_t gid; __u64 blocks; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_fsmap_class { struct trace_entry ent; dev_t dev; dev_t keydev; u32 agno; u64 bno; u64 len; u64 owner; char __data[0]; }; struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { struct trace_entry ent; dev_t dev; unsigned int flags; ext4_lblk_t lblk; ext4_fsblk_t pblk; unsigned int len; int ret; char __data[0]; }; struct trace_event_raw_ext4_getfsmap_class { struct trace_entry ent; dev_t dev; dev_t keydev; u64 block; u64 len; u64 owner; u64 flags; char __data[0]; }; struct trace_event_raw_ext4_insert_range { struct trace_entry ent; dev_t dev; ino_t ino; loff_t offset; loff_t len; char __data[0]; }; struct trace_event_raw_ext4_invalidate_folio_op { struct trace_entry ent; dev_t dev; ino_t ino; long unsigned int index; size_t offset; size_t length; char __data[0]; }; struct trace_event_raw_ext4_journal_start_inode { struct trace_entry ent; long unsigned int ino; dev_t dev; long unsigned 
int ip; int blocks; int rsv_blocks; int revoke_creds; int type; char __data[0]; }; struct trace_event_raw_ext4_journal_start_reserved { struct trace_entry ent; dev_t dev; long unsigned int ip; int blocks; char __data[0]; }; struct trace_event_raw_ext4_journal_start_sb { struct trace_entry ent; dev_t dev; long unsigned int ip; int blocks; int rsv_blocks; int revoke_creds; int type; char __data[0]; }; struct trace_event_raw_ext4_lazy_itable_init { struct trace_entry ent; dev_t dev; __u32 group; char __data[0]; }; struct trace_event_raw_ext4_load_inode { struct trace_entry ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_ext4_mark_inode_dirty { struct trace_entry ent; dev_t dev; ino_t ino; long unsigned int ip; char __data[0]; }; struct trace_event_raw_ext4_mb_discard_preallocations { struct trace_entry ent; dev_t dev; int needed; char __data[0]; }; struct trace_event_raw_ext4_mb_release_group_pa { struct trace_entry ent; dev_t dev; __u64 pa_pstart; __u32 pa_len; char __data[0]; }; struct trace_event_raw_ext4_mb_release_inode_pa { struct trace_entry ent; dev_t dev; ino_t ino; __u64 block; __u32 count; char __data[0]; }; struct trace_event_raw_ext4_mballoc_alloc { struct trace_entry ent; dev_t dev; ino_t ino; __u32 orig_logical; int orig_start; __u32 orig_group; int orig_len; __u32 goal_logical; int goal_start; __u32 goal_group; int goal_len; __u32 result_logical; int result_start; __u32 result_group; int result_len; __u16 found; __u16 groups; __u16 buddy; __u16 flags; __u16 tail; __u8 cr; char __data[0]; }; struct trace_event_raw_ext4_mballoc_prealloc { struct trace_entry ent; dev_t dev; ino_t ino; __u32 orig_logical; int orig_start; __u32 orig_group; int orig_len; __u32 result_logical; int result_start; __u32 result_group; int result_len; char __data[0]; }; struct trace_event_raw_ext4_nfs_commit_metadata { struct trace_entry ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_ext4_other_inode_update_time { struct trace_entry ent; dev_t dev; ino_t ino; ino_t orig_ino; uid_t uid; gid_t gid; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_prefetch_bitmaps { struct trace_entry ent; dev_t dev; __u32 group; __u32 next; __u32 ios; char __data[0]; }; struct trace_event_raw_ext4_read_block_bitmap_load { struct trace_entry ent; dev_t dev; __u32 group; bool prefetch; char __data[0]; }; struct trace_event_raw_ext4_remove_blocks { struct trace_entry ent; dev_t dev; ino_t ino; ext4_lblk_t from; ext4_lblk_t to; ext4_fsblk_t ee_pblk; ext4_lblk_t ee_lblk; short unsigned int ee_len; ext4_fsblk_t pc_pclu; ext4_lblk_t pc_lblk; int pc_state; char __data[0]; }; struct trace_event_raw_ext4_request_blocks { struct trace_entry ent; dev_t dev; ino_t ino; unsigned int len; __u32 logical; __u32 lleft; __u32 lright; __u64 goal; __u64 pleft; __u64 pright; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_request_inode { struct trace_entry ent; dev_t dev; ino_t dir; __u16 mode; char __data[0]; }; struct trace_event_raw_ext4_shutdown { struct trace_entry ent; dev_t dev; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_sync_file_enter { struct trace_entry ent; dev_t dev; ino_t ino; ino_t parent; int datasync; char __data[0]; }; struct trace_event_raw_ext4_sync_file_exit { struct trace_entry ent; dev_t dev; ino_t ino; int ret; char __data[0]; }; struct trace_event_raw_ext4_sync_fs { struct trace_entry ent; dev_t dev; int wait; char __data[0]; }; struct trace_event_raw_ext4_unlink_enter { struct trace_entry ent; dev_t dev; ino_t ino; ino_t 
parent; loff_t size; char __data[0]; }; struct trace_event_raw_ext4_unlink_exit { struct trace_entry ent; dev_t dev; ino_t ino; int ret; char __data[0]; }; struct trace_event_raw_ext4_update_sb { struct trace_entry ent; dev_t dev; ext4_fsblk_t fsblk; unsigned int flags; char __data[0]; }; struct trace_event_raw_ext4_writepages { struct trace_entry ent; dev_t dev; ino_t ino; long int nr_to_write; long int pages_skipped; loff_t range_start; loff_t range_end; long unsigned int writeback_index; int sync_mode; char for_kupdate; char range_cyclic; char __data[0]; }; struct trace_event_raw_ext4_writepages_result { struct trace_entry ent; dev_t dev; ino_t ino; int ret; int pages_written; long int pages_skipped; long unsigned int writeback_index; int sync_mode; char __data[0]; }; struct trace_event_raw_fib6_table_lookup { struct trace_entry ent; u32 tb_id; int err; int oif; int iif; __u8 tos; __u8 scope; __u8 flags; __u8 src[16]; __u8 dst[16]; u16 sport; u16 dport; u8 proto; u8 rt_type; char name[16]; __u8 gw[16]; char __data[0]; }; struct trace_event_raw_fib_table_lookup { struct trace_entry ent; u32 tb_id; int err; int oif; int iif; u8 proto; __u8 tos; __u8 scope; __u8 flags; __u8 src[4]; __u8 dst[4]; __u8 gw4[4]; __u8 gw6[16]; u16 sport; u16 dport; char name[16]; char __data[0]; }; struct trace_event_raw_file_check_and_advance_wb_err { struct trace_entry ent; struct file *file; long unsigned int i_ino; dev_t s_dev; errseq_t old; errseq_t new; char __data[0]; }; struct trace_event_raw_filelock_lease { struct trace_entry ent; struct file_lease *fl; long unsigned int i_ino; dev_t s_dev; struct file_lock_core *blocker; fl_owner_t owner; unsigned int flags; unsigned char type; long unsigned int break_time; long unsigned int downgrade_time; char __data[0]; }; struct trace_event_raw_filelock_lock { struct trace_entry ent; struct file_lock *fl; long unsigned int i_ino; dev_t s_dev; struct file_lock_core *blocker; fl_owner_t owner; unsigned int pid; unsigned int flags; unsigned char type; loff_t fl_start; loff_t fl_end; int ret; char __data[0]; }; struct trace_event_raw_filemap_set_wb_err { struct trace_entry ent; long unsigned int i_ino; dev_t s_dev; errseq_t errseq; char __data[0]; }; struct trace_event_raw_finish_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_flush_foreign { struct trace_entry ent; char name[32]; ino_t cgroup_ino; unsigned int frn_bdi_id; unsigned int frn_memcg_id; char __data[0]; }; struct trace_event_raw_free_vmap_area_noflush { struct trace_entry ent; long unsigned int va_start; long unsigned int nr_lazy; long unsigned int nr_lazy_max; char __data[0]; }; struct trace_event_raw_generic_add_lease { struct trace_entry ent; long unsigned int i_ino; int wcount; int rcount; int icount; dev_t s_dev; fl_owner_t owner; unsigned int flags; unsigned char type; char __data[0]; }; struct trace_event_raw_global_dirty_state { struct trace_entry ent; long unsigned int nr_dirty; long unsigned int nr_writeback; long unsigned int background_thresh; long unsigned int dirty_thresh; long unsigned int dirty_limit; long unsigned int nr_dirtied; long unsigned int nr_written; char __data[0]; }; struct trace_event_raw_guest_halt_poll_ns { struct trace_entry ent; bool grow; unsigned int new; unsigned int old; char __data[0]; }; struct trace_event_raw_hrtimer_class { struct trace_entry ent; void *hrtimer; char __data[0]; }; struct trace_event_raw_hrtimer_expire_entry { struct trace_entry ent; void *hrtimer; s64 now; void *function; char __data[0]; }; struct 
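/*
 * Enum-typed fields such as the clockid_t/hrtimer_mode pair in the hrtimer
 * records below are stored as plain integers; the struct trace_eval_map
 * entries the kernel exports (see the definition earlier in this file) let
 * decoders translate those values back into symbolic names when rendering
 * the event's print_fmt string.
 */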
trace_event_raw_hrtimer_init { struct trace_entry ent; void *hrtimer; clockid_t clockid; enum hrtimer_mode mode; char __data[0]; }; struct trace_event_raw_hrtimer_start { struct trace_entry ent; void *hrtimer; void *function; s64 expires; s64 softexpires; enum hrtimer_mode mode; char __data[0]; }; struct trace_event_raw_hugepage_set { struct trace_entry ent; long unsigned int addr; long unsigned int pte; char __data[0]; }; struct trace_event_raw_hugepage_update { struct trace_entry ent; long unsigned int addr; long unsigned int pte; long unsigned int clr; long unsigned int set; char __data[0]; }; struct trace_event_raw_hwmon_attr_class { struct trace_entry ent; int index; u32 __data_loc_attr_name; long int val; char __data[0]; }; struct trace_event_raw_hwmon_attr_show_string { struct trace_entry ent; int index; u32 __data_loc_attr_name; u32 __data_loc_label; char __data[0]; }; struct trace_event_raw_icmp_send { struct trace_entry ent; const void *skbaddr; int type; int code; __u8 saddr[4]; __u8 daddr[4]; __u16 sport; __u16 dport; short unsigned int ulen; char __data[0]; }; struct trace_event_raw_inet_sk_error_report { struct trace_entry ent; int error; __u16 sport; __u16 dport; __u16 family; __u16 protocol; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_inet_sock_set_state { struct trace_entry ent; const void *skaddr; int oldstate; int newstate; __u16 sport; __u16 dport; __u16 family; __u16 protocol; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; typedef int (*initcall_t)(void); struct trace_event_raw_initcall_finish { struct trace_entry ent; initcall_t func; int ret; char __data[0]; }; struct trace_event_raw_initcall_level { struct trace_entry ent; u32 __data_loc_level; char __data[0]; }; struct trace_event_raw_initcall_start { struct trace_entry ent; initcall_t func; char __data[0]; }; struct trace_event_raw_inode_foreign_history { struct trace_entry ent; char name[32]; ino_t ino; ino_t cgroup_ino; unsigned int history; char __data[0]; }; struct trace_event_raw_inode_switch_wbs { struct trace_entry ent; char name[32]; ino_t ino; ino_t old_cgroup_ino; ino_t new_cgroup_ino; char __data[0]; }; struct trace_event_raw_io_uring_complete { struct trace_entry ent; void *ctx; void *req; u64 user_data; int res; unsigned int cflags; u64 extra1; u64 extra2; char __data[0]; }; struct trace_event_raw_io_uring_cqe_overflow { struct trace_entry ent; void *ctx; long long unsigned int user_data; s32 res; u32 cflags; void *ocqe; char __data[0]; }; struct trace_event_raw_io_uring_cqring_wait { struct trace_entry ent; void *ctx; int min_events; char __data[0]; }; struct trace_event_raw_io_uring_create { struct trace_entry ent; int fd; void *ctx; u32 sq_entries; u32 cq_entries; u32 flags; char __data[0]; }; struct trace_event_raw_io_uring_defer { struct trace_entry ent; void *ctx; void *req; long long unsigned int data; u8 opcode; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_fail_link { struct trace_entry ent; void *ctx; void *req; long long unsigned int user_data; u8 opcode; void *link; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_file_get { struct trace_entry ent; void *ctx; void *req; u64 user_data; int fd; char __data[0]; }; struct trace_event_raw_io_uring_link { struct trace_entry ent; void *ctx; void *req; void *target_req; char __data[0]; }; struct trace_event_raw_io_uring_local_work_run { struct trace_entry ent; void *ctx; int count; unsigned 
int loops; char __data[0]; }; struct trace_event_raw_io_uring_poll_arm { struct trace_entry ent; void *ctx; void *req; long long unsigned int user_data; u8 opcode; int mask; int events; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_queue_async_work { struct trace_entry ent; void *ctx; void *req; u64 user_data; u8 opcode; long long unsigned int flags; struct io_wq_work *work; int rw; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_register { struct trace_entry ent; void *ctx; unsigned int opcode; unsigned int nr_files; unsigned int nr_bufs; long int ret; char __data[0]; }; struct trace_event_raw_io_uring_req_failed { struct trace_entry ent; void *ctx; void *req; long long unsigned int user_data; u8 opcode; u8 flags; u8 ioprio; u64 off; u64 addr; u32 len; u32 op_flags; u16 buf_index; u16 personality; u32 file_index; u64 pad1; u64 addr3; int error; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_short_write { struct trace_entry ent; void *ctx; u64 fpos; u64 wanted; u64 got; char __data[0]; }; struct trace_event_raw_io_uring_submit_req { struct trace_entry ent; void *ctx; void *req; long long unsigned int user_data; u8 opcode; long long unsigned int flags; bool sq_thread; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_task_add { struct trace_entry ent; void *ctx; void *req; long long unsigned int user_data; u8 opcode; int mask; u32 __data_loc_op_str; char __data[0]; }; struct trace_event_raw_io_uring_task_work_run { struct trace_entry ent; void *tctx; unsigned int count; char __data[0]; }; struct trace_event_raw_iomap_class { struct trace_entry ent; dev_t dev; u64 ino; u64 addr; loff_t offset; u64 length; u16 type; u16 flags; dev_t bdev; char __data[0]; }; struct trace_event_raw_iomap_dio_complete { struct trace_entry ent; dev_t dev; ino_t ino; loff_t isize; loff_t pos; int ki_flags; bool aio; int error; ssize_t ret; char __data[0]; }; struct trace_event_raw_iomap_dio_rw_begin { struct trace_entry ent; dev_t dev; ino_t ino; loff_t isize; loff_t pos; size_t count; size_t done_before; int ki_flags; unsigned int dio_flags; bool aio; char __data[0]; }; struct trace_event_raw_iomap_iter { struct trace_entry ent; dev_t dev; u64 ino; loff_t pos; u64 length; s64 processed; unsigned int flags; const void *ops; long unsigned int caller; char __data[0]; }; struct trace_event_raw_iomap_range_class { struct trace_entry ent; dev_t dev; u64 ino; loff_t size; loff_t offset; u64 length; char __data[0]; }; struct trace_event_raw_iomap_readpage_class { struct trace_entry ent; dev_t dev; u64 ino; int nr_pages; char __data[0]; }; struct trace_event_raw_iomap_writepage_map { struct trace_entry ent; dev_t dev; u64 ino; u64 pos; u64 dirty_len; u64 addr; loff_t offset; u64 length; u16 type; u16 flags; dev_t bdev; char __data[0]; }; struct trace_event_raw_iommu_device_event { struct trace_entry ent; u32 __data_loc_device; char __data[0]; }; struct trace_event_raw_iommu_error { struct trace_entry ent; u32 __data_loc_device; u32 __data_loc_driver; u64 iova; int flags; char __data[0]; }; struct trace_event_raw_iommu_group_event { struct trace_entry ent; int gid; u32 __data_loc_device; char __data[0]; }; struct trace_event_raw_ipi_handler { struct trace_entry ent; const char *reason; char __data[0]; }; struct trace_event_raw_ipi_raise { struct trace_entry ent; u32 __data_loc_target_cpus; const char *reason; char __data[0]; }; struct trace_event_raw_ipi_send_cpu { struct trace_entry ent; unsigned int cpu; void 
*callsite; void *callback; char __data[0]; }; struct trace_event_raw_ipi_send_cpumask { struct trace_entry ent; u32 __data_loc_cpumask; void *callsite; void *callback; char __data[0]; }; struct trace_event_raw_irq_handler_entry { struct trace_entry ent; int irq; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_irq_handler_exit { struct trace_entry ent; int irq; int ret; char __data[0]; }; struct trace_event_raw_irq_matrix_cpu { struct trace_entry ent; int bit; unsigned int cpu; bool online; unsigned int available; unsigned int allocated; unsigned int managed; unsigned int online_maps; unsigned int global_available; unsigned int global_reserved; unsigned int total_allocated; char __data[0]; }; struct trace_event_raw_irq_matrix_global { struct trace_entry ent; unsigned int online_maps; unsigned int global_available; unsigned int global_reserved; unsigned int total_allocated; char __data[0]; }; struct trace_event_raw_irq_matrix_global_update { struct trace_entry ent; int bit; unsigned int online_maps; unsigned int global_available; unsigned int global_reserved; unsigned int total_allocated; char __data[0]; }; struct trace_event_raw_itimer_expire { struct trace_entry ent; int which; pid_t pid; long long unsigned int now; char __data[0]; }; struct trace_event_raw_itimer_state { struct trace_entry ent; int which; long long unsigned int expires; long int value_sec; long int value_nsec; long int interval_sec; long int interval_nsec; char __data[0]; }; struct trace_event_raw_jbd2_checkpoint { struct trace_entry ent; dev_t dev; int result; char __data[0]; }; struct trace_event_raw_jbd2_checkpoint_stats { struct trace_entry ent; dev_t dev; tid_t tid; long unsigned int chp_time; __u32 forced_to_close; __u32 written; __u32 dropped; char __data[0]; }; struct trace_event_raw_jbd2_commit { struct trace_entry ent; dev_t dev; char sync_commit; tid_t transaction; char __data[0]; }; struct trace_event_raw_jbd2_end_commit { struct trace_entry ent; dev_t dev; char sync_commit; tid_t transaction; tid_t head; char __data[0]; }; struct trace_event_raw_jbd2_handle_extend { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int buffer_credits; int requested_blocks; char __data[0]; }; struct trace_event_raw_jbd2_handle_start_class { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int requested_blocks; char __data[0]; }; struct trace_event_raw_jbd2_handle_stats { struct trace_entry ent; dev_t dev; tid_t tid; unsigned int type; unsigned int line_no; int interval; int sync; int requested_blocks; int dirtied_blocks; char __data[0]; }; struct trace_event_raw_jbd2_journal_shrink { struct trace_entry ent; dev_t dev; long unsigned int nr_to_scan; long unsigned int count; char __data[0]; }; struct trace_event_raw_jbd2_lock_buffer_stall { struct trace_entry ent; dev_t dev; long unsigned int stall_ms; char __data[0]; }; struct trace_event_raw_jbd2_run_stats { struct trace_entry ent; dev_t dev; tid_t tid; long unsigned int wait; long unsigned int request_delay; long unsigned int running; long unsigned int locked; long unsigned int flushing; long unsigned int logging; __u32 handle_count; __u32 blocks; __u32 blocks_logged; char __data[0]; }; struct trace_event_raw_jbd2_shrink_checkpoint_list { struct trace_entry ent; dev_t dev; tid_t first_tid; tid_t tid; tid_t last_tid; long unsigned int nr_freed; tid_t next_tid; char __data[0]; }; struct trace_event_raw_jbd2_shrink_scan_exit { struct trace_entry ent; dev_t dev; long unsigned int nr_to_scan; 
long unsigned int nr_shrunk; long unsigned int count; char __data[0]; }; struct trace_event_raw_jbd2_submit_inode_data { struct trace_entry ent; dev_t dev; ino_t ino; char __data[0]; }; struct trace_event_raw_jbd2_update_log_tail { struct trace_entry ent; dev_t dev; tid_t tail_sequence; tid_t first_tid; long unsigned int block_nr; long unsigned int freed; char __data[0]; }; struct trace_event_raw_jbd2_write_superblock { struct trace_entry ent; dev_t dev; blk_opf_t write_flags; char __data[0]; }; struct trace_event_raw_kcompactd_wake_template { struct trace_entry ent; int nid; int order; enum zone_type highest_zoneidx; char __data[0]; }; struct trace_event_raw_kfree { struct trace_entry ent; long unsigned int call_site; const void *ptr; char __data[0]; }; struct trace_event_raw_kfree_skb { struct trace_entry ent; void *skbaddr; void *location; void *rx_sk; short unsigned int protocol; enum skb_drop_reason reason; char __data[0]; }; struct trace_event_raw_kmalloc { struct trace_entry ent; long unsigned int call_site; const void *ptr; size_t bytes_req; size_t bytes_alloc; long unsigned int gfp_flags; int node; char __data[0]; }; struct trace_event_raw_kmem_cache_alloc { struct trace_entry ent; long unsigned int call_site; const void *ptr; size_t bytes_req; size_t bytes_alloc; long unsigned int gfp_flags; int node; bool accounted; char __data[0]; }; struct trace_event_raw_kmem_cache_free { struct trace_entry ent; long unsigned int call_site; const void *ptr; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_ksm_advisor { struct trace_entry ent; s64 scan_time; long unsigned int pages_to_scan; unsigned int cpu_percent; char __data[0]; }; struct trace_event_raw_ksm_enter_exit_template { struct trace_entry ent; void *mm; char __data[0]; }; struct trace_event_raw_ksm_merge_one_page { struct trace_entry ent; long unsigned int pfn; void *rmap_item; void *mm; int err; char __data[0]; }; struct trace_event_raw_ksm_merge_with_ksm_page { struct trace_entry ent; void *ksm_page; long unsigned int pfn; void *rmap_item; void *mm; int err; char __data[0]; }; struct trace_event_raw_ksm_remove_ksm_page { struct trace_entry ent; long unsigned int pfn; char __data[0]; }; struct trace_event_raw_ksm_remove_rmap_item { struct trace_entry ent; long unsigned int pfn; void *rmap_item; void *mm; char __data[0]; }; struct trace_event_raw_ksm_scan_template { struct trace_entry ent; int seq; u32 rmap_entries; char __data[0]; }; struct trace_event_raw_kyber_adjust { struct trace_entry ent; dev_t dev; char domain[16]; unsigned int depth; char __data[0]; }; struct trace_event_raw_kyber_latency { struct trace_entry ent; dev_t dev; char domain[16]; char type[8]; u8 percentile; u8 numerator; u8 denominator; unsigned int samples; char __data[0]; }; struct trace_event_raw_kyber_throttled { struct trace_entry ent; dev_t dev; char domain[16]; char __data[0]; }; struct trace_event_raw_leases_conflict { struct trace_entry ent; void *lease; void *breaker; unsigned int l_fl_flags; unsigned int b_fl_flags; unsigned char l_fl_type; unsigned char b_fl_type; bool conflict; char __data[0]; }; struct trace_event_raw_lock { struct trace_entry ent; u32 __data_loc_name; void *lockdep_addr; char __data[0]; }; struct trace_event_raw_lock_acquire { struct trace_entry ent; unsigned int flags; u32 __data_loc_name; void *lockdep_addr; char __data[0]; }; struct trace_event_raw_locks_get_lock_context { struct trace_entry ent; long unsigned int i_ino; dev_t s_dev; unsigned char type; struct file_lock_context *ctx; char __data[0]; }; struct 
trace_event_raw_ma_op { struct trace_entry ent; const char *fn; long unsigned int min; long unsigned int max; long unsigned int index; long unsigned int last; void *node; char __data[0]; }; struct trace_event_raw_ma_read { struct trace_entry ent; const char *fn; long unsigned int min; long unsigned int max; long unsigned int index; long unsigned int last; void *node; char __data[0]; }; struct trace_event_raw_ma_write { struct trace_entry ent; const char *fn; long unsigned int min; long unsigned int max; long unsigned int index; long unsigned int last; long unsigned int piv; void *val; void *node; char __data[0]; }; struct trace_event_raw_map { struct trace_entry ent; u64 iova; u64 paddr; size_t size; char __data[0]; }; struct trace_event_raw_mark_victim { struct trace_entry ent; int pid; u32 __data_loc_comm; long unsigned int total_vm; long unsigned int anon_rss; long unsigned int file_rss; long unsigned int shmem_rss; uid_t uid; long unsigned int pgtables; short int oom_score_adj; char __data[0]; }; struct trace_event_raw_mc_event { struct trace_entry ent; unsigned int error_type; u32 __data_loc_msg; u32 __data_loc_label; u16 error_count; u8 mc_index; s8 top_layer; s8 middle_layer; s8 lower_layer; long int address; u8 grain_bits; long int syndrome; u32 __data_loc_driver_detail; char __data[0]; }; struct trace_event_raw_mce_record { struct trace_entry ent; u64 mcgcap; u64 mcgstatus; u64 status; u64 addr; u64 misc; u64 synd; u64 ipid; u64 ip; u64 tsc; u64 ppin; u64 walltime; u32 cpu; u32 cpuid; u32 apicid; u32 socketid; u8 cs; u8 bank; u8 cpuvendor; u32 microcode; char __data[0]; }; struct trace_event_raw_mei_pci_cfg_read { struct trace_entry ent; u32 __data_loc_dev; const char *reg; u32 offs; u32 val; char __data[0]; }; struct trace_event_raw_mei_reg_read { struct trace_entry ent; u32 __data_loc_dev; const char *reg; u32 offs; u32 val; char __data[0]; }; struct trace_event_raw_mei_reg_write { struct trace_entry ent; u32 __data_loc_dev; const char *reg; u32 offs; u32 val; char __data[0]; }; struct xdp_mem_allocator; struct trace_event_raw_mem_connect { struct trace_entry ent; const struct xdp_mem_allocator *xa; u32 mem_id; u32 mem_type; const void *allocator; const struct xdp_rxq_info *rxq; int ifindex; char __data[0]; }; struct trace_event_raw_mem_disconnect { struct trace_entry ent; const struct xdp_mem_allocator *xa; u32 mem_id; u32 mem_type; const void *allocator; char __data[0]; }; struct trace_event_raw_mem_return_failed { struct trace_entry ent; const struct page *page; u32 mem_id; u32 mem_type; char __data[0]; }; struct trace_event_raw_memory_failure_event { struct trace_entry ent; long unsigned int pfn; int type; int result; char __data[0]; }; struct trace_event_raw_migration_pmd { struct trace_entry ent; long unsigned int addr; long unsigned int pmd; char __data[0]; }; struct trace_event_raw_migration_pte { struct trace_entry ent; long unsigned int addr; long unsigned int pte; int order; char __data[0]; }; struct trace_event_raw_mm_alloc_contig_migrate_range_info { struct trace_entry ent; long unsigned int start; long unsigned int end; long unsigned int nr_migrated; long unsigned int nr_reclaimed; long unsigned int nr_mapped; int migratetype; char __data[0]; }; struct trace_event_raw_mm_collapse_huge_page { struct trace_entry ent; struct mm_struct *mm; int isolated; int status; char __data[0]; }; struct trace_event_raw_mm_collapse_huge_page_isolate { struct trace_entry ent; long unsigned int pfn; int none_or_zero; int referenced; bool writable; int status; char __data[0]; }; 
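/*
 * All of the trace_event_raw_* records in this dump mirror the in-kernel
 * layout of tracepoint events, so a BPF program can take a pointer to one
 * of them as its context and read the fields directly; the
 * trace_event_raw_xdp_* records further below are what an XDP tracepoint
 * monitor consumes in the same way. A minimal sketch, assuming this dump
 * is used as a vmlinux.h-style header together with <bpf/bpf_helpers.h>
 * for SEC() and bpf_printk() (the program below is hypothetical and not
 * part of xdp-tools):
 *
 *   SEC("tracepoint/skb/kfree_skb")
 *   int log_kfree_skb(struct trace_event_raw_kfree_skb *ctx)
 *   {
 *           bpf_printk("kfree_skb proto=%u reason=%d",
 *                      ctx->protocol, (int)ctx->reason);
 *           return 0;
 *   }
 *   char LICENSE[] SEC("license") = "GPL";
 *
 * Two conventions recur in these records: the trailing "char __data[0];"
 * member marks where the variable-length payload of the event begins, and
 * each "u32 __data_loc_*" member is a packed descriptor for such a
 * payload, carrying its offset from the start of the record in the low
 * 16 bits and its length in the high 16 bits:
 *
 *   u32 off = ctx->__data_loc_name & 0xffff;   (payload offset in record)
 *   u32 len = ctx->__data_loc_name >> 16;      (payload length in bytes)
 */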
struct trace_event_raw_mm_collapse_huge_page_swapin { struct trace_entry ent; struct mm_struct *mm; int swapped_in; int referenced; int ret; char __data[0]; }; struct trace_event_raw_mm_compaction_begin { struct trace_entry ent; long unsigned int zone_start; long unsigned int migrate_pfn; long unsigned int free_pfn; long unsigned int zone_end; bool sync; char __data[0]; }; struct trace_event_raw_mm_compaction_defer_template { struct trace_entry ent; int nid; enum zone_type idx; int order; unsigned int considered; unsigned int defer_shift; int order_failed; char __data[0]; }; struct trace_event_raw_mm_compaction_end { struct trace_entry ent; long unsigned int zone_start; long unsigned int migrate_pfn; long unsigned int free_pfn; long unsigned int zone_end; bool sync; int status; char __data[0]; }; struct trace_event_raw_mm_compaction_isolate_template { struct trace_entry ent; long unsigned int start_pfn; long unsigned int end_pfn; long unsigned int nr_scanned; long unsigned int nr_taken; char __data[0]; }; struct trace_event_raw_mm_compaction_kcompactd_sleep { struct trace_entry ent; int nid; char __data[0]; }; struct trace_event_raw_mm_compaction_migratepages { struct trace_entry ent; long unsigned int nr_migrated; long unsigned int nr_failed; char __data[0]; }; struct trace_event_raw_mm_compaction_suitable_template { struct trace_entry ent; int nid; enum zone_type idx; int order; int ret; char __data[0]; }; struct trace_event_raw_mm_compaction_try_to_compact_pages { struct trace_entry ent; int order; long unsigned int gfp_mask; int prio; char __data[0]; }; struct trace_event_raw_mm_filemap_fault { struct trace_entry ent; long unsigned int i_ino; dev_t s_dev; long unsigned int index; char __data[0]; }; struct trace_event_raw_mm_filemap_op_page_cache { struct trace_entry ent; long unsigned int pfn; long unsigned int i_ino; long unsigned int index; dev_t s_dev; unsigned char order; char __data[0]; }; struct trace_event_raw_mm_filemap_op_page_cache_range { struct trace_entry ent; long unsigned int i_ino; dev_t s_dev; long unsigned int index; long unsigned int last_index; char __data[0]; }; struct trace_event_raw_mm_khugepaged_collapse_file { struct trace_entry ent; struct mm_struct *mm; long unsigned int hpfn; long unsigned int index; long unsigned int addr; bool is_shmem; u32 __data_loc_filename; int nr; int result; char __data[0]; }; struct trace_event_raw_mm_khugepaged_scan_file { struct trace_entry ent; struct mm_struct *mm; long unsigned int pfn; u32 __data_loc_filename; int present; int swap; int result; char __data[0]; }; struct trace_event_raw_mm_khugepaged_scan_pmd { struct trace_entry ent; struct mm_struct *mm; long unsigned int pfn; bool writable; int referenced; int none_or_zero; int status; int unmapped; char __data[0]; }; struct trace_event_raw_mm_lru_activate { struct trace_entry ent; struct folio *folio; long unsigned int pfn; char __data[0]; }; struct trace_event_raw_mm_lru_insertion { struct trace_entry ent; struct folio *folio; long unsigned int pfn; enum lru_list lru; long unsigned int flags; char __data[0]; }; struct trace_event_raw_mm_migrate_pages { struct trace_entry ent; long unsigned int succeeded; long unsigned int failed; long unsigned int thp_succeeded; long unsigned int thp_failed; long unsigned int thp_split; long unsigned int large_folio_split; enum migrate_mode mode; int reason; char __data[0]; }; struct trace_event_raw_mm_migrate_pages_start { struct trace_entry ent; enum migrate_mode mode; int reason; char __data[0]; }; struct trace_event_raw_mm_page { 
struct trace_entry ent; long unsigned int pfn; unsigned int order; int migratetype; int percpu_refill; char __data[0]; }; struct trace_event_raw_mm_page_alloc { struct trace_entry ent; long unsigned int pfn; unsigned int order; long unsigned int gfp_flags; int migratetype; char __data[0]; }; struct trace_event_raw_mm_page_alloc_extfrag { struct trace_entry ent; long unsigned int pfn; int alloc_order; int fallback_order; int alloc_migratetype; int fallback_migratetype; int change_ownership; char __data[0]; }; struct trace_event_raw_mm_page_free { struct trace_entry ent; long unsigned int pfn; unsigned int order; char __data[0]; }; struct trace_event_raw_mm_page_free_batched { struct trace_entry ent; long unsigned int pfn; char __data[0]; }; struct trace_event_raw_mm_page_pcpu_drain { struct trace_entry ent; long unsigned int pfn; unsigned int order; int migratetype; char __data[0]; }; struct trace_event_raw_mm_shrink_slab_end { struct trace_entry ent; struct shrinker *shr; int nid; void *shrink; long int unused_scan; long int new_scan; int retval; long int total_scan; char __data[0]; }; struct trace_event_raw_mm_shrink_slab_start { struct trace_entry ent; struct shrinker *shr; void *shrink; int nid; long int nr_objects_to_shrink; long unsigned int gfp_flags; long unsigned int cache_items; long long unsigned int delta; long unsigned int total_scan; int priority; char __data[0]; }; struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { struct trace_entry ent; int order; long unsigned int gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { struct trace_entry ent; long unsigned int nr_reclaimed; char __data[0]; }; struct trace_event_raw_mm_vmscan_kswapd_sleep { struct trace_entry ent; int nid; char __data[0]; }; struct trace_event_raw_mm_vmscan_kswapd_wake { struct trace_entry ent; int nid; int zid; int order; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_isolate { struct trace_entry ent; int highest_zoneidx; int order; long unsigned int nr_requested; long unsigned int nr_scanned; long unsigned int nr_skipped; long unsigned int nr_taken; int lru; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_shrink_active { struct trace_entry ent; int nid; long unsigned int nr_taken; long unsigned int nr_active; long unsigned int nr_deactivated; long unsigned int nr_referenced; int priority; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_lru_shrink_inactive { struct trace_entry ent; int nid; long unsigned int nr_scanned; long unsigned int nr_reclaimed; long unsigned int nr_dirty; long unsigned int nr_writeback; long unsigned int nr_congested; long unsigned int nr_immediate; unsigned int nr_activate0; unsigned int nr_activate1; long unsigned int nr_ref_keep; long unsigned int nr_unmap_fail; int priority; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_node_reclaim_begin { struct trace_entry ent; int nid; int order; long unsigned int gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_throttled { struct trace_entry ent; int nid; int usec_timeout; int usec_delayed; int reason; char __data[0]; }; struct trace_event_raw_mm_vmscan_wakeup_kswapd { struct trace_entry ent; int nid; int zid; int order; long unsigned int gfp_flags; char __data[0]; }; struct trace_event_raw_mm_vmscan_write_folio { struct trace_entry ent; long unsigned int pfn; int reclaim_flags; char __data[0]; }; struct trace_event_raw_mmap_lock { struct trace_entry ent; struct mm_struct *mm; u32 __data_loc_memcg_path; bool 
write; char __data[0]; }; struct trace_event_raw_mmap_lock_acquire_returned { struct trace_entry ent; struct mm_struct *mm; u32 __data_loc_memcg_path; bool write; bool success; char __data[0]; }; struct trace_event_raw_module_free { struct trace_entry ent; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_load { struct trace_entry ent; unsigned int taints; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_refcnt { struct trace_entry ent; long unsigned int ip; int refcnt; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_module_request { struct trace_entry ent; long unsigned int ip; bool wait; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_mptcp_dump_mpext { struct trace_entry ent; u64 data_ack; u64 data_seq; u32 subflow_seq; u16 data_len; u16 csum; u8 use_map; u8 dsn64; u8 data_fin; u8 use_ack; u8 ack64; u8 mpc_map; u8 frozen; u8 reset_transient; u8 reset_reason; u8 csum_reqd; u8 infinite_map; char __data[0]; }; struct trace_event_raw_mptcp_subflow_get_send { struct trace_entry ent; bool active; bool free; u32 snd_wnd; u32 pace; u8 backup; u64 ratio; char __data[0]; }; struct trace_event_raw_msr_trace_class { struct trace_entry ent; unsigned int msr; u64 val; int failed; char __data[0]; }; struct trace_event_raw_napi_poll { struct trace_entry ent; struct napi_struct *napi; u32 __data_loc_dev_name; int work; int budget; char __data[0]; }; struct trace_event_raw_neigh__update { struct trace_entry ent; u32 family; u32 __data_loc_dev; u8 lladdr[32]; u8 lladdr_len; u8 flags; u8 nud_state; u8 type; u8 dead; int refcnt; __u8 primary_key4[4]; __u8 primary_key6[16]; long unsigned int confirmed; long unsigned int updated; long unsigned int used; u32 err; char __data[0]; }; struct trace_event_raw_neigh_create { struct trace_entry ent; u32 family; u32 __data_loc_dev; int entries; u8 created; u8 gc_exempt; u8 primary_key4[4]; u8 primary_key6[16]; char __data[0]; }; struct trace_event_raw_neigh_update { struct trace_entry ent; u32 family; u32 __data_loc_dev; u8 lladdr[32]; u8 lladdr_len; u8 flags; u8 nud_state; u8 type; u8 dead; int refcnt; __u8 primary_key4[4]; __u8 primary_key6[16]; long unsigned int confirmed; long unsigned int updated; long unsigned int used; u8 new_lladdr[32]; u8 new_state; u32 update_flags; u32 pid; char __data[0]; }; struct trace_event_raw_net_dev_rx_exit_template { struct trace_entry ent; int ret; char __data[0]; }; struct trace_event_raw_net_dev_rx_verbose_template { struct trace_entry ent; u32 __data_loc_name; unsigned int napi_id; u16 queue_mapping; const void *skbaddr; bool vlan_tagged; u16 vlan_proto; u16 vlan_tci; u16 protocol; u8 ip_summed; u32 hash; bool l4_hash; unsigned int len; unsigned int data_len; unsigned int truesize; bool mac_header_valid; int mac_header; unsigned char nr_frags; u16 gso_size; u16 gso_type; char __data[0]; }; struct trace_event_raw_net_dev_start_xmit { struct trace_entry ent; u32 __data_loc_name; u16 queue_mapping; const void *skbaddr; bool vlan_tagged; u16 vlan_proto; u16 vlan_tci; u16 protocol; u8 ip_summed; unsigned int len; unsigned int data_len; int network_offset; bool transport_offset_valid; int transport_offset; u8 tx_flags; u16 gso_size; u16 gso_segs; u16 gso_type; char __data[0]; }; struct trace_event_raw_net_dev_template { struct trace_entry ent; void *skbaddr; unsigned int len; u32 __data_loc_name; char __data[0]; }; struct trace_event_raw_net_dev_xmit { struct trace_entry ent; void *skbaddr; unsigned int len; int rc; u32 __data_loc_name; char __data[0]; }; struct 
trace_event_raw_net_dev_xmit_timeout { struct trace_entry ent; u32 __data_loc_name; u32 __data_loc_driver; int queue_index; char __data[0]; }; struct trace_event_raw_netlink_extack { struct trace_entry ent; u32 __data_loc_msg; char __data[0]; }; struct trace_event_raw_nmi_handler { struct trace_entry ent; void *handler; s64 delta_ns; int handled; char __data[0]; }; struct trace_event_raw_non_standard_event { struct trace_entry ent; char sec_type[16]; char fru_id[16]; u32 __data_loc_fru_text; u8 sev; u32 len; u32 __data_loc_buf; char __data[0]; }; struct trace_event_raw_notifier_info { struct trace_entry ent; void *cb; char __data[0]; }; struct trace_event_raw_oom_score_adj_update { struct trace_entry ent; pid_t pid; char comm[16]; short int oom_score_adj; char __data[0]; }; struct trace_event_raw_page_pool_release { struct trace_entry ent; const struct page_pool *pool; s32 inflight; u32 hold; u32 release; u64 cnt; char __data[0]; }; struct trace_event_raw_page_pool_state_hold { struct trace_entry ent; const struct page_pool *pool; long unsigned int netmem; u32 hold; long unsigned int pfn; char __data[0]; }; struct trace_event_raw_page_pool_state_release { struct trace_entry ent; const struct page_pool *pool; long unsigned int netmem; u32 release; long unsigned int pfn; char __data[0]; }; struct trace_event_raw_page_pool_update_nid { struct trace_entry ent; const struct page_pool *pool; int pool_nid; int new_nid; char __data[0]; }; struct trace_event_raw_percpu_alloc_percpu { struct trace_entry ent; long unsigned int call_site; bool reserved; bool is_atomic; size_t size; size_t align; void *base_addr; int off; void *ptr; size_t bytes_alloc; long unsigned int gfp_flags; char __data[0]; }; struct trace_event_raw_percpu_alloc_percpu_fail { struct trace_entry ent; bool reserved; bool is_atomic; size_t size; size_t align; char __data[0]; }; struct trace_event_raw_percpu_create_chunk { struct trace_entry ent; void *base_addr; char __data[0]; }; struct trace_event_raw_percpu_destroy_chunk { struct trace_entry ent; void *base_addr; char __data[0]; }; struct trace_event_raw_percpu_free_percpu { struct trace_entry ent; void *base_addr; int off; void *ptr; char __data[0]; }; struct trace_event_raw_pm_qos_update { struct trace_entry ent; enum pm_qos_req_action action; int prev_value; int curr_value; char __data[0]; }; struct trace_event_raw_power_domain { struct trace_entry ent; u32 __data_loc_name; u64 state; u64 cpu_id; char __data[0]; }; struct trace_event_raw_powernv_throttle { struct trace_entry ent; int chip_id; u32 __data_loc_reason; int pmax; char __data[0]; }; struct trace_event_raw_preemptirq_template { struct trace_entry ent; s32 caller_offs; s32 parent_offs; char __data[0]; }; struct trace_event_raw_pstate_sample { struct trace_entry ent; u32 core_busy; u32 scaled_busy; u32 from; u32 to; u64 mperf; u64 aperf; u64 tsc; u32 freq; u32 io_boost; char __data[0]; }; struct trace_event_raw_purge_vmap_area_lazy { struct trace_entry ent; long unsigned int start; long unsigned int end; unsigned int npurged; char __data[0]; }; struct trace_event_raw_qdisc_create { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; char __data[0]; }; struct trace_event_raw_qdisc_dequeue { struct trace_entry ent; struct Qdisc *qdisc; const struct netdev_queue *txq; int packets; void *skbaddr; int ifindex; u32 handle; u32 parent; long unsigned int txq_state; char __data[0]; }; struct trace_event_raw_qdisc_destroy { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; u32 
handle; char __data[0]; }; struct trace_event_raw_qdisc_enqueue { struct trace_entry ent; struct Qdisc *qdisc; const struct netdev_queue *txq; void *skbaddr; int ifindex; u32 handle; u32 parent; char __data[0]; }; struct trace_event_raw_qdisc_reset { struct trace_entry ent; u32 __data_loc_dev; u32 __data_loc_kind; u32 parent; u32 handle; char __data[0]; }; struct trace_event_raw_rcu_barrier { struct trace_entry ent; const char *rcuname; const char *s; int cpu; int cnt; long unsigned int done; char __data[0]; }; struct trace_event_raw_rcu_batch_end { struct trace_entry ent; const char *rcuname; int callbacks_invoked; char cb; char nr; char iit; char risk; char __data[0]; }; struct trace_event_raw_rcu_batch_start { struct trace_entry ent; const char *rcuname; long int qlen; long int blimit; char __data[0]; }; struct trace_event_raw_rcu_callback { struct trace_entry ent; const char *rcuname; void *rhp; void *func; long int qlen; char __data[0]; }; struct trace_event_raw_rcu_exp_funnel_lock { struct trace_entry ent; const char *rcuname; u8 level; int grplo; int grphi; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_exp_grace_period { struct trace_entry ent; const char *rcuname; long int gpseq; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_fqs { struct trace_entry ent; const char *rcuname; long int gp_seq; int cpu; const char *qsevent; char __data[0]; }; struct trace_event_raw_rcu_future_grace_period { struct trace_entry ent; const char *rcuname; long int gp_seq; long int gp_seq_req; u8 level; int grplo; int grphi; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_grace_period { struct trace_entry ent; const char *rcuname; long int gp_seq; const char *gpevent; char __data[0]; }; struct trace_event_raw_rcu_grace_period_init { struct trace_entry ent; const char *rcuname; long int gp_seq; u8 level; int grplo; int grphi; long unsigned int qsmask; char __data[0]; }; struct trace_event_raw_rcu_invoke_callback { struct trace_entry ent; const char *rcuname; void *rhp; void *func; char __data[0]; }; struct trace_event_raw_rcu_invoke_kfree_bulk_callback { struct trace_entry ent; const char *rcuname; long unsigned int nr_records; void **p; char __data[0]; }; struct trace_event_raw_rcu_invoke_kvfree_callback { struct trace_entry ent; const char *rcuname; void *rhp; long unsigned int offset; char __data[0]; }; struct trace_event_raw_rcu_kvfree_callback { struct trace_entry ent; const char *rcuname; void *rhp; long unsigned int offset; long int qlen; char __data[0]; }; struct trace_event_raw_rcu_preempt_task { struct trace_entry ent; const char *rcuname; long int gp_seq; int pid; char __data[0]; }; struct trace_event_raw_rcu_quiescent_state_report { struct trace_entry ent; const char *rcuname; long int gp_seq; long unsigned int mask; long unsigned int qsmask; u8 level; int grplo; int grphi; u8 gp_tasks; char __data[0]; }; struct trace_event_raw_rcu_segcb_stats { struct trace_entry ent; const char *ctx; long unsigned int gp_seq[4]; long int seglen[4]; char __data[0]; }; struct trace_event_raw_rcu_sr_normal { struct trace_entry ent; const char *rcuname; void *rhp; const char *srevent; char __data[0]; }; struct trace_event_raw_rcu_stall_warning { struct trace_entry ent; const char *rcuname; const char *msg; char __data[0]; }; struct trace_event_raw_rcu_torture_read { struct trace_entry ent; char rcutorturename[8]; struct callback_head *rhp; long unsigned int secs; long unsigned int c_old; long unsigned int c; char __data[0]; }; struct 
trace_event_raw_rcu_unlock_preempted_task { struct trace_entry ent; const char *rcuname; long int gp_seq; int pid; char __data[0]; }; struct trace_event_raw_rcu_utilization { struct trace_entry ent; const char *s; char __data[0]; }; struct trace_event_raw_rcu_watching { struct trace_entry ent; const char *polarity; long int oldnesting; long int newnesting; int counter; char __data[0]; }; struct trace_event_raw_reclaim_retry_zone { struct trace_entry ent; int node; int zone_idx; int order; long unsigned int reclaimable; long unsigned int available; long unsigned int min_wmark; int no_progress_loops; bool wmark_check; char __data[0]; }; struct trace_event_raw_rpm_internal { struct trace_entry ent; u32 __data_loc_name; int flags; int usage_count; int disable_depth; int runtime_auto; int request_pending; int irq_safe; int child_count; char __data[0]; }; struct trace_event_raw_rpm_return_int { struct trace_entry ent; u32 __data_loc_name; long unsigned int ip; int ret; char __data[0]; }; struct trace_event_raw_rpm_status { struct trace_entry ent; u32 __data_loc_name; int status; char __data[0]; }; struct trace_event_raw_rseq_ip_fixup { struct trace_entry ent; long unsigned int regs_ip; long unsigned int start_ip; long unsigned int post_commit_offset; long unsigned int abort_ip; char __data[0]; }; struct trace_event_raw_rseq_update { struct trace_entry ent; s32 cpu_id; s32 node_id; s32 mm_cid; char __data[0]; }; struct trace_event_raw_rss_stat { struct trace_entry ent; unsigned int mm_id; unsigned int curr; int member; long int size; char __data[0]; }; struct trace_event_raw_sched_kthread_stop { struct trace_entry ent; char comm[16]; pid_t pid; char __data[0]; }; struct trace_event_raw_sched_kthread_stop_ret { struct trace_entry ent; int ret; char __data[0]; }; struct trace_event_raw_sched_kthread_work_execute_end { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_sched_kthread_work_execute_start { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_sched_kthread_work_queue_work { struct trace_entry ent; void *work; void *function; void *worker; char __data[0]; }; struct trace_event_raw_sched_migrate_task { struct trace_entry ent; char comm[16]; pid_t pid; int prio; int orig_cpu; int dest_cpu; char __data[0]; }; struct trace_event_raw_sched_move_numa { struct trace_entry ent; pid_t pid; pid_t tgid; pid_t ngid; int src_cpu; int src_nid; int dst_cpu; int dst_nid; char __data[0]; }; struct trace_event_raw_sched_numa_pair_template { struct trace_entry ent; pid_t src_pid; pid_t src_tgid; pid_t src_ngid; int src_cpu; int src_nid; pid_t dst_pid; pid_t dst_tgid; pid_t dst_ngid; int dst_cpu; int dst_nid; char __data[0]; }; struct trace_event_raw_sched_pi_setprio { struct trace_entry ent; char comm[16]; pid_t pid; int oldprio; int newprio; char __data[0]; }; struct trace_event_raw_sched_prepare_exec { struct trace_entry ent; u32 __data_loc_interp; u32 __data_loc_filename; pid_t pid; u32 __data_loc_comm; char __data[0]; }; struct trace_event_raw_sched_process_exec { struct trace_entry ent; u32 __data_loc_filename; pid_t pid; pid_t old_pid; char __data[0]; }; struct trace_event_raw_sched_process_fork { struct trace_entry ent; char parent_comm[16]; pid_t parent_pid; char child_comm[16]; pid_t child_pid; char __data[0]; }; struct trace_event_raw_sched_process_hang { struct trace_entry ent; char comm[16]; pid_t pid; char __data[0]; }; struct trace_event_raw_sched_process_template { struct trace_entry ent; char comm[16]; pid_t 
pid; int prio; char __data[0]; }; struct trace_event_raw_sched_process_wait { struct trace_entry ent; char comm[16]; pid_t pid; int prio; char __data[0]; }; struct trace_event_raw_sched_skip_vma_numa { struct trace_entry ent; long unsigned int numa_scan_offset; long unsigned int vm_start; long unsigned int vm_end; enum numa_vmaskip_reason reason; char __data[0]; }; struct trace_event_raw_sched_stat_runtime { struct trace_entry ent; char comm[16]; pid_t pid; u64 runtime; char __data[0]; }; struct trace_event_raw_sched_stat_template { struct trace_entry ent; char comm[16]; pid_t pid; u64 delay; char __data[0]; }; struct trace_event_raw_sched_switch { struct trace_entry ent; char prev_comm[16]; pid_t prev_pid; int prev_prio; long int prev_state; char next_comm[16]; pid_t next_pid; int next_prio; char __data[0]; }; struct trace_event_raw_sched_wake_idle_without_ipi { struct trace_entry ent; int cpu; char __data[0]; }; struct trace_event_raw_sched_wakeup_template { struct trace_entry ent; char comm[16]; pid_t pid; int prio; int target_cpu; char __data[0]; }; struct trace_event_raw_selinux_audited { struct trace_entry ent; u32 requested; u32 denied; u32 audited; int result; u32 __data_loc_scontext; u32 __data_loc_tcontext; u32 __data_loc_tclass; char __data[0]; }; struct trace_event_raw_signal_deliver { struct trace_entry ent; int sig; int errno; int code; long unsigned int sa_handler; long unsigned int sa_flags; char __data[0]; }; struct trace_event_raw_signal_generate { struct trace_entry ent; int sig; int errno; int code; char comm[16]; pid_t pid; int group; int result; char __data[0]; }; struct trace_event_raw_sk_data_ready { struct trace_entry ent; const void *skaddr; __u16 family; __u16 protocol; long unsigned int ip; char __data[0]; }; struct trace_event_raw_skb_copy_datagram_iovec { struct trace_entry ent; const void *skbaddr; int len; char __data[0]; }; struct trace_event_raw_skip_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_sock_exceed_buf_limit { struct trace_entry ent; char name[32]; long int sysctl_mem[3]; long int allocated; int sysctl_rmem; int rmem_alloc; int sysctl_wmem; int wmem_alloc; int wmem_queued; int kind; char __data[0]; }; struct trace_event_raw_sock_msg_length { struct trace_entry ent; void *sk; __u16 family; __u16 protocol; int ret; int flags; char __data[0]; }; struct trace_event_raw_sock_rcvqueue_full { struct trace_entry ent; int rmem_alloc; unsigned int truesize; int sk_rcvbuf; char __data[0]; }; struct trace_event_raw_softirq { struct trace_entry ent; unsigned int vec; char __data[0]; }; struct trace_event_raw_start_task_reaping { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_subflow_check_data_avail { struct trace_entry ent; u8 status; const void *skb; char __data[0]; }; struct trace_event_raw_suspend_resume { struct trace_entry ent; const char *action; int val; bool start; char __data[0]; }; struct trace_event_raw_swiotlb_bounced { struct trace_entry ent; u32 __data_loc_dev_name; u64 dma_mask; dma_addr_t dev_addr; size_t size; bool force; char __data[0]; }; struct trace_event_raw_sys_enter { struct trace_entry ent; long int id; long unsigned int args[6]; char __data[0]; }; struct trace_event_raw_sys_exit { struct trace_entry ent; long int id; long int ret; char __data[0]; }; struct trace_event_raw_task_newtask { struct trace_entry ent; pid_t pid; char comm[16]; long unsigned int clone_flags; short int oom_score_adj; char __data[0]; }; struct trace_event_raw_task_rename { struct 
trace_entry ent; pid_t pid; char oldcomm[16]; char newcomm[16]; short int oom_score_adj; char __data[0]; }; struct trace_event_raw_tasklet { struct trace_entry ent; void *tasklet; void *func; char __data[0]; }; struct trace_event_raw_tcp_ao_event { struct trace_entry ent; __u64 net_cookie; const void *skbaddr; const void *skaddr; int state; __u8 saddr[28]; __u8 daddr[28]; int l3index; __u16 sport; __u16 dport; __u16 family; bool fin; bool syn; bool rst; bool psh; bool ack; __u8 keyid; __u8 rnext; __u8 maclen; char __data[0]; }; struct trace_event_raw_tcp_ao_event_sk { struct trace_entry ent; __u64 net_cookie; const void *skaddr; int state; __u8 saddr[28]; __u8 daddr[28]; __u16 sport; __u16 dport; __u16 family; __u8 keyid; __u8 rnext; char __data[0]; }; struct trace_event_raw_tcp_ao_event_sne { struct trace_entry ent; __u64 net_cookie; const void *skaddr; int state; __u8 saddr[28]; __u8 daddr[28]; __u16 sport; __u16 dport; __u16 family; __u32 new_sne; char __data[0]; }; struct trace_event_raw_tcp_cong_state_set { struct trace_entry ent; const void *skaddr; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; __u8 cong_state; char __data[0]; }; struct trace_event_raw_tcp_event_sk { struct trace_entry ent; const void *skaddr; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; __u64 sock_cookie; char __data[0]; }; struct trace_event_raw_tcp_event_sk_skb { struct trace_entry ent; const void *skbaddr; const void *skaddr; int state; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_tcp_event_skb { struct trace_entry ent; const void *skbaddr; __u8 saddr[28]; __u8 daddr[28]; char __data[0]; }; struct trace_event_raw_tcp_hash_event { struct trace_entry ent; __u64 net_cookie; const void *skbaddr; const void *skaddr; int state; __u8 saddr[28]; __u8 daddr[28]; int l3index; __u16 sport; __u16 dport; __u16 family; bool fin; bool syn; bool rst; bool psh; bool ack; char __data[0]; }; struct trace_event_raw_tcp_probe { struct trace_entry ent; __u8 saddr[28]; __u8 daddr[28]; __u16 sport; __u16 dport; __u16 family; __u32 mark; __u16 data_len; __u32 snd_nxt; __u32 snd_una; __u32 snd_cwnd; __u32 ssthresh; __u32 snd_wnd; __u32 srtt; __u32 rcv_wnd; __u64 sock_cookie; const void *skbaddr; const void *skaddr; char __data[0]; }; struct trace_event_raw_tcp_retransmit_synack { struct trace_entry ent; const void *skaddr; const void *req; __u16 sport; __u16 dport; __u16 family; __u8 saddr[4]; __u8 daddr[4]; __u8 saddr_v6[16]; __u8 daddr_v6[16]; char __data[0]; }; struct trace_event_raw_tcp_send_reset { struct trace_entry ent; const void *skbaddr; const void *skaddr; int state; enum sk_rst_reason reason; __u8 saddr[28]; __u8 daddr[28]; char __data[0]; }; struct trace_event_raw_test_pages_isolated { struct trace_entry ent; long unsigned int start_pfn; long unsigned int end_pfn; long unsigned int fin_pfn; char __data[0]; }; struct trace_event_raw_thermal_temperature { struct trace_entry ent; u32 __data_loc_thermal_zone; int id; int temp_prev; int temp; char __data[0]; }; struct trace_event_raw_thermal_zone_trip { struct trace_entry ent; u32 __data_loc_thermal_zone; int id; int trip; enum thermal_trip_type trip_type; char __data[0]; }; struct trace_event_raw_tick_stop { struct trace_entry ent; int success; int dependency; char __data[0]; }; struct trace_event_raw_timer_base_idle { struct trace_entry ent; bool 
is_idle; unsigned int cpu; char __data[0]; }; struct trace_event_raw_timer_class { struct trace_entry ent; void *timer; char __data[0]; }; struct trace_event_raw_timer_expire_entry { struct trace_entry ent; void *timer; long unsigned int now; void *function; long unsigned int baseclk; char __data[0]; }; struct trace_event_raw_timer_start { struct trace_entry ent; void *timer; void *function; long unsigned int expires; long unsigned int bucket_expiry; long unsigned int now; unsigned int flags; char __data[0]; }; struct trace_event_raw_tlb_flush { struct trace_entry ent; int reason; long unsigned int pages; char __data[0]; }; struct trace_event_raw_tls_device_decrypted { struct trace_entry ent; struct sock *sk; u64 rec_no; u32 tcp_seq; u32 rec_len; bool encrypted; bool decrypted; char __data[0]; }; struct trace_event_raw_tls_device_offload_set { struct trace_entry ent; struct sock *sk; u64 rec_no; int dir; u32 tcp_seq; int ret; char __data[0]; }; struct trace_event_raw_tls_device_rx_resync_nh_delay { struct trace_entry ent; struct sock *sk; u32 sock_data; u32 rec_len; char __data[0]; }; struct trace_event_raw_tls_device_rx_resync_nh_schedule { struct trace_entry ent; struct sock *sk; char __data[0]; }; struct trace_event_raw_tls_device_rx_resync_send { struct trace_entry ent; struct sock *sk; u64 rec_no; u32 tcp_seq; int sync_type; char __data[0]; }; struct trace_event_raw_tls_device_tx_resync_req { struct trace_entry ent; struct sock *sk; u32 tcp_seq; u32 exp_tcp_seq; char __data[0]; }; struct trace_event_raw_tls_device_tx_resync_send { struct trace_entry ent; struct sock *sk; u64 rec_no; u32 tcp_seq; char __data[0]; }; struct trace_event_raw_tmigr_connect_child_parent { struct trace_entry ent; void *child; void *parent; unsigned int lvl; unsigned int numa_node; unsigned int num_children; u32 groupmask; char __data[0]; }; struct trace_event_raw_tmigr_connect_cpu_parent { struct trace_entry ent; void *parent; unsigned int cpu; unsigned int lvl; unsigned int numa_node; unsigned int num_children; u32 groupmask; char __data[0]; }; struct trace_event_raw_tmigr_cpugroup { struct trace_entry ent; u64 wakeup; void *parent; unsigned int cpu; char __data[0]; }; struct trace_event_raw_tmigr_group_and_cpu { struct trace_entry ent; void *group; void *parent; unsigned int lvl; unsigned int numa_node; u32 childmask; u8 active; u8 migrator; char __data[0]; }; struct trace_event_raw_tmigr_group_set { struct trace_entry ent; void *group; unsigned int lvl; unsigned int numa_node; char __data[0]; }; struct trace_event_raw_tmigr_handle_remote { struct trace_entry ent; void *group; unsigned int lvl; char __data[0]; }; struct trace_event_raw_tmigr_idle { struct trace_entry ent; u64 nextevt; u64 wakeup; void *parent; unsigned int cpu; char __data[0]; }; struct trace_event_raw_tmigr_update_events { struct trace_entry ent; void *child; void *group; u64 nextevt; u64 group_next_expiry; u64 child_evt_expiry; unsigned int group_lvl; unsigned int child_evtcpu; u8 child_active; u8 group_active; char __data[0]; }; struct trace_event_raw_track_foreign_dirty { struct trace_entry ent; char name[32]; u64 bdi_id; ino_t ino; unsigned int memcg_id; ino_t cgroup_ino; ino_t page_cgroup_ino; char __data[0]; }; struct trace_event_raw_udp_fail_queue_rcv_skb { struct trace_entry ent; int rc; __u16 sport; __u16 dport; __u16 family; __u8 saddr[28]; __u8 daddr[28]; char __data[0]; }; struct trace_event_raw_unmap { struct trace_entry ent; u64 iova; size_t size; size_t unmapped_size; char __data[0]; }; struct trace_event_raw_vector_activate 
{ struct trace_entry ent; unsigned int irq; bool is_managed; bool can_reserve; bool reserve; char __data[0]; }; struct trace_event_raw_vector_alloc { struct trace_entry ent; unsigned int irq; unsigned int vector; bool reserved; int ret; char __data[0]; }; struct trace_event_raw_vector_alloc_managed { struct trace_entry ent; unsigned int irq; unsigned int vector; int ret; char __data[0]; }; struct trace_event_raw_vector_config { struct trace_entry ent; unsigned int irq; unsigned int vector; unsigned int cpu; unsigned int apicdest; char __data[0]; }; struct trace_event_raw_vector_free_moved { struct trace_entry ent; unsigned int irq; unsigned int cpu; unsigned int vector; bool is_managed; char __data[0]; }; struct trace_event_raw_vector_mod { struct trace_entry ent; unsigned int irq; unsigned int vector; unsigned int cpu; unsigned int prev_vector; unsigned int prev_cpu; char __data[0]; }; struct trace_event_raw_vector_reserve { struct trace_entry ent; unsigned int irq; int ret; char __data[0]; }; struct trace_event_raw_vector_setup { struct trace_entry ent; unsigned int irq; bool is_legacy; int ret; char __data[0]; }; struct trace_event_raw_vector_teardown { struct trace_entry ent; unsigned int irq; bool is_managed; bool has_reserved; char __data[0]; }; struct trace_event_raw_virtio_transport_alloc_pkt { struct trace_entry ent; __u32 src_cid; __u32 src_port; __u32 dst_cid; __u32 dst_port; __u32 len; __u16 type; __u16 op; __u32 flags; bool zcopy; char __data[0]; }; struct trace_event_raw_virtio_transport_recv_pkt { struct trace_entry ent; __u32 src_cid; __u32 src_port; __u32 dst_cid; __u32 dst_port; __u32 len; __u16 type; __u16 op; __u32 flags; __u32 buf_alloc; __u32 fwd_cnt; char __data[0]; }; struct trace_event_raw_vm_unmapped_area { struct trace_entry ent; long unsigned int addr; long unsigned int total_vm; long unsigned int flags; long unsigned int length; long unsigned int low_limit; long unsigned int high_limit; long unsigned int align_mask; long unsigned int align_offset; char __data[0]; }; struct trace_event_raw_vma_mas_szero { struct trace_entry ent; struct maple_tree *mt; long unsigned int start; long unsigned int end; char __data[0]; }; struct trace_event_raw_vma_store { struct trace_entry ent; struct maple_tree *mt; struct vm_area_struct *vma; long unsigned int vm_start; long unsigned int vm_end; char __data[0]; }; struct trace_event_raw_wake_reaper { struct trace_entry ent; int pid; char __data[0]; }; struct trace_event_raw_wakeup_source { struct trace_entry ent; u32 __data_loc_name; u64 state; char __data[0]; }; struct trace_event_raw_wbc_class { struct trace_entry ent; char name[32]; long int nr_to_write; long int pages_skipped; int sync_mode; int for_kupdate; int for_background; int for_reclaim; int range_cyclic; long int range_start; long int range_end; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_workqueue_activate_work { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_workqueue_execute_end { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_workqueue_execute_start { struct trace_entry ent; void *work; void *function; char __data[0]; }; struct trace_event_raw_workqueue_queue_work { struct trace_entry ent; void *work; void *function; u32 __data_loc_workqueue; int req_cpu; int cpu; char __data[0]; }; struct trace_event_raw_writeback_bdi_register { struct trace_entry ent; char name[32]; char __data[0]; }; struct trace_event_raw_writeback_class { struct trace_entry ent; char 
name[32]; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_dirty_inode_template { struct trace_entry ent; char name[32]; ino_t ino; long unsigned int state; long unsigned int flags; char __data[0]; }; struct trace_event_raw_writeback_folio_template { struct trace_entry ent; char name[32]; ino_t ino; long unsigned int index; char __data[0]; }; struct trace_event_raw_writeback_inode_template { struct trace_entry ent; dev_t dev; ino_t ino; long unsigned int state; __u16 mode; long unsigned int dirtied_when; char __data[0]; }; struct trace_event_raw_writeback_pages_written { struct trace_entry ent; long int pages; char __data[0]; }; struct trace_event_raw_writeback_queue_io { struct trace_entry ent; char name[32]; long unsigned int older; long int age; int moved; int reason; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_sb_inodes_requeue { struct trace_entry ent; char name[32]; ino_t ino; long unsigned int state; long unsigned int dirtied_when; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_single_inode_template { struct trace_entry ent; char name[32]; ino_t ino; long unsigned int state; long unsigned int dirtied_when; long unsigned int writeback_index; long int nr_to_write; long unsigned int wrote; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_work_class { struct trace_entry ent; char name[32]; long int nr_pages; dev_t sb_dev; int sync_mode; int for_kupdate; int range_cyclic; int for_background; int reason; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_writeback_write_inode_template { struct trace_entry ent; char name[32]; ino_t ino; int sync_mode; ino_t cgroup_ino; char __data[0]; }; struct trace_event_raw_x86_exceptions { struct trace_entry ent; long unsigned int address; long unsigned int ip; long unsigned int error_code; char __data[0]; }; struct trace_event_raw_x86_fpu { struct trace_entry ent; struct fpu *fpu; bool load_fpu; u64 xfeatures; u64 xcomp_bv; char __data[0]; }; struct trace_event_raw_x86_irq_vector { struct trace_entry ent; int vector; char __data[0]; }; struct trace_event_raw_xdp_bulk_tx { struct trace_entry ent; int ifindex; u32 act; int drops; int sent; int err; char __data[0]; }; struct trace_event_raw_xdp_cpumap_enqueue { struct trace_entry ent; int map_id; u32 act; int cpu; unsigned int drops; unsigned int processed; int to_cpu; char __data[0]; }; struct trace_event_raw_xdp_cpumap_kthread { struct trace_entry ent; int map_id; u32 act; int cpu; unsigned int drops; unsigned int processed; int sched; unsigned int xdp_pass; unsigned int xdp_drop; unsigned int xdp_redirect; char __data[0]; }; struct trace_event_raw_xdp_devmap_xmit { struct trace_entry ent; int from_ifindex; u32 act; int to_ifindex; int drops; int sent; int err; char __data[0]; }; struct trace_event_raw_xdp_exception { struct trace_entry ent; int prog_id; u32 act; int ifindex; char __data[0]; }; struct trace_event_raw_xdp_redirect_template { struct trace_entry ent; int prog_id; u32 act; int ifindex; int err; int to_ifindex; u32 map_id; int map_index; char __data[0]; }; struct trace_export { struct trace_export *next; void (*write)(struct trace_export *, const void *, unsigned int); int flags; }; struct trace_fprobe { struct dyn_event devent; struct fprobe fp; const char *symbol; struct tracepoint *tpoint; struct module *mod; struct trace_probe tp; }; struct trace_func_repeats { long unsigned int ip; long unsigned int parent_ip; long unsigned int count; u64 ts_last_call; }; struct trace_kprobe { struct 
dyn_event devent; struct kretprobe rp; long unsigned int *nhit; const char *symbol; struct trace_probe tp; }; struct trace_mark { long long unsigned int val; char sym; }; struct trace_min_max_param { struct mutex *lock; u64 *val; u64 *min; u64 *max; }; struct tracer_opt; struct tracer_flags; struct trace_option_dentry { struct tracer_opt *opt; struct tracer_flags *flags; struct trace_array *tr; struct dentry *entry; }; struct trace_options { struct tracer *tracer; struct trace_option_dentry *topts; }; union upper_chunk; struct trace_pid_list { raw_spinlock_t lock; struct irq_work refill_irqwork; union upper_chunk *upper[256]; union upper_chunk *upper_list; union lower_chunk *lower_list; int free_upper_chunks; int free_lower_chunks; }; struct trace_print_flags { long unsigned int mask; const char *name; }; struct trace_uprobe_filter { rwlock_t rwlock; int nr_systemwide; struct list_head perf_events; }; struct trace_probe_event { unsigned int flags; struct trace_event_class class; struct trace_event_call call; struct list_head files; struct list_head probes; struct trace_uprobe_filter filter[0]; }; struct trace_probe_log { const char *subsystem; const char **argv; int argc; int index; }; struct trace_subsystem_dir { struct list_head list; struct event_subsystem *subsystem; struct trace_array *tr; struct eventfs_inode *ei; int ref_count; int nr_events; }; struct trace_uprobe { struct dyn_event devent; struct uprobe_consumer consumer; struct path path; char *filename; struct uprobe *uprobe; long unsigned int offset; long unsigned int ref_ctr_offset; long unsigned int *nhits; struct trace_probe tp; }; struct tracefs_dir_ops { int (*mkdir)(const char *); int (*rmdir)(const char *); }; struct tracefs_fs_info { kuid_t uid; kgid_t gid; umode_t mode; unsigned int opts; }; struct tracefs_inode { struct inode vfs_inode; struct list_head list; long unsigned int flags; void *private; }; struct tracepoint { const char *name; struct static_key key; struct static_call_key *static_call_key; void *static_call_tramp; void *iterator; void *probestub; int (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func *funcs; }; struct traceprobe_parse_context { struct trace_event_call *event; const char *funcname; const struct btf_type *proto; const struct btf_param *params; s32 nr_params; struct btf *btf; const struct btf_type *last_type; u32 last_bitoffs; u32 last_bitsize; struct trace_probe *tp; unsigned int flags; int offset; }; struct tracer { const char *name; int (*init)(struct trace_array *); void (*reset)(struct trace_array *); void (*start)(struct trace_array *); void (*stop)(struct trace_array *); int (*update_thresh)(struct trace_array *); void (*open)(struct trace_iterator *); void (*pipe_open)(struct trace_iterator *); void (*close)(struct trace_iterator *); void (*pipe_close)(struct trace_iterator *); ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*print_header)(struct seq_file *); enum print_line_t (*print_line)(struct trace_iterator *); int (*set_flag)(struct trace_array *, u32, u32, int); int (*flag_changed)(struct trace_array *, u32, int); struct tracer *next; struct tracer_flags *flags; int enabled; bool print_max; bool allow_instances; bool noboot; }; struct tracer_flags { u32 val; struct tracer_opt *opts; struct tracer *trace; }; struct tracer_opt { const char *name; u32 bit; }; typedef int (*cmp_func_t)(const void *, const 
void *); struct tracer_stat { const char *name; void * (*stat_start)(struct tracer_stat *); void * (*stat_next)(void *, int); cmp_func_t stat_cmp; int (*stat_show)(struct seq_file *, void *); void (*stat_release)(void *); int (*stat_headers)(struct seq_file *); }; struct tracing_log_err { struct list_head list; struct err_info info; char loc[128]; char *cmd; }; struct track { long unsigned int addr; depot_stack_handle_t handle; int cpu; int pid; long unsigned int when; }; struct trackpoint_attr_data { size_t field_offset; u8 command; u8 mask; bool inverted; u8 power_on_default; }; struct trackpoint_data { u8 variant_id; u8 firmware_id; u8 sensitivity; u8 speed; u8 inertia; u8 reach; u8 draghys; u8 mindrag; u8 thresh; u8 upthresh; u8 ztime; u8 jenks; u8 drift_time; bool press_to_select; bool skipback; bool ext_dev; }; struct trampoline_header { u64 start; u64 efer; u32 cr4; u32 flags; u32 lock; }; struct transaction_chp_stats_s { long unsigned int cs_chp_time; __u32 cs_forced_to_close; __u32 cs_written; __u32 cs_dropped; }; struct transaction_s { journal_t *t_journal; tid_t t_tid; enum { T_RUNNING = 0, T_LOCKED = 1, T_SWITCH = 2, T_FLUSH = 3, T_COMMIT = 4, T_COMMIT_DFLUSH = 5, T_COMMIT_JFLUSH = 6, T_COMMIT_CALLBACK = 7, T_FINISHED = 8, } t_state; long unsigned int t_log_start; int t_nr_buffers; struct journal_head *t_reserved_list; struct journal_head *t_buffers; struct journal_head *t_forget; struct journal_head *t_checkpoint_list; struct journal_head *t_shadow_list; struct list_head t_inode_list; long unsigned int t_max_wait; long unsigned int t_start; long unsigned int t_requested; struct transaction_chp_stats_s t_chp_stats; atomic_t t_updates; atomic_t t_outstanding_credits; atomic_t t_outstanding_revokes; atomic_t t_handle_count; transaction_t *t_cpnext; transaction_t *t_cpprev; long unsigned int t_expires; ktime_t t_start_time; unsigned int t_synchronous_commit: 1; int t_need_data_flush; struct list_head t_private_list; }; struct transport_container { struct attribute_container ac; const struct attribute_group *statistics; }; struct trc_stall_chk_rdr { int nesting; int ipi_to_cpu; u8 needqs; }; struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; struct trie { struct key_vector kv[1]; }; struct trie_stat { unsigned int totdepth; unsigned int maxdepth; unsigned int tnodes; unsigned int leaves; unsigned int nullpointers; unsigned int prefixes; unsigned int nodesizes[32]; }; struct ts_ops; struct ts_state; struct ts_config { struct ts_ops *ops; int flags; unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); void (*finish)(struct ts_config *, struct ts_state *); }; struct ts_ops { const char *name; struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); unsigned int (*find)(struct ts_config *, struct ts_state *); void (*destroy)(struct ts_config *); void * (*get_pattern)(struct ts_config *); unsigned int (*get_pattern_len)(struct ts_config *); struct module *owner; struct list_head list; }; struct ts_state { unsigned int offset; char cb[48]; }; struct tsc_adjust { s64 bootval; s64 adjusted; long unsigned int nextcheck; bool warned; }; struct tsinfo_reply_data { struct ethnl_reply_data base; struct kernel_ethtool_ts_info ts_info; struct ethtool_ts_stats stats; }; struct tso_t { int next_frag_idx; int size; void *data; u16 ip_id; u8 tlen; bool ipv6; u32 tcp_seq; }; struct tsq_tasklet { struct tasklet_struct tasklet; struct list_head head; }; struct tty_audit_buf { struct mutex mutex; dev_t dev; bool 
icanon; size_t valid; u8 *data; }; struct tty_buffer { union { struct tty_buffer *next; struct llist_node free; }; unsigned int used; unsigned int size; unsigned int commit; unsigned int lookahead; unsigned int read; bool flags; long: 0; u8 data[0]; }; struct tty_bufhead { struct tty_buffer *head; struct work_struct work; struct mutex lock; atomic_t priority; struct tty_buffer sentinel; struct llist_head free; atomic_t mem_used; int mem_limit; struct tty_buffer *tail; }; struct tty_port; struct tty_operations; struct tty_driver { struct kref kref; struct cdev **cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; short int type; short int subtype; struct ktermios init_termios; long unsigned int flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; const struct tty_operations *ops; struct list_head tty_drivers; }; struct tty_file_private { struct tty_struct *tty; struct file *file; struct list_head list; }; struct tty_ldisc_ops; struct tty_ldisc { struct tty_ldisc_ops *ops; struct tty_struct *tty; }; struct tty_ldisc_ops { char *name; int num; int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *); ssize_t (*read)(struct tty_struct *, struct file *, u8 *, size_t, void **, long unsigned int); ssize_t (*write)(struct tty_struct *, struct file *, const u8 *, size_t); int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); void (*set_termios)(struct tty_struct *, const struct ktermios *); __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); void (*hangup)(struct tty_struct *); void (*receive_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, bool); size_t (*receive_buf2)(struct tty_struct *, const u8 *, const u8 *, size_t); void (*lookahead_buf)(struct tty_struct *, const u8 *, const u8 *, size_t); struct module *owner; }; struct winsize; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); int (*install)(struct tty_driver *, struct tty_struct *); void (*remove)(struct tty_driver *, struct tty_struct *); int (*open)(struct tty_struct *, struct file *); void (*close)(struct tty_struct *, struct file *); void (*shutdown)(struct tty_struct *); void (*cleanup)(struct tty_struct *); ssize_t (*write)(struct tty_struct *, const u8 *, size_t); int (*put_char)(struct tty_struct *, u8); void (*flush_chars)(struct tty_struct *); unsigned int (*write_room)(struct tty_struct *); unsigned int (*chars_in_buffer)(struct tty_struct *); int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); void (*set_termios)(struct tty_struct *, const struct ktermios *); void (*throttle)(struct tty_struct *); void (*unthrottle)(struct tty_struct *); void (*stop)(struct tty_struct *); void (*start)(struct tty_struct *); void (*hangup)(struct tty_struct *); int (*break_ctl)(struct tty_struct *, int); void (*flush_buffer)(struct tty_struct *); int (*ldisc_ok)(struct tty_struct *, int); void (*set_ldisc)(struct tty_struct *); void (*wait_until_sent)(struct tty_struct *, int); void (*send_xchar)(struct tty_struct *, u8); int (*tiocmget)(struct 
tty_struct *); int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); int (*resize)(struct tty_struct *, struct winsize *); int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); int (*get_serial)(struct tty_struct *, struct serial_struct *); int (*set_serial)(struct tty_struct *, struct serial_struct *); void (*show_fdinfo)(struct tty_struct *, struct seq_file *); int (*proc_show)(struct seq_file *, void *); }; struct tty_port_operations; struct tty_port_client_operations; struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct *itty; const struct tty_port_operations *ops; const struct tty_port_client_operations *client_ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t delta_msr_wait; long unsigned int flags; long unsigned int iflags; unsigned char console: 1; struct mutex mutex; struct mutex buf_mutex; u8 *xmit_buf; struct { union { struct __kfifo kfifo; u8 *type; const u8 *const_type; char (*rectype)[0]; u8 *ptr; const u8 *ptr_const; }; u8 buf[0]; } xmit_fifo; unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; void *client_data; }; struct tty_port_client_operations { size_t (*receive_buf)(struct tty_port *, const u8 *, const u8 *, size_t); void (*lookahead_buf)(struct tty_port *, const u8 *, const u8 *, size_t); void (*write_wakeup)(struct tty_port *); }; struct tty_port_operations { bool (*carrier_raised)(struct tty_port *); void (*dtr_rts)(struct tty_port *, bool); void (*shutdown)(struct tty_port *); int (*activate)(struct tty_port *, struct tty_struct *); void (*destruct)(struct tty_port *); }; struct winsize { short unsigned int ws_row; short unsigned int ws_col; short unsigned int ws_xpixel; short unsigned int ws_ypixel; }; struct tty_struct { struct kref kref; int index; struct device *dev; struct tty_driver *driver; struct tty_port *port; const struct tty_operations *ops; struct tty_ldisc *ldisc; struct ld_semaphore ldisc_sem; struct mutex atomic_write_lock; struct mutex legacy_mutex; struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; struct ktermios termios; struct ktermios termios_locked; char name[64]; long unsigned int flags; int count; unsigned int receive_room; struct winsize winsize; struct { spinlock_t lock; bool stopped; bool tco_stopped; } flow; struct { struct pid *pgrp; struct pid *session; spinlock_t lock; unsigned char pktstatus; bool packet; } ctrl; bool hw_stopped; bool closing; int flow_change; struct tty_struct *link; struct fasync_struct *fasync; wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; void *disc_data; void *driver_data; spinlock_t files_lock; int write_cnt; u8 *write_buf; struct list_head tty_files; struct work_struct SAK_work; }; struct tun_struct; struct tun_file { struct sock sk; struct socket socket; struct tun_struct *tun; struct fasync_struct *fasync; unsigned int flags; union { u16 queue_index; unsigned int ifindex; }; struct napi_struct napi; bool napi_enabled; bool napi_frags_enabled; struct mutex napi_mutex; struct list_head next; struct tun_struct *detached; long: 64; long: 64; long: 64; long: 64; long: 64; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; }; struct tun_filter { __u16 flags; __u16 count; __u8 addr[0]; }; struct tun_flow_entry { struct hlist_node hash_link; struct callback_head rcu; struct tun_struct *tun; u32 rxhash; u32 rps_rxhash; int queue_index; long: 64; long unsigned int updated; long: 64; 
long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct tun_msg_ctl { short unsigned int type; short unsigned int num; void *ptr; }; struct tun_page { struct page *page; int count; }; struct tun_pi { __u16 flags; __be16 proto; }; struct tun_prog { struct callback_head rcu; struct bpf_prog *prog; }; struct tun_security_struct { u32 sid; }; struct tun_struct { struct tun_file *tfiles[256]; unsigned int numqueues; unsigned int flags; kuid_t owner; kgid_t group; struct net_device *dev; netdev_features_t set_features; int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; struct sock_fprog fprog; bool filter_attached; u32 msg_enable; spinlock_t lock; struct hlist_head flows[1024]; struct timer_list flow_gc_timer; long unsigned int ageing_time; unsigned int numdisabled; struct list_head disabled; void *security; u32 flow_count; u32 rx_batched; atomic_long_t rx_frame_errors; struct bpf_prog *xdp_prog; struct tun_prog *steering_prog; struct tun_prog *filter_prog; struct ethtool_link_ksettings link_ksettings; struct file *file; struct ifreq *ifr; }; struct virtio_net_hdr { __u8 flags; __u8 gso_type; __virtio16 hdr_len; __virtio16 gso_size; __virtio16 csum_start; __virtio16 csum_offset; }; struct tun_xdp_hdr { int buflen; struct virtio_net_hdr gso; }; struct tunnel_msg { __u8 family; __u8 flags; __u16 reserved2; __u32 ifindex; }; struct type_datum { u32 value; u32 bounds; unsigned char primary; unsigned char attribute; }; struct type_set { struct ebitmap types; struct ebitmap negset; u32 flags; }; struct uart_8250_em485 { struct hrtimer start_tx_timer; struct hrtimer stop_tx_timer; struct hrtimer *active_timer; struct uart_8250_port *port; unsigned int tx_stopped: 1; }; struct uart_8250_ops { int (*setup_irq)(struct uart_8250_port *); void (*release_irq)(struct uart_8250_port *); void (*setup_timer)(struct uart_8250_port *); }; struct mctrl_gpios; struct uart_8250_port { struct uart_port port; struct timer_list timer; struct list_head list; u32 capabilities; u16 bugs; unsigned int tx_loadsz; unsigned char acr; unsigned char fcr; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned char cur_iotype; unsigned int rpm_tx_active; unsigned char canary; unsigned char probe; struct mctrl_gpios *gpios; u16 lsr_saved_flags; u16 lsr_save_mask; unsigned char msr_saved_flags; struct uart_8250_dma *dma; const struct uart_8250_ops *ops; u32 (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, u32); struct uart_8250_em485 *em485; void (*rs485_start_tx)(struct uart_8250_port *); void (*rs485_stop_tx)(struct uart_8250_port *); struct delayed_work overrun_backoff; u32 overrun_backoff_time_ms; }; struct uart_driver { struct module *owner; const char *driver_name; const char *dev_name; int major; int minor; int nr; struct console *cons; struct uart_state *state; struct tty_driver *tty_driver; }; struct uart_match { struct uart_port *port; struct uart_driver *driver; }; struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_mctrl)(struct uart_port *); void (*stop_tx)(struct uart_port *); void (*start_tx)(struct uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char); void (*stop_rx)(struct uart_port *); void (*start_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); void 
(*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *, const struct ktermios *); void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int, unsigned int); const char * (*type)(struct uart_port *); void (*release_port)(struct uart_port *); int (*request_port)(struct uart_port *); void (*config_port)(struct uart_port *, int); int (*verify_port)(struct uart_port *, struct serial_struct *); int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); }; struct uart_state { struct tty_port port; enum uart_pm_state pm_state; atomic_t refcount; wait_queue_head_t remove_wait; struct uart_port *uart_port; }; struct ubuf_info_msgzc { struct ubuf_info ubuf; union { struct { long unsigned int desc; void *ctx; }; struct { u32 id; u16 len; u16 zerocopy: 1; u32 bytelen; }; }; struct mmpin mmp; }; struct ubuf_info_ops { void (*complete)(struct sk_buff *, struct ubuf_info *, bool); int (*link_skb)(struct sk_buff *, struct ubuf_info *); }; struct ucode_cpu_info { struct cpu_signature cpu_sig; void *mc; }; struct ucode_patch { struct list_head plist; void *data; unsigned int size; u32 patch_id; u16 equiv_cpu; }; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; atomic_t count; atomic_long_t ucount[10]; atomic_long_t rlimit[4]; }; struct ucred { __u32 pid; __u32 uid; __u32 gid; }; struct udp_sock { struct inet_sock inet; long unsigned int udp_flags; int pending; __u8 encap_type; __u16 len; __u16 gso_size; __u16 pcslen; __u16 pcrlen; int (*encap_rcv)(struct sock *, struct sk_buff *); void (*encap_err_rcv)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); int (*encap_err_lookup)(struct sock *, struct sk_buff *); void (*encap_destroy)(struct sock *); struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); int (*gro_complete)(struct sock *, struct sk_buff *, int); long: 64; long: 64; long: 64; long: 64; struct sk_buff_head reader_queue; int forward_deficit; int forward_threshold; bool peeking_with_offset; long: 64; long: 64; long: 64; }; struct udp6_sock { struct udp_sock udp; struct ipv6_pinfo inet6; long: 64; long: 64; long: 64; long: 64; }; struct udp_dev_scratch { u32 _tsize_state; u16 len; bool is_linear; bool csum_unnecessary; }; struct udp_hslot { struct hlist_head head; int count; spinlock_t lock; }; struct udp_mib { long unsigned int mibs[10]; }; struct udp_seq_afinfo { sa_family_t family; struct udp_table *udp_table; }; struct udp_skb_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; __u16 cscov; __u8 partial_cov; }; struct udp_table { struct udp_hslot *hash; struct udp_hslot *hash2; unsigned int mask; unsigned int log; }; struct udp_tunnel_info { short unsigned int type; sa_family_t sa_family; __be16 port; u8 hw_priv; }; struct udp_tunnel_nic_table_entry; struct udp_tunnel_nic { struct work_struct work; struct net_device *dev; u8 need_sync: 1; u8 need_replay: 1; u8 work_pending: 1; unsigned int n_tables; long unsigned int missed; struct udp_tunnel_nic_table_entry *entries[0]; }; struct udp_tunnel_nic_table_info { unsigned int n_entries; unsigned int tunnel_types; }; struct udp_tunnel_nic_info { int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); int (*sync_table)(struct net_device *, unsigned int); struct udp_tunnel_nic_shared *shared; unsigned int flags; struct udp_tunnel_nic_table_info 
tables[4]; }; struct udp_tunnel_nic_ops { void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); void (*add_port)(struct net_device *, struct udp_tunnel_info *); void (*del_port)(struct net_device *, struct udp_tunnel_info *); void (*reset_ntf)(struct net_device *); size_t (*dump_size)(struct net_device *, unsigned int); int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); }; struct udp_tunnel_nic_shared_node { struct net_device *dev; struct list_head list; }; struct udp_tunnel_nic_table_entry { __be16 port; u8 type; u8 flags; u16 use_cnt; u8 hw_priv; }; typedef int (*udp_tunnel_encap_rcv_t)(struct sock *, struct sk_buff *); typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *, struct sk_buff *); typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *); typedef struct sk_buff * (*udp_tunnel_gro_receive_t)(struct sock *, struct list_head *, struct sk_buff *); typedef int (*udp_tunnel_gro_complete_t)(struct sock *, struct sk_buff *, int); struct udp_tunnel_sock_cfg { void *sk_user_data; __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv; udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_err_rcv_t encap_err_rcv; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; }; struct udphdr { __be16 source; __be16 dest; __be16 len; __sum16 check; }; struct uevent_sock { struct list_head list; struct sock *sk; }; struct uffd_msg { __u8 event; __u8 reserved1; __u16 reserved2; __u32 reserved3; union { struct { __u64 flags; __u64 address; union { __u32 ptid; } feat; } pagefault; struct { __u32 ufd; } fork; struct { __u64 from; __u64 to; __u64 len; } remap; struct { __u64 start; __u64 end; } remove; struct { __u64 reserved1; __u64 reserved2; __u64 reserved3; } reserved; } arg; }; struct uffdio_api { __u64 api; __u64 features; __u64 ioctls; }; struct uffdio_range { __u64 start; __u64 len; }; struct uffdio_continue { struct uffdio_range range; __u64 mode; __s64 mapped; }; struct uffdio_copy { __u64 dst; __u64 src; __u64 len; __u64 mode; __s64 copy; }; struct uffdio_move { __u64 dst; __u64 src; __u64 len; __u64 mode; __s64 move; }; struct uffdio_poison { struct uffdio_range range; __u64 mode; __s64 updated; }; struct uffdio_register { struct uffdio_range range; __u64 mode; __u64 ioctls; }; struct uffdio_writeprotect { struct uffdio_range range; __u64 mode; }; struct uffdio_zeropage { struct uffdio_range range; __u64 mode; __s64 zeropage; }; struct umd_info { const char *driver_name; struct file *pipe_to_umh; struct file *pipe_from_umh; struct path wd; struct pid *tgid; }; struct uncached_list { spinlock_t lock; struct list_head head; }; struct uncharge_gather { struct mem_cgroup *memcg; long unsigned int nr_memory; long unsigned int pgpgout; long unsigned int nr_kmem; int nid; }; struct uncore_event_desc { struct device_attribute attr; const char *config; }; struct uncore_global_discovery { union { u64 table1; struct { u64 type: 8; u64 stride: 8; u64 max_units: 10; u64 __reserved_1: 36; u64 access_type: 2; }; }; u64 ctl; union { u64 table3; struct { u64 status_offset: 8; u64 num_status: 16; u64 __reserved_2: 40; }; }; }; struct uncore_iio_topology { int pci_bus_no; int segment; }; struct uncore_unit_discovery { union { u64 table1; struct { u64 num_regs: 8; u64 ctl_offset: 8; u64 bit_width: 8; 
u64 ctr_offset: 8; u64 status_offset: 8; u64 __reserved_1: 22; u64 access_type: 2; }; }; u64 ctl; union { u64 table3; struct { u64 box_type: 16; u64 box_id: 16; u64 __reserved_2: 32; }; }; }; struct uncore_upi_topology { int die_to; int pmu_idx_to; int enabled; }; struct uni_pagedict { u16 **uni_pgdir[32]; long unsigned int refcount; long unsigned int sum; unsigned char *inverse_translations[4]; u16 *inverse_trans_unicode; }; struct unipair; struct unimapdesc { short unsigned int entry_ct; struct unipair *entries; }; struct unipair { short unsigned int unicode; short unsigned int fontpos; }; struct unix_address { refcount_t refcnt; int len; struct sockaddr_un name[0]; }; struct unix_edge { struct unix_sock *predecessor; struct unix_sock *successor; struct list_head vertex_entry; struct list_head stack_entry; }; struct unix_skb_parms { struct pid *pid; kuid_t uid; kgid_t gid; struct scm_fp_list *fp; u32 secid; u32 consumed; }; struct unix_vertex; struct unix_sock { struct sock sk; struct unix_address *addr; struct path path; struct mutex iolock; struct mutex bindlock; struct sock *peer; struct sock *listener; struct unix_vertex *vertex; spinlock_t lock; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct socket_wq peer_wq; wait_queue_entry_t peer_wake; struct scm_stat scm_stat; struct sk_buff *oob_skb; }; struct unix_stream_read_state { int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); struct socket *socket; struct msghdr *msg; struct pipe_inode_info *pipe; size_t size; int flags; unsigned int splice_flags; }; struct unix_vertex { struct list_head edges; struct list_head entry; struct list_head scc_entry; long unsigned int out_degree; long unsigned int index; long unsigned int scc_index; }; struct unixware_slice { __le16 s_label; __le16 s_flags; __le32 start_sect; __le32 nr_sects; }; struct unixware_vtoc { __le32 v_magic; __le32 v_version; char v_name[8]; __le16 v_nslices; __le16 v_unknown1; __le32 v_reserved[10]; struct unixware_slice v_slice[16]; }; struct unixware_disklabel { __le32 d_type; __le32 d_magic; __le32 d_version; char d_serial[12]; __le32 d_ncylinders; __le32 d_ntracks; __le32 d_nsectors; __le32 d_secsize; __le32 d_part_start; __le32 d_unknown1[12]; __le32 d_alt_tbl; __le32 d_alt_len; __le32 d_phys_cyl; __le32 d_phys_trk; __le32 d_phys_sec; __le32 d_phys_bytes; __le32 d_unknown2; __le32 d_unknown3; __le32 d_pad[8]; struct unixware_vtoc vtoc; }; struct unlink_vma_file_batch { int count; struct vm_area_struct *vmas[8]; }; struct unwind_state { struct stack_info stack_info; long unsigned int stack_mask; struct task_struct *task; int graph_idx; struct llist_node *kr_cur; bool error; bool signal; bool full_regs; long unsigned int sp; long unsigned int bp; long unsigned int ip; struct pt_regs *regs; struct pt_regs *prev_regs; }; struct update_classid_context { u32 classid; unsigned int batch; }; union upper_chunk { union upper_chunk *next; union lower_chunk *data[256]; }; struct uprobe { struct rb_node rb_node; refcount_t ref; struct rw_semaphore register_rwsem; struct rw_semaphore consumer_rwsem; struct list_head pending_list; struct list_head consumers; struct inode *inode; struct callback_head rcu; loff_t offset; loff_t ref_ctr_offset; long unsigned int flags; struct arch_uprobe arch; }; struct uprobe_cpu_buffer { struct mutex mutex; void *buf; int dsize; }; struct uprobe_dispatch_data { struct trace_uprobe *tu; long unsigned int bp_addr; }; struct uprobe_task { enum uprobe_task_state state; union { struct { struct arch_uprobe_task 
autask; long unsigned int vaddr; }; struct { struct callback_head dup_xol_work; long unsigned int dup_xol_addr; }; }; struct uprobe *active_uprobe; long unsigned int xol_vaddr; struct arch_uprobe *auprobe; struct return_instance *return_instances; unsigned int depth; }; struct uprobe_trace_entry_head { struct trace_entry ent; long unsigned int vaddr[0]; }; struct uprobe_xol_ops { bool (*emulate)(struct arch_uprobe *, struct pt_regs *); int (*pre_xol)(struct arch_uprobe *, struct pt_regs *); int (*post_xol)(struct arch_uprobe *, struct pt_regs *); void (*abort)(struct arch_uprobe *, struct pt_regs *); }; struct uring_cache { struct io_uring_sqe sqes[2]; }; struct usage_priority { __u32 usage; bool global; unsigned int slot_overwrite; }; struct used_address { struct __kernel_sockaddr_storage name; unsigned int name_len; }; struct user_arg_ptr { union { const char * const *native; } ptr; }; struct user_datum { u32 value; u32 bounds; struct ebitmap roles; struct mls_range range; struct mls_level dfltlevel; }; struct user_desc { unsigned int entry_number; unsigned int base_addr; unsigned int limit; unsigned int seg_32bit: 1; unsigned int contents: 2; unsigned int read_exec_only: 1; unsigned int limit_in_pages: 1; unsigned int seg_not_present: 1; unsigned int useable: 1; unsigned int lm: 1; }; struct user_i387_ia32_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20]; }; struct user_key_payload { struct callback_head rcu; short unsigned int datalen; long: 0; char data[0]; }; struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; long unsigned int flags; bool parent_could_setfcap; struct list_head keyring_name_list; struct key *user_keyring_register; struct rw_semaphore keyring_sem; struct work_struct work; struct ctl_table_set set; struct ctl_table_header *sysctls; struct ucounts *ucounts; long int ucount_max[10]; long int rlimit_max[4]; struct binfmt_misc *binfmt_misc; }; struct user_regset; typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); struct user_regset { user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; unsigned int n; unsigned int size; unsigned int align; unsigned int bias; unsigned int core_note_type; }; struct user_regset_view { const char *name; const struct user_regset *regsets; unsigned int n; u32 e_flags; u16 e_machine; u8 ei_osabi; }; struct user_struct { refcount_t __count; struct percpu_counter epoll_watches; long unsigned int unix_inflight; atomic_long_t pipe_bufs; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; struct ratelimit_state ratelimit; }; struct user_syms { const char **syms; char *buf; }; struct userfaultfd_ctx { wait_queue_head_t fault_pending_wqh; wait_queue_head_t fault_wqh; wait_queue_head_t fd_wqh; wait_queue_head_t event_wqh; seqcount_spinlock_t refile_seq; refcount_t refcount; unsigned int flags; unsigned int features; bool released; struct rw_semaphore map_changing_lock; atomic_t mmap_changing; struct mm_struct *mm; }; struct 
userfaultfd_fork_ctx { struct userfaultfd_ctx *orig; struct userfaultfd_ctx *new; struct list_head list; }; struct userfaultfd_unmap_ctx { struct userfaultfd_ctx *ctx; long unsigned int start; long unsigned int end; struct list_head list; }; struct userfaultfd_wait_queue { struct uffd_msg msg; wait_queue_entry_t wq; struct userfaultfd_ctx *ctx; bool waken; }; struct userfaultfd_wake_range { long unsigned int start; long unsigned int len; }; struct userspace_policy { unsigned int is_managed; unsigned int setspeed; struct mutex mutex; }; struct userstack_entry { struct trace_entry ent; unsigned int tgid; long unsigned int caller[8]; }; struct ustat { __kernel_daddr_t f_tfree; long unsigned int f_tinode; char f_fname[6]; char f_fpack[6]; }; struct ustring_buffer { char buffer[1024]; }; struct utf8_table { int cmask; int cval; int shift; long int lmask; long int lval; }; struct utimbuf { __kernel_old_time_t actime; __kernel_old_time_t modtime; }; struct uts_namespace { struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; }; union uu { short unsigned int us; unsigned char b[2]; }; struct uuidcmp { const char *uuid; int len; }; struct va_alignment { int flags; long unsigned int mask; long unsigned int bits; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct va_format { const char *fmt; va_list *va; }; struct var_mtrr_range_state { long unsigned int base_pfn; long unsigned int size_pfn; mtrr_type type; }; struct var_mtrr_state { long unsigned int range_startk; long unsigned int range_sizek; long unsigned int chunk_sizek; long unsigned int gran_sizek; unsigned int reg; }; struct vc { struct vc_data *d; struct work_struct SAK_work; }; struct vc_state { unsigned int x; unsigned int y; unsigned char color; unsigned char Gx_charset[2]; unsigned int charset: 1; enum vc_intensity intensity; bool italic; bool underline; bool blink; bool reverse; }; struct vt_mode { char mode; char waitv; short int relsig; short int acqsig; short int frsig; }; struct vc_data { struct tty_port port; struct vc_state state; struct vc_state saved_state; short unsigned int vc_num; unsigned int vc_cols; unsigned int vc_rows; unsigned int vc_size_row; unsigned int vc_scan_lines; unsigned int vc_cell_height; long unsigned int vc_origin; long unsigned int vc_scr_end; long unsigned int vc_visible_origin; unsigned int vc_top; unsigned int vc_bottom; const struct consw *vc_sw; short unsigned int *vc_screenbuf; unsigned int vc_screenbuf_size; unsigned char vc_mode; unsigned char vc_attr; unsigned char vc_def_color; unsigned char vc_ulcolor; unsigned char vc_itcolor; unsigned char vc_halfcolor; unsigned int vc_cursor_type; short unsigned int vc_complement_mask; short unsigned int vc_s_complement_mask; long unsigned int vc_pos; short unsigned int vc_hi_font_mask; struct console_font vc_font; short unsigned int vc_video_erase_char; unsigned int vc_state; unsigned int vc_npar; unsigned int vc_par[16]; struct vt_mode vt_mode; struct pid *vt_pid; int vt_newvt; wait_queue_head_t paste_wait; unsigned int vc_disp_ctrl: 1; unsigned int vc_toggle_meta: 1; unsigned int vc_decscnm: 1; unsigned int vc_decom: 1; unsigned int vc_decawm: 1; unsigned int vc_deccm: 1; unsigned int vc_decim: 1; unsigned int vc_priv: 3; unsigned int vc_need_wrap: 1; unsigned int vc_can_do_color: 1; unsigned int vc_report_mouse: 2; unsigned char vc_utf: 1; unsigned char vc_utf_count; int vc_utf_char; long unsigned int vc_tab_stop[4]; unsigned char vc_palette[48]; short unsigned int *vc_translate; unsigned int 
vc_bell_pitch; unsigned int vc_bell_duration; short unsigned int vc_cur_blink_ms; struct vc_data **vc_display_fg; struct uni_pagedict *uni_pagedict; struct uni_pagedict **uni_pagedict_loc; u32 **vc_uni_lines; }; struct vc_draw_region { long unsigned int from; long unsigned int to; int x; }; struct vc_selection { struct mutex lock; struct vc_data *cons; char *buffer; unsigned int buf_len; volatile int start; int end; }; struct vcs_poll_data { struct notifier_block notifier; unsigned int cons_num; int event; wait_queue_head_t waitq; struct fasync_struct *fasync; }; struct vdso_timestamp { u64 sec; u64 nsec; }; struct vdso_data { u32 seq; s32 clock_mode; u64 cycle_last; u64 max_cycles; u64 mask; u32 mult; u32 shift; union { struct vdso_timestamp basetime[12]; struct timens_offset offset[12]; }; s32 tz_minuteswest; s32 tz_dsttime; u32 hrtimer_res; u32 __unused; struct arch_vdso_data arch_data; }; struct vdso_exception_table_entry { int insn; int fixup; }; struct vdso_image { void *data; long unsigned int size; long unsigned int alt; long unsigned int alt_len; long unsigned int extable_base; long unsigned int extable_len; const void *extable; long int sym_vvar_start; long int sym_vvar_page; long int sym_pvclock_page; long int sym_hvclock_page; long int sym_timens_page; long int sym_VDSO32_NOTE_MASK; long int sym___kernel_sigreturn; long int sym___kernel_rt_sigreturn; long int sym___kernel_vsyscall; long int sym_int80_landing_pad; long int sym_vdso32_sigreturn_landing_pad; long int sym_vdso32_rt_sigreturn_landing_pad; }; struct vdso_rng_data { u64 generation; u8 is_ready; }; struct vector_cleanup { struct hlist_head head; struct timer_list timer; }; struct vesafb_par { u32 pseudo_palette[256]; resource_size_t base; resource_size_t size; int wc_cookie; struct resource *region; }; struct veth { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; struct veth_rq; struct veth_priv { struct net_device *peer; atomic64_t dropped; struct bpf_prog *_xdp_prog; struct veth_rq *rq; unsigned int requested_headroom; }; struct veth_q_stat_desc { char desc[32]; size_t offset; }; struct veth_stats { u64 rx_drops; u64 xdp_packets; u64 xdp_bytes; u64 xdp_redirect; u64 xdp_drops; u64 xdp_tx; u64 xdp_tx_err; u64 peer_tq_xdp_xmit; u64 peer_tq_xdp_xmit_err; }; struct veth_rq_stats { struct veth_stats vs; struct u64_stats_sync syncp; }; struct veth_rq { struct napi_struct xdp_napi; struct napi_struct *napi; struct net_device *dev; struct bpf_prog *xdp_prog; struct xdp_mem_info xdp_mem; struct veth_rq_stats stats; bool rx_notify_masked; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct veth_xdp_buff { struct xdp_buff xdp; struct sk_buff *skb; }; struct veth_xdp_tx_bq { struct xdp_frame *q[16]; unsigned int count; }; struct vfree_deferred { struct llist_head list; struct work_struct wq; }; struct vfs_cap_data { __le32 magic_etc; struct { __le32 permitted; __le32 inheritable; } data[2]; }; struct vfs_ns_cap_data { __le32 magic_etc; struct { __le32 permitted; __le32 inheritable; } data[2]; __le32 rootid; }; struct vga_arb_user_card { struct pci_dev *pdev; unsigned int mem_cnt; unsigned int io_cnt; }; struct vga_arb_private { struct list_head list; struct pci_dev *target; struct vga_arb_user_card cards[16]; spinlock_t lock; }; struct vga_device { struct list_head list; struct pci_dev *pdev; unsigned int decodes; unsigned int owns; unsigned int locks; unsigned int io_lock_cnt; unsigned int mem_lock_cnt; unsigned int 
io_norm_cnt; unsigned int mem_norm_cnt; bool bridge_has_one_vga; bool is_firmware_default; unsigned int (*set_decode)(struct pci_dev *, bool); }; struct vgastate { void *vgabase; long unsigned int membase; __u32 memsize; __u32 flags; __u32 depth; __u32 num_attr; __u32 num_crtc; __u32 num_gfx; __u32 num_seq; void *vidstate; }; struct vif_entry_notifier_info { struct fib_notifier_info info; struct net_device *dev; short unsigned int vif_index; short unsigned int vif_flags; u32 tb_id; }; struct vifctl { vifi_t vifc_vifi; unsigned char vifc_flags; unsigned char vifc_threshold; unsigned int vifc_rate_limit; union { struct in_addr vifc_lcl_addr; int vifc_lcl_ifindex; }; struct in_addr vifc_rmt_addr; }; struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; __virtio16 num_buffers; }; struct vsock_sock; struct vsock_transport_recv_notify_data; struct vsock_transport_send_notify_data; struct vsock_transport { struct module *module; int (*init)(struct vsock_sock *, struct vsock_sock *); void (*destruct)(struct vsock_sock *); void (*release)(struct vsock_sock *); int (*cancel_pkt)(struct vsock_sock *); int (*connect)(struct vsock_sock *); int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); int (*dgram_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, struct msghdr *, size_t); bool (*dgram_allow)(u32, u32); ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t, int); ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t); s64 (*stream_has_data)(struct vsock_sock *); s64 (*stream_has_space)(struct vsock_sock *); u64 (*stream_rcvhiwat)(struct vsock_sock *); bool (*stream_is_active)(struct vsock_sock *); bool (*stream_allow)(u32, u32); ssize_t (*seqpacket_dequeue)(struct vsock_sock *, struct msghdr *, int); int (*seqpacket_enqueue)(struct vsock_sock *, struct msghdr *, size_t); bool (*seqpacket_allow)(u32); u32 (*seqpacket_has_data)(struct vsock_sock *); int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); int (*notify_recv_init)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_pre_block)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, struct vsock_transport_recv_notify_data *); int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, ssize_t, bool, struct vsock_transport_recv_notify_data *); int (*notify_send_init)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_pre_block)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_pre_enqueue)(struct vsock_sock *, struct vsock_transport_send_notify_data *); int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, struct vsock_transport_send_notify_data *); void (*notify_buffer_size)(struct vsock_sock *, u64 *); int (*notify_set_rcvlowat)(struct vsock_sock *, int); ssize_t (*unsent_bytes)(struct vsock_sock *); int (*shutdown)(struct vsock_sock *, int); u32 (*get_local_cid)(void); int (*read_skb)(struct vsock_sock *, skb_read_actor_t); bool (*msgzerocopy_allow)(void); }; struct virtio_transport { struct vsock_transport transport; int (*send_pkt)(struct sk_buff *); bool (*can_msgzerocopy)(int); }; struct virtio_vsock_hdr { __le64 src_cid; __le64 dst_cid; __le32 src_port; __le32 dst_port; __le32 len; __le16 type; __le16 op; __le32 flags; __le32 buf_alloc; __le32 fwd_cnt; } 
__attribute__((packed)); struct virtio_vsock_pkt_info { u32 remote_cid; u32 remote_port; struct vsock_sock *vsk; struct msghdr *msg; u32 pkt_len; u16 type; u16 op; u32 flags; bool reply; }; struct virtio_vsock_skb_cb { bool reply; bool tap_delivered; u32 offset; }; struct virtio_vsock_sock { struct vsock_sock *vsk; spinlock_t tx_lock; spinlock_t rx_lock; u32 tx_cnt; u32 peer_fwd_cnt; u32 peer_buf_alloc; size_t bytes_unsent; u32 fwd_cnt; u32 last_fwd_cnt; u32 rx_bytes; u32 buf_alloc; struct sk_buff_head rx_queue; u32 msg_count; }; struct vlan_priority_tci_mapping; struct vlan_pcpu_stats; struct vlan_dev_priv { unsigned int nr_ingress_mappings; u32 ingress_priority_map[8]; unsigned int nr_egress_mappings; struct vlan_priority_tci_mapping *egress_priority_map[16]; __be16 vlan_proto; u16 vlan_id; u16 flags; struct net_device *real_dev; netdevice_tracker dev_tracker; unsigned char real_dev_addr[6]; struct proc_dir_entry *dent; struct vlan_pcpu_stats *vlan_pcpu_stats; }; struct vlan_ethhdr { union { struct { unsigned char h_dest[6]; unsigned char h_source[6]; }; struct { unsigned char h_dest[6]; unsigned char h_source[6]; } addrs; }; __be16 h_vlan_proto; __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; struct vlan_group { unsigned int nr_vlan_devs; struct hlist_node hlist; struct net_device **vlan_devices_arrays[16]; }; struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; struct vlan_info { struct net_device *real_dev; struct vlan_group grp; struct list_head vid_list; unsigned int nr_vids; struct callback_head rcu; }; struct vlan_ioctl_args { int cmd; char device1[24]; union { char device2[24]; int VID; unsigned int skb_priority; unsigned int name_type; unsigned int bind_type; unsigned int flag; } u; short int vlan_qos; }; struct vlan_net { struct proc_dir_entry *proc_vlan_dir; struct proc_dir_entry *proc_vlan_conf; short unsigned int name_type; }; struct vlan_pcpu_stats { u64_stats_t rx_packets; u64_stats_t rx_bytes; u64_stats_t rx_multicast; u64_stats_t tx_packets; u64_stats_t tx_bytes; struct u64_stats_sync syncp; u32 rx_errors; u32 tx_dropped; }; struct vlan_priority_tci_mapping { u32 priority; u16 vlan_qos; struct vlan_priority_tci_mapping *next; }; struct vlan_vid_info { struct list_head list; __be16 proto; u16 vid; int refcount; }; struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; }; struct vma_lock; struct vma_numab_state; struct vm_area_struct { union { struct { long unsigned int vm_start; long unsigned int vm_end; }; struct callback_head vm_rcu; }; struct mm_struct *vm_mm; pgprot_t vm_page_prot; union { const vm_flags_t vm_flags; vm_flags_t __vm_flags; }; bool detached; int vm_lock_seq; struct vma_lock *vm_lock; struct { struct rb_node rb; long unsigned int rb_subtree_last; } shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; long unsigned int vm_pgoff; struct file *vm_file; void *vm_private_data; atomic_long_t swap_readahead_info; struct mempolicy *vm_policy; struct vma_numab_state *numab_state; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; }; struct vm_event_state { long unsigned int event[106]; }; struct vm_fault { const struct { struct vm_area_struct *vma; gfp_t gfp_mask; long unsigned int pgoff; long unsigned int address; long unsigned int real_address; }; enum fault_flag flags; pmd_t *pmd; pud_t *pud; union { pte_t orig_pte; pmd_t orig_pmd; }; struct page *cow_page; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; }; struct vm_operations_struct { void 
(*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*may_split)(struct vm_area_struct *, long unsigned int); int (*mremap)(struct vm_area_struct *); int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); vm_fault_t (*fault)(struct vm_fault *); vm_fault_t (*huge_fault)(struct vm_fault *, unsigned int); vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); long unsigned int (*pagesize)(struct vm_area_struct *); vm_fault_t (*page_mkwrite)(struct vm_fault *); vm_fault_t (*pfn_mkwrite)(struct vm_fault *); int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, long unsigned int, long unsigned int *); struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); }; struct vm_special_mapping { const char *name; struct page **pages; vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); void (*close)(const struct vm_special_mapping *, struct vm_area_struct *); }; struct vm_stack { struct callback_head rcu; struct vm_struct *stack_vm_area; }; struct vm_struct { struct vm_struct *next; void *addr; long unsigned int size; long unsigned int flags; struct page **pages; unsigned int page_order; unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; }; struct vm_unmapped_area_info { long unsigned int flags; long unsigned int length; long unsigned int low_limit; long unsigned int high_limit; long unsigned int align_mask; long unsigned int align_offset; long unsigned int start_gap; }; struct vma_list { struct vm_area_struct *vma; struct list_head head; atomic_t mmap_count; }; struct vma_lock { struct rw_semaphore lock; }; struct vma_merge_struct { struct mm_struct *mm; struct vma_iterator *vmi; long unsigned int pgoff; struct vm_area_struct *prev; struct vm_area_struct *next; struct vm_area_struct *vma; long unsigned int start; long unsigned int end; long unsigned int flags; struct file *file; struct anon_vma *anon_vma; struct mempolicy *policy; struct vm_userfaultfd_ctx uffd_ctx; struct anon_vma_name *anon_name; enum vma_merge_state state; }; struct vma_munmap_struct { struct vma_iterator *vmi; struct vm_area_struct *vma; struct vm_area_struct *prev; struct vm_area_struct *next; struct list_head *uf; long unsigned int start; long unsigned int end; long unsigned int unmap_start; long unsigned int unmap_end; int vma_count; bool unlock; bool clear_ptes; bool closed_vm_ops; long unsigned int nr_pages; long unsigned int locked_vm; long unsigned int nr_accounted; long unsigned int exec_vm; long unsigned int stack_vm; long unsigned int data_vm; }; struct vma_numab_state { long unsigned int next_scan; long unsigned int pids_active_reset; long unsigned int pids_active[2]; int start_scan_seq; int prev_scan_seq; }; struct vma_prepare { struct vm_area_struct *vma; struct vm_area_struct *adj_next; struct file *file; struct address_space *mapping; struct anon_vma *anon_vma; struct vm_area_struct *insert; struct vm_area_struct *remove; struct vm_area_struct *remove2; }; struct vmap_area { long unsigned int va_start; long unsigned int va_end; struct rb_node rb_node; struct list_head list; union { long unsigned int subtree_max_size; struct vm_struct *vm; }; long unsigned int flags; }; struct 
vmap_block { spinlock_t lock; struct vmap_area *va; long unsigned int free; long unsigned int dirty; long unsigned int used_map[16]; long unsigned int dirty_min; long unsigned int dirty_max; struct list_head free_list; struct callback_head callback_head; struct list_head purge; unsigned int cpu; }; struct vmap_block_queue { spinlock_t lock; struct list_head free; struct xarray vmap_blocks; }; struct vmap_pool { struct list_head head; long unsigned int len; }; struct vmap_node { struct vmap_pool pool[256]; spinlock_t pool_lock; bool skip_populate; struct rb_list busy; struct rb_list lazy; struct list_head purge_list; struct work_struct purge_work; long unsigned int nr_purged; }; struct vmcore { struct list_head list; long long unsigned int paddr; long long unsigned int size; loff_t offset; }; struct vmcore_cb { bool (*pfn_is_ram)(struct vmcore_cb *, long unsigned int); struct list_head next; }; struct vmemmap_remap_walk { void (*remap_pte)(pte_t *, long unsigned int, struct vmemmap_remap_walk *); long unsigned int nr_walked; struct page *reuse_page; long unsigned int reuse_addr; struct list_head *vmemmap_pages; long unsigned int flags; }; struct vmgenid_state { u8 *next_id; u8 this_id[16]; }; struct vmpressure_event { struct eventfd_ctx *efd; enum vmpressure_levels level; enum vmpressure_modes mode; struct list_head node; }; struct vrf_map_elem { struct hlist_node hnode; struct list_head vrf_list; u32 table_id; int users; int ifindex; }; struct vsock_diag_msg { __u8 vdiag_family; __u8 vdiag_type; __u8 vdiag_state; __u8 vdiag_shutdown; __u32 vdiag_src_cid; __u32 vdiag_src_port; __u32 vdiag_dst_cid; __u32 vdiag_dst_port; __u32 vdiag_ino; __u32 vdiag_cookie[2]; }; struct vsock_diag_req { __u8 sdiag_family; __u8 sdiag_protocol; __u16 pad; __u32 vdiag_states; __u32 vdiag_ino; __u32 vdiag_show; __u32 vdiag_cookie[2]; }; struct vsock_loopback { struct workqueue_struct *workqueue; struct sk_buff_head pkt_queue; struct work_struct pkt_work; }; struct vsock_sock { struct sock sk; const struct vsock_transport *transport; struct sockaddr_vm local_addr; struct sockaddr_vm remote_addr; struct list_head bound_table; struct list_head connected_table; bool trusted; bool cached_peer_allow_dgram; u32 cached_peer; const struct cred *owner; long int connect_timeout; struct sock *listener; struct list_head pending_links; struct list_head accept_queue; bool rejected; struct delayed_work connect_work; struct delayed_work pending_work; struct delayed_work close_work; bool close_work_scheduled; u32 peer_shutdown; bool sent_request; bool ignore_connecting_rst; u64 buffer_size; u64 buffer_min_size; u64 buffer_max_size; void *trans; }; struct vsock_tap { struct net_device *dev; struct module *module; struct list_head list; }; struct vsock_transport_recv_notify_data { u64 data1; u64 data2; bool notify_on_block; }; struct vsock_transport_send_notify_data { u64 data1; u64 data2; }; struct vt_consize { short unsigned int v_rows; short unsigned int v_cols; short unsigned int v_vlin; short unsigned int v_clin; short unsigned int v_vcol; short unsigned int v_ccol; }; struct vt_event { unsigned int event; unsigned int oldev; unsigned int newev; unsigned int pad[4]; }; struct vt_event_wait { struct list_head list; struct vt_event event; int done; }; struct vt_notifier_param { struct vc_data *vc; unsigned int c; }; struct vt_setactivate { unsigned int console; struct vt_mode mode; }; struct vt_sizes { short unsigned int v_rows; short unsigned int v_cols; short unsigned int v_scrollsize; }; struct vt_spawn_console { spinlock_t 
lock; struct pid *pid; int sig; }; struct vt_stat { short unsigned int v_active; short unsigned int v_signal; short unsigned int v_state; }; struct vxlan_config { union vxlan_addr remote_ip; union vxlan_addr saddr; __be32 vni; int remote_ifindex; int mtu; __be16 dst_port; u16 port_min; u16 port_max; u8 tos; u8 ttl; __be32 label; enum ifla_vxlan_label_policy label_policy; u32 flags; long unsigned int age_interval; unsigned int addrmax; bool no_share; enum ifla_vxlan_df df; }; struct vxlan_dev; struct vxlan_dev_node { struct hlist_node hlist; struct vxlan_dev *vxlan; }; struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; u8 offloaded: 1; __be32 remote_vni; u32 remote_ifindex; struct net_device *remote_dev; struct list_head list; struct callback_head rcu; struct dst_cache dst_cache; }; struct vxlan_sock; struct vxlan_vni_group; struct vxlan_dev { struct vxlan_dev_node hlist4; struct vxlan_dev_node hlist6; struct list_head next; struct vxlan_sock *vn4_sock; struct vxlan_sock *vn6_sock; struct net_device *dev; struct net *net; struct vxlan_rdst default_dst; struct timer_list age_timer; spinlock_t hash_lock[256]; unsigned int addrcnt; struct gro_cells gro_cells; struct vxlan_config cfg; struct vxlan_vni_group *vnigrp; struct hlist_head fdb_head[256]; struct rhashtable mdb_tbl; struct hlist_head mdb_list; unsigned int mdb_seq; }; struct vxlan_fdb { struct hlist_node hlist; struct callback_head rcu; long unsigned int updated; long unsigned int used; struct list_head remotes; u8 eth_addr[6]; u16 state; __be32 vni; u16 flags; struct list_head nh_list; struct nexthop *nh; struct vxlan_dev *vdev; }; struct vxlan_fdb_flush_desc { bool ignore_default_entry; long unsigned int state; long unsigned int state_mask; long unsigned int flags; long unsigned int flags_mask; __be32 src_vni; u32 nhid; __be32 vni; __be16 port; union vxlan_addr dst_ip; }; struct vxlan_mdb_entry_key { union vxlan_addr src; union vxlan_addr dst; __be32 vni; }; struct vxlan_mdb_config { struct vxlan_dev *vxlan; struct vxlan_mdb_entry_key group; struct list_head src_list; union vxlan_addr remote_ip; u32 remote_ifindex; __be32 remote_vni; __be16 remote_port; u16 nlflags; u8 flags; u8 filter_mode; u8 rt_protocol; }; struct vxlan_mdb_config_src_entry { union vxlan_addr addr; struct list_head node; }; struct vxlan_mdb_dump_ctx { long int reserved; long int entry_idx; long int remote_idx; }; struct vxlan_mdb_entry { struct rhash_head rhnode; struct list_head remotes; struct vxlan_mdb_entry_key key; struct hlist_node mdb_node; struct callback_head rcu; }; struct vxlan_mdb_flush_desc { union vxlan_addr remote_ip; __be32 src_vni; __be32 remote_vni; __be16 remote_port; u8 rt_protocol; }; struct vxlan_mdb_remote { struct list_head list; struct vxlan_rdst *rd; u8 flags; u8 filter_mode; u8 rt_protocol; struct hlist_head src_list; struct callback_head rcu; }; struct vxlan_mdb_src_entry { struct hlist_node node; union vxlan_addr addr; u8 flags; }; struct vxlan_metadata { u32 gbp; }; struct vxlan_net { struct list_head vxlan_list; struct hlist_head sock_list[256]; spinlock_t sock_lock; struct notifier_block nexthop_notifier_block; }; struct vxlan_sock { struct hlist_node hlist; struct socket *sock; struct hlist_head vni_list[1024]; refcount_t refcnt; u32 flags; }; struct vxlan_vni_group { struct rhashtable vni_hash; struct list_head vni_list; u32 num_vnis; }; struct vxlan_vni_stats_pcpu; struct vxlan_vni_node { struct rhash_head vnode; struct vxlan_dev_node hlist4; struct vxlan_dev_node hlist6; struct list_head vlist; __be32 vni; union 
vxlan_addr remote_ip; struct vxlan_vni_stats_pcpu *stats; struct callback_head rcu; }; struct vxlan_vni_stats { u64 rx_packets; u64 rx_bytes; u64 rx_drops; u64 rx_errors; u64 tx_packets; u64 tx_bytes; u64 tx_drops; u64 tx_errors; }; struct vxlan_vni_stats_pcpu { struct vxlan_vni_stats stats; struct u64_stats_sync syncp; }; struct vxlanhdr { __be32 vx_flags; __be32 vx_vni; }; struct vxlanhdr_gbp { u8 vx_flags; u8 reserved_flags1: 3; u8 policy_applied: 1; u8 reserved_flags2: 2; u8 dont_learn: 1; u8 reserved_flags3: 1; __be16 policy_id; __be32 vx_vni; }; struct vxlanhdr_gpe { u8 oam_flag: 1; u8 reserved_flags1: 1; u8 np_applied: 1; u8 instance_applied: 1; u8 version: 2; u8 reserved_flags2: 2; u8 reserved_flags3; u8 reserved_flags4; u8 next_protocol; __be32 vx_vni; }; struct wait_bit_key { void *flags; int bit_nr; long unsigned int timeout; }; struct wait_bit_queue_entry { struct wait_bit_key key; struct wait_queue_entry wq_entry; }; struct wait_page_key { struct folio *folio; int bit_nr; int page_match; }; struct wake_irq { struct device *dev; unsigned int status; int irq; const char *name; }; struct wakeup_header { u16 video_mode; u32 pmode_entry; u16 pmode_cs; u32 pmode_cr0; u32 pmode_cr3; u32 pmode_cr4; u32 pmode_efer_low; u32 pmode_efer_high; u64 pmode_gdt; u32 pmode_misc_en_low; u32 pmode_misc_en_high; u32 pmode_behavior; u32 realmode_flags; u32 real_magic; u32 signature; } __attribute__((packed)); struct wakeup_source { const char *name; int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; long unsigned int timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; long unsigned int event_count; long unsigned int active_count; long unsigned int relax_count; long unsigned int expire_count; long unsigned int wakeup_count; struct device *dev; bool active: 1; bool autosleep_enabled: 1; }; struct walk_rcec_data { struct pci_dev *rcec; int (*user_callback)(struct pci_dev *, void *); void *user_data; }; struct warn_args { const char *fmt; va_list args; }; struct wb_lock_cookie { bool locked; long unsigned int flags; }; struct wb_stats { long unsigned int nr_dirty; long unsigned int nr_io; long unsigned int nr_more_io; long unsigned int nr_dirty_time; long unsigned int nr_writeback; long unsigned int nr_reclaimable; long unsigned int nr_dirtied; long unsigned int nr_written; long unsigned int dirty_thresh; long unsigned int wb_thresh; }; struct wb_writeback_work { long int nr_pages; struct super_block *sb; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages: 1; unsigned int for_kupdate: 1; unsigned int range_cyclic: 1; unsigned int for_background: 1; unsigned int for_sync: 1; unsigned int auto_free: 1; enum wb_reason reason; struct list_head list; struct wb_completion *done; }; struct wol_reply_data { struct ethnl_reply_data base; struct ethtool_wolinfo wol; bool show_sopass; }; struct word_at_a_time { const long unsigned int one_bits; const long unsigned int high_bits; }; struct work_for_cpu { struct work_struct work; long int (*fn)(void *); void *arg; long int ret; }; struct work_offq_data { u32 pool_id; u32 disable; u32 flags; }; struct worker { union { struct list_head entry; struct hlist_node hentry; }; struct work_struct *current_work; work_func_t current_func; struct pool_workqueue *current_pwq; u64 current_at; unsigned int current_color; int sleeping; work_func_t last_func; struct list_head scheduled; struct task_struct *task; struct worker_pool *pool; 
struct list_head node; long unsigned int last_active; unsigned int flags; int id; char desc[32]; struct workqueue_struct *rescue_wq; }; struct worker_pool { raw_spinlock_t lock; int cpu; int node; int id; unsigned int flags; long unsigned int watchdog_ts; bool cpu_stall; int nr_running; struct list_head worklist; int nr_workers; int nr_idle; struct list_head idle_list; struct timer_list idle_timer; struct work_struct idle_cull_work; struct timer_list mayday_timer; struct hlist_head busy_hash[64]; struct worker *manager; struct list_head workers; struct ida worker_ida; struct workqueue_attrs *attrs; struct hlist_node hash_node; int refcnt; struct callback_head rcu; }; struct workqueue_attrs { int nice; cpumask_var_t cpumask; cpumask_var_t __pod_cpumask; bool affn_strict; enum wq_affn_scope affn_scope; bool ordered; }; struct wq_flusher; struct wq_device; struct wq_node_nr_active; struct workqueue_struct { struct list_head pwqs; struct list_head list; struct mutex mutex; int work_color; int flush_color; atomic_t nr_pwqs_to_flush; struct wq_flusher *first_flusher; struct list_head flusher_queue; struct list_head flusher_overflow; struct list_head maydays; struct worker *rescuer; int nr_drainers; int max_active; int min_active; int saved_max_active; int saved_min_active; struct workqueue_attrs *unbound_attrs; struct pool_workqueue *dfl_pwq; struct wq_device *wq_dev; char *lock_name; struct lock_class_key key; struct lockdep_map __lockdep_map; struct lockdep_map *lockdep_map; char name[32]; struct callback_head rcu; long: 64; long: 64; long: 64; unsigned int flags; struct pool_workqueue **cpu_pwq; struct wq_node_nr_active *node_nr_active[0]; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct wq_barrier { struct work_struct work; struct completion done; struct task_struct *task; }; struct wq_device { struct workqueue_struct *wq; struct device dev; }; struct wq_drain_dead_softirq_work { struct work_struct work; struct worker_pool *pool; struct completion done; }; struct wq_flusher { struct list_head list; int flush_color; struct completion done; }; struct wq_node_nr_active { int max; atomic_t nr; raw_spinlock_t lock; struct list_head pending_pwqs; }; struct wq_pod_type { int nr_pods; cpumask_var_t *pod_cpus; int *pod_node; int *cpu_pod; }; typedef void (*swap_func_t)(void *, void *, int); struct wrapper { cmp_func_t cmp; swap_func_t swap; }; struct writeback_control { long int nr_to_write; long int pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned int for_kupdate: 1; unsigned int for_background: 1; unsigned int tagged_writepages: 1; unsigned int for_reclaim: 1; unsigned int range_cyclic: 1; unsigned int for_sync: 1; unsigned int unpinned_netfs_wb: 1; unsigned int no_cgroup_owner: 1; struct swap_iocb **swap_plug; struct list_head *list; struct folio_batch fbatch; long unsigned int index; int saved_err; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; }; struct writer { uint8_t *buffer; uint8_t previous_byte; size_t buffer_pos; int bufsize; size_t global_pos; long int (*flush)(void *, long unsigned int); struct lzma_header *header; }; struct ww_acquire_ctx { struct task_struct *task; long unsigned int stamp; unsigned int acquired; short unsigned int wounded; short unsigned int is_wait_die; unsigned int done_acquire; struct ww_class *ww_class; void *contending_lock; struct lockdep_map dep_map; unsigned int 
deadlock_inject_interval; unsigned int deadlock_inject_countdown; }; struct ww_class { atomic_long_t stamp; struct lock_class_key acquire_key; struct lock_class_key mutex_key; const char *acquire_name; const char *mutex_name; unsigned int is_wait_die; }; struct x509_certificate { struct x509_certificate *next; struct x509_certificate *signer; struct public_key *pub; struct public_key_signature *sig; char *issuer; char *subject; struct asymmetric_key_id *id; struct asymmetric_key_id *skid; time64_t valid_from; time64_t valid_to; const void *tbs; unsigned int tbs_size; unsigned int raw_sig_size; const void *raw_sig; const void *raw_serial; unsigned int raw_serial_size; unsigned int raw_issuer_size; const void *raw_issuer; const void *raw_subject; unsigned int raw_subject_size; unsigned int raw_skid_size; const void *raw_skid; unsigned int index; bool seen; bool verified; bool self_signed; bool unsupported_sig; bool blacklisted; }; struct x509_parse_context { struct x509_certificate *cert; long unsigned int data; const void *key; size_t key_size; const void *params; size_t params_size; enum OID key_algo; enum OID last_oid; enum OID sig_algo; u8 o_size; u8 cn_size; u8 email_size; u16 o_offset; u16 cn_offset; u16 email_offset; unsigned int raw_akid_size; const void *raw_akid; const void *akid_raw_issuer; unsigned int akid_raw_issuer_size; }; struct x64_jit_data { struct bpf_binary_header *rw_header; struct bpf_binary_header *header; int *addrs; u8 *image; int proglen; struct jit_context ctx; }; struct x86_apic_ops { unsigned int (*io_apic_read)(unsigned int, unsigned int); void (*restore)(void); }; struct x86_cpu_desc { u8 x86_family; u8 x86_vendor; u8 x86_model; u8 x86_stepping; u32 x86_microcode_rev; }; struct x86_cpuinit_ops { void (*setup_percpu_clockev)(void); void (*early_percpu_clock_init)(void); void (*fixup_cpu_id)(struct cpuinfo_x86 *, int); bool parallel_bringup; }; struct x86_guest { int (*enc_status_change_prepare)(long unsigned int, int, bool); int (*enc_status_change_finish)(long unsigned int, int, bool); bool (*enc_tlb_flush_required)(bool); bool (*enc_cache_flush_required)(void); void (*enc_kexec_begin)(void); void (*enc_kexec_finish)(void); }; struct x86_hybrid_pmu { struct pmu pmu; const char *name; enum hybrid_pmu_type pmu_type; cpumask_t supported_cpus; union perf_capabilities intel_cap; u64 intel_ctrl; u64 pebs_events_mask; u64 config_mask; union { u64 cntr_mask64; long unsigned int cntr_mask[1]; }; union { u64 fixed_cntr_mask64; long unsigned int fixed_cntr_mask[1]; }; struct event_constraint unconstrained; u64 hw_cache_event_ids[42]; u64 hw_cache_extra_regs[42]; struct event_constraint *event_constraints; struct event_constraint *pebs_constraints; struct extra_reg *extra_regs; unsigned int late_ack: 1; unsigned int mid_ack: 1; unsigned int enabled_ack: 1; u64 pebs_data_source[256]; }; struct x86_hyper_init { void (*init_platform)(void); void (*guest_late_init)(void); bool (*x2apic_available)(void); bool (*msi_ext_dest_id)(void); void (*init_mem_mapping)(void); void (*init_after_bootmem)(void); }; struct ghcb; struct x86_hyper_runtime { void (*pin_vcpu)(int); void (*sev_es_hcall_prepare)(struct ghcb *, struct pt_regs *); bool (*sev_es_hcall_finish)(struct ghcb *, struct pt_regs *); bool (*is_private_mmio)(u64); }; struct x86_init_acpi { void (*set_root_pointer)(u64); u64 (*get_root_pointer)(void); void (*reduced_hw_early_init)(void); }; struct x86_init_iommu { int (*iommu_init)(void); }; struct x86_init_irqs { void (*pre_vector_init)(void); void (*intr_init)(void); void 
(*intr_mode_select)(void); void (*intr_mode_init)(void); struct irq_domain * (*create_pci_msi_domain)(void); }; struct x86_init_mpparse { void (*setup_ioapic_ids)(void); void (*find_mptable)(void); void (*early_parse_smp_cfg)(void); void (*parse_smp_cfg)(void); }; struct x86_init_oem { void (*arch_setup)(void); void (*banner)(void); }; struct x86_init_resources { void (*probe_roms)(void); void (*reserve_resources)(void); char * (*memory_setup)(void); void (*dmi_setup)(void); }; struct x86_init_paging { void (*pagetable_init)(void); }; struct x86_init_timers { void (*setup_percpu_clockev)(void); void (*timer_init)(void); void (*wallclock_init)(void); }; struct x86_init_pci { int (*arch_init)(void); int (*init)(void); void (*init_irq)(void); void (*fixup_irqs)(void); }; struct x86_init_ops { struct x86_init_resources resources; struct x86_init_mpparse mpparse; struct x86_init_irqs irqs; struct x86_init_oem oem; struct x86_init_paging paging; struct x86_init_timers timers; struct x86_init_iommu iommu; struct x86_init_pci pci; struct x86_hyper_init hyper; struct x86_init_acpi acpi; }; struct x86_legacy_devices { int pnpbios; }; struct x86_legacy_features { enum x86_legacy_i8042_state i8042; int rtc; int warm_reset; int no_vga; int reserve_bios_regions; struct x86_legacy_devices devices; }; struct x86_mapping_info { void * (*alloc_pgt_page)(void *); void (*free_pgt_page)(void *, void *); void *context; long unsigned int page_flag; long unsigned int offset; bool direct_gbpages; long unsigned int kernpg_flag; }; struct x86_perf_regs { struct pt_regs regs; u64 *xmm_regs; }; struct x86_perf_task_context_opt { int lbr_callstack_users; int lbr_stack_state; int log_id; }; struct x86_perf_task_context { u64 lbr_sel; int tos; int valid_lbrs; struct x86_perf_task_context_opt opt; struct lbr_entry lbr[32]; }; struct x86_perf_task_context_arch_lbr { struct x86_perf_task_context_opt opt; struct lbr_entry entries[0]; }; struct x86_perf_task_context_arch_lbr_xsave { struct x86_perf_task_context_opt opt; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; union { struct xregs_state xsave; struct { struct fxregs_state i387; struct xstate_header header; struct arch_lbr_state lbr; long: 64; long: 64; long: 64; }; }; }; struct x86_platform_ops { long unsigned int (*calibrate_cpu)(void); long unsigned int (*calibrate_tsc)(void); void (*get_wallclock)(struct timespec64 *); int (*set_wallclock)(const struct timespec64 *); void (*iommu_shutdown)(void); bool (*is_untracked_pat_range)(u64, u64); void (*nmi_init)(void); unsigned char (*get_nmi_reason)(void); void (*save_sched_clock_state)(void); void (*restore_sched_clock_state)(void); void (*apic_post_init)(void); struct x86_legacy_features legacy; void (*set_legacy_features)(void); void (*realmode_reserve)(void); void (*realmode_init)(void); struct x86_hyper_runtime hyper; struct x86_guest guest; }; struct x86_pmu_quirk; struct x86_pmu { const char *name; int version; int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(int); void (*enable)(struct perf_event *); void (*disable)(struct perf_event *); void (*assign)(struct perf_event *, int); void (*add)(struct perf_event *); void (*del)(struct perf_event *); void (*read)(struct perf_event *); int (*set_period)(struct perf_event *); u64 (*update)(struct perf_event *); int (*hw_config)(struct perf_event *); int (*schedule_events)(struct cpu_hw_events *, int, int *); unsigned int eventsel; unsigned int perfctr; unsigned int fixedctr; int (*addr_offset)(int, bool); int 
(*rdpmc_index)(int); u64 (*event_map)(int); int max_events; u64 config_mask; union { u64 cntr_mask64; long unsigned int cntr_mask[1]; }; union { u64 fixed_cntr_mask64; long unsigned int fixed_cntr_mask[1]; }; int cntval_bits; u64 cntval_mask; union { long unsigned int events_maskl; long unsigned int events_mask[1]; }; int events_mask_len; int apic; u64 max_period; struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *, int, struct perf_event *); void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *); void (*start_scheduling)(struct cpu_hw_events *); void (*commit_scheduling)(struct cpu_hw_events *, int, int); void (*stop_scheduling)(struct cpu_hw_events *); struct event_constraint *event_constraints; struct x86_pmu_quirk *quirks; void (*limit_period)(struct perf_event *, s64 *); unsigned int late_ack: 1; unsigned int mid_ack: 1; unsigned int enabled_ack: 1; int attr_rdpmc_broken; int attr_rdpmc; struct attribute **format_attrs; ssize_t (*events_sysfs_show)(char *, u64); const struct attribute_group **attr_update; long unsigned int attr_freeze_on_smi; int (*cpu_prepare)(int); void (*cpu_starting)(int); void (*cpu_dying)(int); void (*cpu_dead)(int); void (*check_microcode)(void); void (*sched_task)(struct perf_event_pmu_context *, bool); u64 intel_ctrl; union perf_capabilities intel_cap; unsigned int bts: 1; unsigned int bts_active: 1; unsigned int pebs: 1; unsigned int pebs_active: 1; unsigned int pebs_broken: 1; unsigned int pebs_prec_dist: 1; unsigned int pebs_no_tlb: 1; unsigned int pebs_no_isolation: 1; unsigned int pebs_block: 1; unsigned int pebs_ept: 1; int pebs_record_size; int pebs_buffer_size; u64 pebs_events_mask; void (*drain_pebs)(struct pt_regs *, struct perf_sample_data *); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *); u64 (*pebs_latency_data)(struct perf_event *, u64); long unsigned int large_pebs_flags; u64 rtm_abort_event; u64 pebs_capable; unsigned int lbr_tos; unsigned int lbr_from; unsigned int lbr_to; unsigned int lbr_info; unsigned int lbr_nr; union { u64 lbr_sel_mask; u64 lbr_ctl_mask; }; union { const int *lbr_sel_map; int *lbr_ctl_map; }; bool lbr_double_abort; bool lbr_pt_coexist; unsigned int lbr_has_info: 1; unsigned int lbr_has_tsx: 1; unsigned int lbr_from_flags: 1; unsigned int lbr_to_cycles: 1; unsigned int lbr_depth_mask: 8; unsigned int lbr_deep_c_reset: 1; unsigned int lbr_lip: 1; unsigned int lbr_cpl: 1; unsigned int lbr_filter: 1; unsigned int lbr_call_stack: 1; unsigned int lbr_mispred: 1; unsigned int lbr_timed_lbr: 1; unsigned int lbr_br_type: 1; unsigned int lbr_counters: 4; void (*lbr_reset)(void); void (*lbr_read)(struct cpu_hw_events *); void (*lbr_save)(void *); void (*lbr_restore)(void *); atomic_t lbr_exclusive[3]; int num_topdown_events; void (*swap_task_ctx)(struct perf_event_pmu_context *, struct perf_event_pmu_context *); unsigned int amd_nb_constraints: 1; u64 perf_ctr_pair_en; struct extra_reg *extra_regs; unsigned int flags; struct perf_guest_switch_msr * (*guest_get_msrs)(int *, void *); int (*check_period)(struct perf_event *, u64); int (*aux_output_match)(struct perf_event *); void (*filter)(struct pmu *, int, bool *); int num_hybrid_pmus; struct x86_hybrid_pmu *hybrid_pmu; enum hybrid_cpu_type (*get_hybrid_cpu_type)(void); }; struct x86_pmu_capability { int version; int num_counters_gp; int num_counters_fixed; int bit_width_gp; int bit_width_fixed; unsigned int events_mask; int events_mask_len; unsigned int pebs_ept: 1; }; union x86_pmu_config { struct { 
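/*
 * The bitfields that follow decode a raw x86 PERFEVTSEL-style config word;
 * a hedged sketch of picking one apart (the value is the usual Intel
 * "LLC misses" encoding, event 0x2e umask 0x41, used purely as an example):
 *
 *     union x86_pmu_config c = { .value = 0x412e };
 *     // c.bits.event == 0x2e, c.bits.umask == 0x41
 */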
u64 event: 8; u64 umask: 8; u64 usr: 1; u64 os: 1; u64 edge: 1; u64 pc: 1; u64 interrupt: 1; u64 __reserved1: 1; u64 en: 1; u64 inv: 1; u64 cmask: 8; u64 event2: 4; u64 __reserved2: 4; u64 go: 1; u64 ho: 1; } bits; u64 value; }; struct x86_pmu_lbr { unsigned int nr; unsigned int from; unsigned int to; unsigned int info; bool has_callstack; }; struct x86_pmu_quirk { struct x86_pmu_quirk *next; void (*func)(void); }; struct x86_topology_system { unsigned int dom_shifts[7]; unsigned int dom_size[7]; }; struct x86_xfeat_component { __u32 type; __u32 size; __u32 offset; __u32 flags; }; struct xa_limit { u32 max; u32 min; }; struct xa_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char nr_values; struct xa_node *parent; struct xarray *array; union { struct list_head private_list; struct callback_head callback_head; }; void *slots[64]; union { long unsigned int tags[3]; long unsigned int marks[3]; }; }; typedef void (*xa_update_node_t)(struct xa_node *); struct xa_state { struct xarray *xa; long unsigned int xa_index; unsigned char xa_shift; unsigned char xa_sibs; unsigned char xa_offset; unsigned char xa_pad; struct xa_node *xa_node; struct xa_node *xa_alloc; xa_update_node_t xa_update; struct list_lru *xa_lru; }; struct xattr { const char *name; void *value; size_t value_len; }; struct xattr_handler { const char *name; const char *prefix; int flags; bool (*list)(struct dentry *); int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); int (*set)(const struct xattr_handler *, struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int); }; struct xattr_name { char name[256]; }; struct xb1s_ff_report { __u8 report_id; __u8 enable; __u8 magnitude[4]; __u8 duration_10ms; __u8 start_delay_10ms; __u8 loop_count; }; struct xbc_node { uint16_t next; uint16_t child; uint16_t parent; uint16_t data; }; struct xdp_buff_xsk { struct xdp_buff xdp; u8 cb[24]; dma_addr_t dma; dma_addr_t frame_dma; struct xsk_buff_pool *pool; u64 orig_addr; struct list_head free_list_node; struct list_head xskb_list_node; }; struct xdp_bulk_queue { void *q[8]; struct list_head flush_node; struct bpf_cpu_map_entry *obj; unsigned int count; }; struct xdp_cpumap_stats { unsigned int redirect; unsigned int pass; unsigned int drop; }; struct xdp_desc { __u64 addr; __u32 len; __u32 options; }; struct xdp_dev_bulk_queue { struct xdp_frame *q[16]; struct list_head flush_node; struct net_device *dev; struct net_device *dev_rx; struct bpf_prog *xdp_prog; unsigned int count; }; struct xdp_diag_info { __u32 ifindex; __u32 queue_id; }; struct xdp_diag_msg { __u8 xdiag_family; __u8 xdiag_type; __u16 pad; __u32 xdiag_ino; __u32 xdiag_cookie[2]; }; struct xdp_diag_req { __u8 sdiag_family; __u8 sdiag_protocol; __u16 pad; __u32 xdiag_ino; __u32 xdiag_show; __u32 xdiag_cookie[2]; }; struct xdp_diag_ring { __u32 entries; }; struct xdp_diag_stats { __u64 n_rx_dropped; __u64 n_rx_invalid; __u64 n_rx_full; __u64 n_fill_ring_empty; __u64 n_tx_invalid; __u64 n_tx_ring_empty; }; struct xdp_diag_umem { __u64 size; __u32 id; __u32 num_pages; __u32 chunk_size; __u32 headroom; __u32 ifindex; __u32 queue_id; __u32 flags; __u32 refs; }; struct xdp_frame { void *data; u16 len; u16 headroom; u32 metasize; struct xdp_mem_info mem; struct net_device *dev_rx; u32 frame_sz; u32 flags; }; struct xdp_frame_bulk { int count; void *xa; void *q[16]; }; struct xdp_mem_allocator { struct xdp_mem_info mem; union { void *allocator; struct page_pool *page_pool; 
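/*
 * Note on struct xdp_desc above (illustrative, not from the dump): addr is a
 * byte offset into the registered UMEM area rather than a pointer, so an
 * AF_XDP TX path fills descriptors roughly as:
 *
 *     struct xdp_desc d = {
 *         .addr    = frame_off,  // offset of the frame inside the UMEM
 *         .len     = pkt_len,
 *         .options = 0,
 *     };
 *
 * where frame_off and pkt_len are the application's own bookkeeping.
 */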
}; struct rhash_head node; struct callback_head rcu; }; struct xdp_metadata_ops { int (*xmo_rx_timestamp)(const struct xdp_md *, u64 *); int (*xmo_rx_hash)(const struct xdp_md *, u32 *, enum xdp_rss_hash_type *); int (*xmo_rx_vlan_tag)(const struct xdp_md *, __be16 *, u16 *); }; struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; __u64 flags; }; struct xdp_mmap_offsets { struct xdp_ring_offset rx; struct xdp_ring_offset tx; struct xdp_ring_offset fr; struct xdp_ring_offset cr; }; struct xdp_ring_offset_v1 { __u64 producer; __u64 consumer; __u64 desc; }; struct xdp_mmap_offsets_v1 { struct xdp_ring_offset_v1 rx; struct xdp_ring_offset_v1 tx; struct xdp_ring_offset_v1 fr; struct xdp_ring_offset_v1 cr; }; struct xdp_options { __u32 flags; }; struct xdp_page_head { struct xdp_buff orig_ctx; struct xdp_buff ctx; union { struct { struct {} __empty_frame; struct xdp_frame frame[0]; }; struct { struct {} __empty_data; u8 data[0]; }; }; }; struct xdp_ring { u32 producer; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad1; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 consumer; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad2; u32 flags; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; u32 pad3; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct xdp_rxtx_ring { struct xdp_ring ptrs; struct xdp_desc desc[0]; }; struct xsk_queue; struct xdp_umem; struct xdp_sock { struct sock sk; struct xsk_queue *rx; struct net_device *dev; struct xdp_umem *umem; struct list_head flush_node; struct xsk_buff_pool *pool; u16 queue_id; bool zc; bool sg; enum { XSK_READY = 0, XSK_BOUND = 1, XSK_UNBOUND = 2, } state; long: 64; struct xsk_queue *tx; struct list_head tx_list; u32 tx_budget_spent; spinlock_t rx_lock; u64 rx_dropped; u64 rx_queue_full; struct sk_buff *skb; struct list_head map_list; spinlock_t map_list_lock; struct mutex mutex; struct xsk_queue *fq_tmp; struct xsk_queue *cq_tmp; long: 64; long: 64; long: 64; }; struct xdp_statistics { __u64 rx_dropped; __u64 rx_invalid_descs; __u64 tx_invalid_descs; __u64 rx_ring_full; __u64 rx_fill_ring_empty_descs; __u64 tx_ring_empty_descs; }; struct xdp_test_data { struct xdp_buff *orig_ctx; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; struct xdp_rxq_info rxq; struct net_device *dev; struct page_pool *pp; struct xdp_frame **frames; struct sk_buff **skbs; struct xdp_mem_info mem; u32 batch_size; u32 frame_cnt; long: 64; long: 64; }; struct xdp_txq_info { struct net_device *dev; }; struct xdp_umem { void *addrs; u64 size; u32 headroom; u32 chunk_size; u32 chunks; u32 npgs; struct user_struct *user; refcount_t users; u8 flags; u8 tx_metadata_len; bool zc; struct page **pgs; int id; struct list_head xsk_dma_list; struct work_struct work; }; struct xdp_umem_reg { __u64 addr; __u64 len; __u32 chunk_size; __u32 headroom; __u32 flags; __u32 tx_metadata_len; }; struct xdp_umem_ring { struct xdp_ring ptrs; u64 desc[0]; }; struct xfrm4_protocol { int (*handler)(struct sk_buff *); int (*input_handler)(struct sk_buff *, int, __be32, int); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, u32); struct xfrm4_protocol *next; int priority; }; struct xfrm6_protocol { int (*handler)(struct sk_buff *); int (*input_handler)(struct sk_buff *, int, __be32, int); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); 
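/*
 * Usage sketch for struct xdp_mmap_offsets above (hedged; NUM_DESCS is an
 * assumed application constant): the XDP_MMAP_OFFSETS getsockopt on an
 * AF_XDP socket fills this struct, and each ring is then mapped at its
 * well-known page offset, e.g. for the RX ring:
 *
 *     struct xdp_mmap_offsets off;
 *     socklen_t optlen = sizeof(off);
 *     getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *     void *rx = mmap(NULL,
 *                     off.rx.desc + NUM_DESCS * sizeof(struct xdp_desc),
 *                     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *                     xsk_fd, XDP_PGOFF_RX_RING);
 *     __u32 *producer = (__u32 *)((char *)rx + off.rx.producer);
 */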
struct xfrm6_protocol *next; int priority; }; struct xfrm6_tunnel { int (*handler)(struct sk_buff *); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); struct xfrm6_tunnel *next; int priority; }; struct xfrm_address_filter { xfrm_address_t saddr; xfrm_address_t daddr; __u16 family; __u8 splen; __u8 dplen; }; struct xfrm_aead_name { const char *name; int icvbits; }; struct xfrm_usersa_id { xfrm_address_t daddr; __be32 spi; __u16 family; __u8 proto; }; struct xfrm_aevent_id { struct xfrm_usersa_id sa_id; xfrm_address_t saddr; __u32 flags; __u32 reqid; }; struct xfrm_algo { char alg_name[64]; unsigned int alg_key_len; char alg_key[0]; }; struct xfrm_algo_aead { char alg_name[64]; unsigned int alg_key_len; unsigned int alg_icv_len; char alg_key[0]; }; struct xfrm_algo_aead_info { char *geniv; u16 icv_truncbits; }; struct xfrm_algo_auth { char alg_name[64]; unsigned int alg_key_len; unsigned int alg_trunc_len; char alg_key[0]; }; struct xfrm_algo_auth_info { u16 icv_truncbits; u16 icv_fullbits; }; struct xfrm_algo_comp_info { u16 threshold; }; struct xfrm_algo_encr_info { char *geniv; u16 blockbits; u16 defkeybits; }; struct xfrm_algo_desc { char *name; char *compat; u8 available: 1; u8 pfkey_supported: 1; union { struct xfrm_algo_aead_info aead; struct xfrm_algo_auth_info auth; struct xfrm_algo_encr_info encr; struct xfrm_algo_comp_info comp; } uinfo; struct sadb_alg desc; }; struct xfrm_algo_list { int (*find)(const char *, u32, u32); struct xfrm_algo_desc *algs; int entries; }; struct xfrm_dev_offload { struct net_device *dev; netdevice_tracker dev_tracker; struct net_device *real_dev; long unsigned int offload_handle; u8 dir: 2; u8 type: 2; u8 flags: 2; }; struct xfrm_dst { union { struct dst_entry dst; struct rtable rt; struct rt6_info rt6; } u; struct dst_entry *route; struct dst_entry *child; struct dst_entry *path; struct xfrm_policy *pols[2]; int num_pols; int num_xfrms; u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; u32 child_mtu_cached; u32 route_cookie; u32 path_cookie; }; struct xfrm_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; struct xfrm_encap_tmpl { __u16 encap_type; __be16 encap_sport; __be16 encap_dport; xfrm_address_t encap_oa; }; struct xfrm_flo { struct dst_entry *dst_orig; u8 flags; }; struct xfrm_flow_keys { struct flow_dissector_key_basic basic; struct flow_dissector_key_control control; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; } addrs; struct flow_dissector_key_ip ip; struct flow_dissector_key_icmp icmp; struct flow_dissector_key_ports ports; struct flow_dissector_key_keyid gre; }; struct xfrm_id { xfrm_address_t daddr; __be32 spi; __u8 proto; }; struct xfrm_if_parms { int link; u32 if_id; bool collect_md; }; struct xfrm_if { struct xfrm_if *next; struct net_device *dev; struct net *net; struct xfrm_if_parms p; struct gro_cells gro_cells; }; struct xfrm_if_decode_session_result; struct xfrm_if_cb { bool (*decode_session)(struct sk_buff *, short unsigned int, struct xfrm_if_decode_session_result *); }; struct xfrm_if_decode_session_result { struct net *net; u32 if_id; }; struct xfrm_input_afinfo { u8 family; bool is_ipip; int (*callback)(struct sk_buff *, u8, int); }; struct xfrm_kmaddress { xfrm_address_t local; xfrm_address_t remote; u32 reserved; u16 family; }; struct xfrm_lifetime_cfg { __u64 soft_byte_limit; __u64 hard_byte_limit; __u64 soft_packet_limit; __u64 hard_packet_limit; 
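/*
 * Hedged aside on the lifetime fields being defined here: in the xfrm uapi,
 * "no limit" is expressed as XFRM_INF (~(__u64)0), so userspace that wants
 * an SA without byte/packet expiry typically sets:
 *
 *     struct xfrm_lifetime_cfg lft = {
 *         .soft_byte_limit   = XFRM_INF, .hard_byte_limit   = XFRM_INF,
 *         .soft_packet_limit = XFRM_INF, .hard_packet_limit = XFRM_INF,
 *     };
 */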
__u64 soft_add_expires_seconds; __u64 hard_add_expires_seconds; __u64 soft_use_expires_seconds; __u64 hard_use_expires_seconds; }; struct xfrm_lifetime_cur { __u64 bytes; __u64 packets; __u64 add_time; __u64 use_time; }; struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **, struct netlink_ext_ack *); int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); const struct nla_policy *nla_pol; int nla_max; }; struct xfrm_mark { __u32 v; __u32 m; }; struct xfrm_tmpl; struct xfrm_selector; struct xfrm_migrate; struct xfrm_mgr { struct list_head list; int (*notify)(struct xfrm_state *, const struct km_event *); int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *); struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *); int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16); int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *); int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *); int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *); bool (*is_alive)(const struct km_event *); }; struct xfrm_migrate { xfrm_address_t old_daddr; xfrm_address_t old_saddr; xfrm_address_t new_daddr; xfrm_address_t new_saddr; u8 proto; u8 mode; u16 reserved; u32 reqid; u16 old_family; u16 new_family; }; struct xfrm_mode { u8 encap; u8 family; u8 flags; }; struct xfrm_mode_skb_cb { struct xfrm_tunnel_skb_cb header; __be16 id; __be16 frag_off; u8 ihl; u8 tos; u8 ttl; u8 protocol; u8 optlen; u8 flow_lbl[3]; }; struct xfrm_pol_inexact_key { possible_net_t net; u32 if_id; u16 family; u8 dir; u8 type; }; struct xfrm_pol_inexact_bin { struct xfrm_pol_inexact_key k; struct rhash_head head; struct hlist_head hhead; seqcount_spinlock_t count; struct rb_root root_d; struct rb_root root_s; struct list_head inexact_bins; struct callback_head rcu; }; struct xfrm_pol_inexact_candidates { struct hlist_head *res[4]; }; struct xfrm_pol_inexact_node { struct rb_node node; union { xfrm_address_t addr; struct callback_head rcu; }; u8 prefixlen; struct rb_root root; struct hlist_head hhead; }; struct xfrm_selector { xfrm_address_t daddr; xfrm_address_t saddr; __be16 dport; __be16 dport_mask; __be16 sport; __be16 sport_mask; __u16 family; __u8 prefixlen_d; __u8 prefixlen_s; __u8 proto; int ifindex; __kernel_uid32_t user; }; struct xfrm_policy_walk_entry { struct list_head all; u8 dead; }; struct xfrm_policy_queue { struct sk_buff_head hold_queue; struct timer_list hold_timer; long unsigned int timeout; }; struct xfrm_tmpl { struct xfrm_id id; xfrm_address_t saddr; short unsigned int encap_family; u32 reqid; u8 mode; u8 share; u8 optional; u8 allalgs; u32 aalgos; u32 ealgos; u32 calgos; }; struct xfrm_sec_ctx; struct xfrm_policy { possible_net_t xp_net; struct hlist_node bydst; struct hlist_node byidx; rwlock_t lock; refcount_t refcnt; u32 pos; struct timer_list timer; atomic_t genid; u32 priority; u32 index; u32 if_id; struct xfrm_mark mark; struct xfrm_selector selector; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; struct xfrm_policy_walk_entry walk; struct xfrm_policy_queue polq; bool bydst_reinsert; u8 type; u8 action; u8 flags; u8 xfrm_nr; u16 family; struct xfrm_sec_ctx *security; struct xfrm_tmpl xfrm_vec[6]; struct callback_head rcu; struct xfrm_dev_offload xdo; }; struct xfrm_policy_afinfo { struct dst_ops 
*dst_ops; struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32); int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32); int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *); struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *); }; struct xfrm_policy_walk { struct xfrm_policy_walk_entry walk; u8 type; u32 seq; }; struct xfrm_replay_state { __u32 oseq; __u32 seq; __u32 bitmap; }; struct xfrm_replay_state_esn { unsigned int bmp_len; __u32 oseq; __u32 seq; __u32 oseq_hi; __u32 seq_hi; __u32 replay_window; __u32 bmp[0]; }; struct xfrm_sec_ctx { __u8 ctx_doi; __u8 ctx_alg; __u16 ctx_len; __u32 ctx_sid; char ctx_str[0]; }; struct xfrm_spi_skb_cb { struct xfrm_tunnel_skb_cb header; unsigned int daddroff; unsigned int family; __be32 seq; }; struct xfrm_state_walk { struct list_head all; u8 state; u8 dying; u8 proto; u32 seq; struct xfrm_address_filter *filter; }; struct xfrm_stats { __u32 replay_window; __u32 replay; __u32 integrity_failed; }; struct xfrm_type; struct xfrm_type_offload; struct xfrm_state { possible_net_t xs_net; union { struct hlist_node gclist; struct hlist_node bydst; }; union { struct hlist_node dev_gclist; struct hlist_node bysrc; }; struct hlist_node byspi; struct hlist_node byseq; refcount_t refcnt; spinlock_t lock; struct xfrm_id id; struct xfrm_selector sel; struct xfrm_mark mark; u32 if_id; u32 tfcpad; u32 genid; struct xfrm_state_walk km; struct { u32 reqid; u8 mode; u8 replay_window; u8 aalgo; u8 ealgo; u8 calgo; u8 flags; u16 family; xfrm_address_t saddr; int header_len; int trailer_len; u32 extra_flags; struct xfrm_mark smark; } props; struct xfrm_lifetime_cfg lft; struct xfrm_algo_auth *aalg; struct xfrm_algo *ealg; struct xfrm_algo *calg; struct xfrm_algo_aead *aead; const char *geniv; __be16 new_mapping_sport; u32 new_mapping; u32 mapping_maxage; struct xfrm_encap_tmpl *encap; struct sock *encap_sk; u32 nat_keepalive_interval; time64_t nat_keepalive_expiration; xfrm_address_t *coaddr; struct xfrm_state *tunnel; atomic_t tunnel_users; struct xfrm_replay_state replay; struct xfrm_replay_state_esn *replay_esn; struct xfrm_replay_state preplay; struct xfrm_replay_state_esn *preplay_esn; enum xfrm_replay_mode repl_mode; u32 xflags; u32 replay_maxage; u32 replay_maxdiff; struct timer_list rtimer; struct xfrm_stats stats; struct xfrm_lifetime_cur curlft; struct hrtimer mtimer; struct xfrm_dev_offload xso; long int saved_tmo; time64_t lastused; struct page_frag xfrag; const struct xfrm_type *type; struct xfrm_mode inner_mode; struct xfrm_mode inner_mode_iaf; struct xfrm_mode outer_mode; const struct xfrm_type_offload *type_offload; struct xfrm_sec_ctx *security; void *data; u8 dir; }; struct xfrm_state_afinfo { u8 family; u8 proto; const struct xfrm_type_offload *type_offload_esp; const struct xfrm_type *type_esp; const struct xfrm_type *type_ipip; const struct xfrm_type *type_ipip6; const struct xfrm_type *type_comp; const struct xfrm_type *type_ah; const struct xfrm_type *type_routing; const struct xfrm_type *type_dstopts; int (*output)(struct net *, struct sock *, struct sk_buff *); int (*transport_finish)(struct sk_buff *, int); void (*local_error)(struct sk_buff *, u32); }; struct xfrm_trans_cb { union { struct inet_skb_parm h4; struct inet6_skb_parm h6; } header; int (*finish)(struct net *, struct sock *, struct sk_buff *); struct net *net; }; struct xfrm_trans_tasklet { struct work_struct work; spinlock_t queue_lock; struct sk_buff_head queue; }; 
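/*
 * A minimal sketch (not part of the generated dump) of how the flexible
 * bitmap in struct xfrm_replay_state_esn above is sized: replay_window is a
 * bit count while bmp_len counts 32-bit words, so the total object size for
 * a given window is the base struct plus the rounded-up bitmap. The helper
 * name below is ours, for illustration only.
 */
static inline size_t xfrm_replay_esn_total_size(__u32 replay_window)
{
	__u32 bmp_len = (replay_window + 31) / 32;  /* round bits up to u32 words */

	return sizeof(struct xfrm_replay_state_esn) + bmp_len * sizeof(__u32);
}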
struct xfrm_translator { int (*alloc_compat)(struct sk_buff *, const struct nlmsghdr *); struct nlmsghdr * (*rcv_msg_compat)(const struct nlmsghdr *, int, const struct nla_policy *, struct netlink_ext_ack *); int (*xlate_user_policy_sockptr)(u8 **, int); struct module *owner; }; struct xfrm_tunnel { int (*handler)(struct sk_buff *); int (*cb_handler)(struct sk_buff *, int); int (*err_handler)(struct sk_buff *, u32); struct xfrm_tunnel *next; int priority; }; struct xfrm_type { struct module *owner; u8 proto; u8 flags; int (*init_state)(struct xfrm_state *, struct netlink_ext_ack *); void (*destructor)(struct xfrm_state *); int (*input)(struct xfrm_state *, struct sk_buff *); int (*output)(struct xfrm_state *, struct sk_buff *); int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); }; struct xfrm_type_offload { struct module *owner; u8 proto; void (*encap)(struct xfrm_state *, struct sk_buff *); int (*input_tail)(struct xfrm_state *, struct sk_buff *); int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); }; struct xfrm_userpolicy_info { struct xfrm_selector sel; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; __u32 priority; __u32 index; __u8 dir; __u8 action; __u8 flags; __u8 share; }; struct xfrm_user_acquire { struct xfrm_id id; xfrm_address_t saddr; struct xfrm_selector sel; struct xfrm_userpolicy_info policy; __u32 aalgos; __u32 ealgos; __u32 calgos; __u32 seq; }; struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; xfrm_address_t saddr; struct xfrm_lifetime_cfg lft; struct xfrm_lifetime_cur curlft; struct xfrm_stats stats; __u32 seq; __u32 reqid; __u16 family; __u8 mode; __u8 replay_window; __u8 flags; }; struct xfrm_user_expire { struct xfrm_usersa_info state; __u8 hard; }; struct xfrm_user_mapping { struct xfrm_usersa_id id; __u32 reqid; xfrm_address_t old_saddr; xfrm_address_t new_saddr; __be16 old_sport; __be16 new_sport; }; struct xfrm_user_offload { int ifindex; __u8 flags; }; struct xfrm_user_polexpire { struct xfrm_userpolicy_info pol; __u8 hard; }; struct xfrm_user_report { __u8 proto; struct xfrm_selector sel; }; struct xfrm_user_sec_ctx { __u16 len; __u16 exttype; __u8 ctx_alg; __u8 ctx_doi; __u16 ctx_len; }; struct xfrm_user_tmpl { struct xfrm_id id; __u16 family; xfrm_address_t saddr; __u32 reqid; __u8 mode; __u8 share; __u8 optional; __u32 aalgos; __u32 ealgos; __u32 calgos; }; struct xfrm_userpolicy_default { __u8 in; __u8 fwd; __u8 out; }; struct xfrm_userpolicy_id { struct xfrm_selector sel; __u32 index; __u8 dir; }; struct xfrm_userpolicy_type { __u8 type; __u16 reserved1; __u8 reserved2; }; struct xfrm_usersa_flush { __u8 proto; }; struct xfrm_userspi_info { struct xfrm_usersa_info info; __u32 min; __u32 max; }; struct xfrmi_net { struct xfrm_if *xfrmi[256]; struct xfrm_if *collect_md_xfrmi; }; struct xfrmk_sadinfo { u32 sadhcnt; u32 sadhmcnt; u32 sadcnt; }; struct xfrmk_spdinfo { u32 incnt; u32 outcnt; u32 fwdcnt; u32 inscnt; u32 outscnt; u32 fwdscnt; u32 spdhcnt; u32 spdhmcnt; }; struct xfrmu_sadhinfo { __u32 sadhcnt; __u32 sadhmcnt; }; struct xfrmu_spdhinfo { __u32 spdhcnt; __u32 spdhmcnt; }; struct xfrmu_spdhthresh { __u8 lbits; __u8 rbits; }; struct xfrmu_spdinfo { __u32 incnt; __u32 outcnt; __u32 fwdcnt; __u32 inscnt; __u32 outscnt; __u32 fwdscnt; }; struct xol_area { wait_queue_head_t wq; atomic_t slot_count; long unsigned int *bitmap; struct page *page; long unsigned int vaddr; }; struct xps_map; struct xps_dev_maps { struct callback_head rcu; unsigned int nr_ids; s16 num_tc; struct 
xps_map *attr_map[0]; }; struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0]; }; struct xsk_buff_pool { struct device *dev; struct net_device *netdev; struct list_head xsk_tx_list; spinlock_t xsk_tx_list_lock; refcount_t users; struct xdp_umem *umem; struct work_struct work; struct list_head free_list; struct list_head xskb_list; u32 heads_cnt; u16 queue_id; long: 64; long: 64; long: 64; long: 64; struct xsk_queue *fq; struct xsk_queue *cq; dma_addr_t *dma_pages; struct xdp_buff_xsk *heads; struct xdp_desc *tx_descs; u64 chunk_mask; u64 addrs_cnt; u32 free_list_cnt; u32 dma_pages_cnt; u32 free_heads_cnt; u32 headroom; u32 chunk_size; u32 chunk_shift; u32 frame_len; u8 tx_metadata_len; u8 cached_need_wakeup; bool uses_need_wakeup; bool unaligned; bool tx_sw_csum; void *addrs; spinlock_t cq_lock; struct xdp_buff_xsk *free_heads[0]; long: 64; long: 64; long: 64; }; struct xsk_cb_desc { void *src; u8 off; u8 bytes; }; struct xsk_dma_map { dma_addr_t *dma_pages; struct device *dev; struct net_device *netdev; refcount_t users; struct list_head list; u32 dma_pages_cnt; }; struct xsk_map { struct bpf_map map; spinlock_t lock; atomic_t count; struct xdp_sock *xsk_map[0]; }; struct xsk_map_node { struct list_head node; struct xsk_map *map; struct xdp_sock **map_entry; }; struct xsk_queue { u32 ring_mask; u32 nentries; u32 cached_prod; u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; u64 queue_empty_descs; size_t ring_vmalloc_size; }; struct xsk_tx_metadata { __u64 flags; union { struct { __u16 csum_start; __u16 csum_offset; } request; struct { __u64 tx_timestamp; } completion; }; }; struct xsk_tx_metadata_ops { void (*tmo_request_timestamp)(void *); u64 (*tmo_fill_timestamp)(void *); void (*tmo_request_checksum)(u16, u16, void *); }; struct xt_match; struct xt_action_param { union { const struct xt_match *match; const struct xt_target *target; }; union { const void *matchinfo; const void *targinfo; }; const struct nf_hook_state *state; unsigned int thoff; u16 fragoff; bool hotdrop; }; struct xt_af { struct mutex mutex; struct list_head match; struct list_head target; }; struct xt_bpf_info { __u16 bpf_program_num_elem; struct sock_filter bpf_program[64]; struct bpf_prog *filter; }; struct xt_bpf_info_v1 { __u16 mode; __u16 bpf_program_num_elem; __s32 fd; union { struct sock_filter bpf_program[64]; char path[512]; }; struct bpf_prog *filter; }; struct xt_connmark_mtinfo1 { __u32 mark; __u32 mask; __u8 invert; }; struct xt_connmark_tginfo1 { __u32 ctmark; __u32 ctmask; __u32 nfmask; __u8 mode; }; struct xt_connmark_tginfo2 { __u32 ctmark; __u32 ctmask; __u32 nfmask; __u8 shift_dir; __u8 shift_bits; __u8 mode; }; struct xt_counters_info { char name[32]; unsigned int num_counters; struct xt_counters counters[0]; }; struct xt_ct_target_info { __u16 flags; __u16 zone; __u32 ct_events; __u32 exp_events; char helper[16]; struct nf_conn *ct; }; struct xt_ct_target_info_v1 { __u16 flags; __u16 zone; __u32 ct_events; __u32 exp_events; char helper[16]; char timeout[32]; struct nf_conn *ct; }; struct xt_entry_match { union { struct { __u16 match_size; char name[29]; __u8 revision; } user; struct { __u16 match_size; struct xt_match *match; } kernel; __u16 match_size; } u; unsigned char data[0]; }; struct xt_get_revision { char name[29]; __u8 revision; }; struct xt_mtchk_param; struct xt_mtdtor_param; struct xt_match { struct list_head list; const char name[29]; u_int8_t revision; bool (*match)(const struct sk_buff *, struct xt_action_param *); int 
(*checkentry)(const struct xt_mtchk_param *); void (*destroy)(const struct xt_mtdtor_param *); struct module *me; const char *table; unsigned int matchsize; unsigned int usersize; unsigned int hooks; short unsigned int proto; short unsigned int family; }; struct xt_mtchk_param { struct net *net; const char *table; const void *entryinfo; const struct xt_match *match; void *matchinfo; unsigned int hook_mask; u_int8_t family; bool nft_compat; }; struct xt_mtdtor_param { struct net *net; const struct xt_match *match; void *matchinfo; u_int8_t family; }; struct xt_percpu_counter_alloc_state { unsigned int off; const char *mem; }; struct xt_pernet { struct list_head tables[11]; }; struct xt_state_info { unsigned int statemask; }; struct xt_statistic_priv; struct xt_statistic_info { __u16 mode; __u16 flags; union { struct { __u32 probability; } random; struct { __u32 every; __u32 packet; __u32 count; } nth; } u; struct xt_statistic_priv *master; }; struct xt_statistic_priv { atomic_t count; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; long: 64; }; struct xt_table_info; struct xt_table { struct list_head list; unsigned int valid_hooks; struct xt_table_info *private; struct nf_hook_ops *ops; struct module *me; u_int8_t af; int priority; const char name[32]; }; struct xt_table_info { unsigned int size; unsigned int number; unsigned int initial_entries; unsigned int hook_entry[5]; unsigned int underflow[5]; unsigned int stacksize; void ***jumpstack; unsigned char entries[0]; }; struct xt_tgchk_param; struct xt_tgdtor_param; struct xt_target { struct list_head list; const char name[29]; u_int8_t revision; unsigned int (*target)(struct sk_buff *, const struct xt_action_param *); int (*checkentry)(const struct xt_tgchk_param *); void (*destroy)(const struct xt_tgdtor_param *); struct module *me; const char *table; unsigned int targetsize; unsigned int usersize; unsigned int hooks; short unsigned int proto; short unsigned int family; }; struct xt_tcp { __u16 spts[2]; __u16 dpts[2]; __u8 option; __u8 flg_mask; __u8 flg_cmp; __u8 invflags; }; struct xt_template { struct list_head list; int (*table_init)(struct net *); struct module *me; char name[32]; }; struct xt_tgchk_param { struct net *net; const char *table; const void *entryinfo; const struct xt_target *target; void *targinfo; unsigned int hook_mask; u_int8_t family; bool nft_compat; }; struct xt_tgdtor_param { struct net *net; const struct xt_target *target; void *targinfo; u_int8_t family; }; struct xt_udp { __u16 spts[2]; __u16 dpts[2]; __u8 invflags; }; struct xxh32_state { uint32_t total_len_32; uint32_t large_len; uint32_t v1; uint32_t v2; uint32_t v3; uint32_t v4; uint32_t mem32[4]; uint32_t memsize; }; struct xxhash64_desc_ctx { struct xxh64_state xxhstate; }; struct xxhash64_tfm_ctx { u64 seed; }; struct xz_buf { const uint8_t *in; size_t in_pos; size_t in_size; uint8_t *out; size_t out_pos; size_t out_size; }; struct xz_dec_hash { vli_type unpadded; vli_type uncompressed; uint32_t crc32; }; struct xz_dec_lzma2; struct xz_dec_bcj; struct xz_dec { enum { SEQ_STREAM_HEADER = 0, SEQ_BLOCK_START = 1, SEQ_BLOCK_HEADER = 2, SEQ_BLOCK_UNCOMPRESS = 3, SEQ_BLOCK_PADDING = 4, SEQ_BLOCK_CHECK = 5, SEQ_INDEX = 6, SEQ_INDEX_PADDING = 7, SEQ_INDEX_CRC32 = 8, SEQ_STREAM_FOOTER = 9, } sequence; uint32_t pos; vli_type vli; size_t in_start; size_t out_start; uint32_t crc32; enum xz_check check_type; enum xz_mode mode; bool allow_buf_error; struct { vli_type compressed; vli_type uncompressed; uint32_t size; } block_header; struct { vli_type 
compressed; vli_type uncompressed; vli_type count; struct xz_dec_hash hash; } block; struct { enum { SEQ_INDEX_COUNT = 0, SEQ_INDEX_UNPADDED = 1, SEQ_INDEX_UNCOMPRESSED = 2, } sequence; vli_type size; vli_type count; struct xz_dec_hash hash; } index; struct { size_t pos; size_t size; uint8_t buf[1024]; } temp; struct xz_dec_lzma2 *lzma2; struct xz_dec_bcj *bcj; bool bcj_active; }; struct xz_dec_bcj { enum { BCJ_X86 = 4, BCJ_POWERPC = 5, BCJ_IA64 = 6, BCJ_ARM = 7, BCJ_ARMTHUMB = 8, BCJ_SPARC = 9, BCJ_ARM64 = 10, BCJ_RISCV = 11, } type; enum xz_ret ret; bool single_call; uint32_t pos; uint32_t x86_prev_mask; uint8_t *out; size_t out_pos; size_t out_size; struct { size_t filtered; size_t size; uint8_t buf[16]; } temp; }; struct xz_dec_lzma2 { struct rc_dec rc; struct dictionary dict; struct lzma2_dec lzma2; struct lzma_dec lzma; struct { uint32_t size; uint8_t buf[63]; } temp; }; struct z_stream_s { const Byte *next_in; uLong avail_in; uLong total_in; Byte *next_out; uLong avail_out; uLong total_out; char *msg; struct internal_state *state; void *workspace; int data_type; uLong adler; uLong reserved; }; typedef struct z_stream_s z_stream; typedef z_stream *z_streamp; struct zap_details { struct folio *single_folio; bool even_cows; zap_flags_t zap_flags; }; union zen_patch_rev { struct { __u32 rev: 8; __u32 stepping: 4; __u32 model: 4; __u32 __reserved: 4; __u32 ext_model: 4; __u32 ext_fam: 8; }; __u32 ucode_rev; }; struct zpff_device { struct hid_report *report; }; typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *); typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *); typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *); typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *, struct acpi_gpe_block_info *, void *); typedef acpi_status (*acpi_init_handler)(acpi_handle, u32); typedef u32 (*acpi_interface_handler)(acpi_string, u32); typedef u32 (*acpi_osd_handler)(void *); typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *); typedef acpi_status (*acpi_table_handler)(u32, void *, void *); typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **); typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *); typedef void amd_pmu_branch_reset_t(void); typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); typedef void (*blake2b_compress_t)(struct blake2b_state *, const u8 *, size_t, u32); typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); typedef unsigned int (*bpf_dispatcher_fn)(const void *, const struct bpf_insn *, unsigned int (*)(const void *, const struct bpf_insn *)); typedef unsigned int (*bpf_func_t)(const void *, const struct bpf_insn *); typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *, const void *); typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *, struct bpf_tramp_run_ctx *); typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *, u64, struct bpf_tramp_run_ctx *); typedef u64 (*btf_bpf_bind)(struct 
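/*
 * Usage sketch for struct xz_buf above (hedged; dec is an xz_dec handle
 * obtained from xz_dec_init()): the decoder consumes in[in_pos..in_size)
 * and produces into out[out_pos..out_size), so a single-shot decompression
 * looks roughly like:
 *
 *     struct xz_buf b = {
 *         .in  = src, .in_pos  = 0, .in_size  = src_len,
 *         .out = dst, .out_pos = 0, .out_size = dst_len,
 *     };
 *     enum xz_ret ret = xz_dec_run(dec, &b);  // XZ_STREAM_END on success
 */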
bpf_sock_addr_kern *, struct sockaddr *, int); typedef u64 (*btf_bpf_bprm_opts_set)(struct linux_binprm *, u64); typedef u64 (*btf_bpf_btf_find_by_name_kind)(char *, int, u32, int); typedef u64 (*btf_bpf_cgrp_storage_delete)(struct bpf_map *, struct cgroup *); typedef u64 (*btf_bpf_cgrp_storage_get)(struct bpf_map *, struct cgroup *, void *, u64, gfp_t); typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); typedef u64 (*btf_bpf_copy_from_user_task)(void *, u32, const void *, struct task_struct *, u64); typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); typedef u64 (*btf_bpf_dynptr_data)(const struct bpf_dynptr_kern *, u32, u32); typedef u64 (*btf_bpf_dynptr_from_mem)(void *, u32, u64, struct bpf_dynptr_kern *); typedef u64 (*btf_bpf_dynptr_read)(void *, u32, const struct bpf_dynptr_kern *, u32, u64); typedef u64 (*btf_bpf_dynptr_write)(const struct bpf_dynptr_kern *, u32, void *, u32, u64); typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_find_vma)(struct task_struct *, u64, bpf_callback_t, void *, u64); typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); typedef u64 (*btf_bpf_get_attach_cookie)(void *); typedef u64 (*btf_bpf_get_attach_cookie_kprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_attach_cookie_pe)(struct bpf_perf_event_data_kern *); typedef u64 (*btf_bpf_get_attach_cookie_trace)(void *); typedef u64 (*btf_bpf_get_attach_cookie_tracing)(void *); typedef u64 (*btf_bpf_get_attach_cookie_uprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_branch_snapshot)(void *, u32, u64); typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); typedef u64 (*btf_bpf_get_cgroup_classid_curr)(void); typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); typedef u64 (*btf_bpf_get_current_cgroup_id)(void); typedef u64 (*btf_bpf_get_current_comm)(char *, u32); typedef u64 (*btf_bpf_get_current_pid_tgid)(void); typedef u64 (*btf_bpf_get_current_task)(void); typedef u64 (*btf_bpf_get_current_task_btf)(void); typedef u64 (*btf_bpf_get_current_uid_gid)(void); typedef u64 (*btf_bpf_get_func_ip_kprobe)(struct pt_regs *); typedef u64 (*btf_bpf_get_func_ip_kprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_func_ip_tracing)(void *); typedef u64 (*btf_bpf_get_func_ip_uprobe_multi)(struct pt_regs *); typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); typedef u64 (*btf_bpf_get_netns_cookie_sk_msg)(struct sk_msg *); typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); typedef u64 (*btf_bpf_get_netns_cookie_sock_ops)(struct bpf_sock_ops_kern *); typedef u64 (*btf_bpf_get_netns_cookie_sockopt)(struct bpf_sockopt_kern *); typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); typedef u64 (*btf_bpf_get_numa_node_id)(void); typedef u64 (*btf_bpf_get_raw_cpu_id)(void); typedef u64 
(*btf_bpf_get_retval)(void); typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); typedef u64 (*btf_bpf_get_smp_processor_id)(void); typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); typedef u64 (*btf_bpf_get_stack_sleepable)(struct pt_regs *, void *, u32, u64); typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); typedef u64 (*btf_bpf_get_task_stack_sleepable)(struct task_struct *, void *, u32, u64); typedef u64 (*btf_bpf_ima_file_hash)(struct file *, void *, u32); typedef u64 (*btf_bpf_ima_inode_hash)(struct inode *, void *, u32); typedef u64 (*btf_bpf_inode_storage_delete)(struct bpf_map *, struct inode *); typedef u64 (*btf_bpf_inode_storage_get)(struct bpf_map *, struct inode *, void *, u64, gfp_t); typedef u64 (*btf_bpf_jiffies64)(void); typedef u64 (*btf_bpf_kallsyms_lookup_name)(const char *, int, int, u64 *); typedef u64 (*btf_bpf_kptr_xchg)(void *, void *); typedef u64 (*btf_bpf_ktime_get_boot_ns)(void); typedef u64 (*btf_bpf_ktime_get_coarse_ns)(void); typedef u64 (*btf_bpf_ktime_get_ns)(void); typedef u64 (*btf_bpf_ktime_get_tai_ns)(void); typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); typedef u64 (*btf_bpf_loop)(u32, void *, void *, u64); typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_lwt_seg6_action)(struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_lwt_seg6_adjust_srh)(struct sk_buff *, u32, s32); typedef u64 (*btf_bpf_lwt_seg6_store_bytes)(struct sk_buff *, u32, const void *, u32); typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_lookup_percpu_elem)(struct bpf_map *, void *, u32); typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); typedef u64 
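/*
 * Hedged aside on the btf_bpf_* typedefs in this region: they appear to be
 * emitted by the kernel's BPF_CALL_x() helper-definition macros, which keep
 * a function-pointer typedef carrying the helper's real argument types so
 * that the prototype survives into BTF. Roughly, for a two-argument helper:
 *
 *     BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 *     // expands to (simplified):
 *     //   u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 *     //   typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *);
 */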
(*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); typedef u64 (*btf_bpf_override_return)(struct pt_regs *, long unsigned int); typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); typedef u64 (*btf_bpf_rc_keydown)(u32 *, u32, u64, u32); typedef u64 (*btf_bpf_rc_pointer_rel)(u32 *, s32, s32); typedef u64 (*btf_bpf_rc_repeat)(u32 *); typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); typedef u64 (*btf_bpf_redirect)(u32, u64); typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); typedef u64 (*btf_bpf_redirect_peer)(u32, u64); typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); typedef u64 (*btf_bpf_ringbuf_discard_dynptr)(struct bpf_dynptr_kern *, u64); typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); typedef u64 (*btf_bpf_ringbuf_reserve_dynptr)(struct bpf_map *, u32, u64, struct bpf_dynptr_kern *); typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); typedef u64 (*btf_bpf_ringbuf_submit_dynptr)(struct bpf_dynptr_kern *, u64); typedef u64 (*btf_bpf_send_signal)(u32); typedef u64 (*btf_bpf_send_signal_thread)(u32); typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); typedef u64 (*btf_bpf_set_retval)(int); typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); typedef u64 (*btf_bpf_sk_getsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); typedef u64 
(*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); typedef u64 (*btf_bpf_sk_release)(struct sock *); typedef u64 (*btf_bpf_sk_setsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64, gfp_t); typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64, gfp_t); typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); typedef u64 (*btf_bpf_skb_get_xfrm_state)(struct sk_buff *, u32, struct bpf_xfrm_state *, u32, u64); typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); typedef u64 (*btf_bpf_skb_set_tstamp)(struct sk_buff *, u64, u32); typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_skc_to_mptcp_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); typedef u64 
(*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); typedef u64 (*btf_bpf_skc_to_unix_sock)(struct sock *); typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_sock_from_file)(struct file *); typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); typedef u64 (*btf_bpf_strncmp)(const char *, u32, const char *); typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, s64 *); typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, u64 *); typedef u64 (*btf_bpf_sys_bpf)(int, union bpf_attr *, u32); typedef u64 (*btf_bpf_sys_close)(u32); typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); typedef u64 (*btf_bpf_task_pt_regs)(struct task_struct *); typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); typedef u64 (*btf_bpf_task_storage_delete_recur)(struct bpf_map *, struct task_struct *); typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); typedef u64 (*btf_bpf_task_storage_get_recur)(struct bpf_map *, struct task_struct *, void *, u64, gfp_t); typedef u64 (*btf_bpf_tc_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tc_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tc_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *, struct tcphdr *); typedef u64 (*btf_bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *); typedef u64 
(*btf_bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *, struct tcphdr *, u32); typedef u64 (*btf_bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *, struct tcphdr *, u32); typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); typedef u64 (*btf_bpf_tcp_sock)(struct sock *); typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); typedef u64 (*btf_bpf_timer_cancel)(struct bpf_async_kern *); typedef u64 (*btf_bpf_timer_init)(struct bpf_async_kern *, struct bpf_map *, u64); typedef u64 (*btf_bpf_timer_set_callback)(struct bpf_async_kern *, void *, struct bpf_prog_aux *); typedef u64 (*btf_bpf_timer_start)(struct bpf_async_kern *, u64, u64); typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); typedef u64 (*btf_bpf_trace_vprintk)(char *, u32, const void *, u32); typedef u64 (*btf_bpf_unlocked_sk_getsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_unlocked_sk_setsockopt)(struct sock *, int, int, char *, int); typedef u64 (*btf_bpf_user_ringbuf_drain)(struct bpf_map *, void *, void *, u64); typedef u64 (*btf_bpf_user_rnd_u32)(void); typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); typedef u64 (*btf_bpf_xdp_get_buff_len)(struct xdp_buff *); typedef u64 (*btf_bpf_xdp_load_bytes)(struct xdp_buff *, u32, void *, u32); typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u64, u64); typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); typedef u64 (*btf_bpf_xdp_store_bytes)(struct xdp_buff *, u32, void *, u32); typedef u64 (*btf_get_func_arg)(void *, u32, u64 *); typedef u64 (*btf_get_func_arg_cnt)(void *); typedef u64 (*btf_get_func_ret)(void *, u64 *); typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); typedef void (*btf_trace_ack_update_msk)(void *, u64, u64, u64, u64, u64); typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); typedef void (*btf_trace_aer_event)(void *, const char *, const u32, const u8, const u8, struct pcie_tlp_log *); typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); typedef void (*btf_trace_alloc_vmap_area)(void *, long unsigned int, long unsigned int, long unsigned int, 
long unsigned int, long unsigned int, int); typedef void (*btf_trace_amd_pstate_perf)(void *, long unsigned int, long unsigned int, long unsigned int, u64, u64, u64, u64, unsigned int, bool, bool); typedef void (*btf_trace_arm_event)(void *, const struct cper_sec_proc_arm *); typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); typedef void (*btf_trace_block_bio_queue)(void *, struct bio *); typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); typedef void (*btf_trace_block_getrq)(void *, struct bio *); typedef void (*btf_trace_block_io_done)(void *, struct request *); typedef void (*btf_trace_block_io_start)(void *, struct request *); typedef void (*btf_trace_block_plug)(void *, struct request_queue *); typedef void (*btf_trace_block_rq_complete)(void *, struct request *, blk_status_t, unsigned int); typedef void (*btf_trace_block_rq_error)(void *, struct request *, blk_status_t, unsigned int); typedef void (*btf_trace_block_rq_insert)(void *, struct request *); typedef void (*btf_trace_block_rq_issue)(void *, struct request *); typedef void (*btf_trace_block_rq_merge)(void *, struct request *); typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); typedef void (*btf_trace_bpf_test_finish)(void *, int *); typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); typedef void (*btf_trace_bpf_trigger_tp)(void *, int); typedef void (*btf_trace_bpf_xdp_link_attach_failed)(void *, const char *); typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_call_function_entry)(void *, int); typedef void (*btf_trace_call_function_exit)(void *, int); typedef void (*btf_trace_call_function_single_entry)(void *, int); typedef void (*btf_trace_call_function_single_exit)(void *, int); typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const 
char *); typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_cpu_lock_contended_fastpath)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_cpu_locked)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_cpu_locked_fastpath)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_cpu_unlock)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_cpu_unlock_fastpath)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_lock_contended)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_locked)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_rstat_unlock)(void *, struct cgroup *, int, bool); typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_cma_alloc_busy_retry)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int); typedef void (*btf_trace_cma_alloc_finish)(void *, const char *, long unsigned int, const struct page *, long unsigned int, unsigned int, int); typedef void (*btf_trace_cma_alloc_start)(void *, const char *, long unsigned int, unsigned int); typedef void (*btf_trace_cma_release)(void *, const char *, long unsigned int, const struct page *, long unsigned int); typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); typedef void (*btf_trace_console)(void *, const char *, size_t); typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *, void *); typedef void (*btf_trace_contention_begin)(void *, void *, unsigned int); typedef void (*btf_trace_contention_end)(void *, void *, int); typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); typedef void (*btf_trace_cpu_idle_miss)(void *, unsigned int, unsigned int, bool); typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); typedef void (*btf_trace_csd_function_entry)(void *, smp_call_func_t, call_single_data_t *); typedef void (*btf_trace_csd_function_exit)(void *, 
smp_call_func_t, call_single_data_t *); typedef void (*btf_trace_csd_queue_cpu)(void *, const unsigned int, long unsigned int, smp_call_func_t, call_single_data_t *); typedef void (*btf_trace_deferred_error_apic_entry)(void *, int); typedef void (*btf_trace_deferred_error_apic_exit)(void *, int); typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); typedef void (*btf_trace_devlink_health_recover_aborted)(void *, const struct devlink *, const char *, bool, u64); typedef void (*btf_trace_devlink_health_report)(void *, const struct devlink *, const char *, const char *); typedef void (*btf_trace_devlink_health_reporter_state_update)(void *, const struct devlink *, const char *, bool); typedef void (*btf_trace_devlink_hwerr)(void *, const struct devlink *, int, const char *); typedef void (*btf_trace_devlink_hwmsg)(void *, const struct devlink *, bool, long unsigned int, const u8 *, size_t); typedef void (*btf_trace_devlink_trap_report)(void *, const struct devlink *, struct sk_buff *, const struct devlink_trap_metadata *); typedef void (*btf_trace_devres_log)(void *, struct device *, const char *, void *, const char *, size_t); typedef void (*btf_trace_dma_alloc)(void *, struct device *, void *, dma_addr_t, size_t, gfp_t, long unsigned int); typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); typedef void (*btf_trace_dma_free)(void *, struct device *, void *, dma_addr_t, size_t, long unsigned int); typedef void (*btf_trace_dma_map_page)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dma_map_resource)(void *, struct device *, phys_addr_t, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dma_map_sg)(void *, struct device *, struct scatterlist *, int, int, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dma_sync_sg_for_cpu)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); typedef void (*btf_trace_dma_sync_sg_for_device)(void *, struct device *, struct scatterlist *, int, enum dma_data_direction); typedef void (*btf_trace_dma_sync_single_for_cpu)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); typedef void (*btf_trace_dma_sync_single_for_device)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction); typedef void (*btf_trace_dma_unmap_page)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dma_unmap_resource)(void *, struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dma_unmap_sg)(void 
*, struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); typedef void (*btf_trace_dql_stall_detected)(void *, short unsigned int, unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int *); typedef void (*btf_trace_emulate_vsyscall)(void *, int); typedef void (*btf_trace_error_apic_entry)(void *, int); typedef void (*btf_trace_error_apic_exit)(void *, int); typedef void (*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); typedef void (*btf_trace_exit_mmap)(void *, struct mm_struct *); typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *, int); typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int); typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int); typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_insert_delayed_extent)(void *, struct inode *, struct extent_status *, bool, bool); typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct 
inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); typedef void (*btf_trace_ext4_fc_cleanup)(void *, journal_t *, int, tid_t); typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *, tid_t); typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int, tid_t); typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); typedef void (*btf_trace_ext4_fc_track_create)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_fc_track_inode)(void *, handle_t *, struct inode *, int); typedef void (*btf_trace_ext4_fc_track_link)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_fc_track_range)(void *, handle_t *, struct inode *, long int, long int, int); typedef void (*btf_trace_ext4_fc_track_unlink)(void *, handle_t *, struct inode *, struct dentry *, int); typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); 
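/*
 * Annotation (assumed context; this comment is not part of the generated
 * header): the typedef families in this header follow two kernel naming
 * conventions.  The btf_bpf_* typedefs mirror BPF helper implementations,
 * which all return u64 under the BPF calling convention (the kernel's
 * BPF_CALL_x macro emits "typedef u64 (*btf_##name)(...)" alongside each
 * helper definition).  The btf_trace_* typedefs mirror tracepoint
 * prototypes: they return void and take an implicit leading "void *"
 * context argument ahead of the tracepoint's own parameters.
 */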
typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); typedef void (*btf_trace_ext4_invalidate_folio)(void *, struct folio *, size_t, size_t); typedef void (*btf_trace_ext4_journal_start_inode)(void *, struct inode *, int, int, int, int, long unsigned int); typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); typedef void (*btf_trace_ext4_journal_start_sb)(void *, struct super_block *, int, int, int, int, long unsigned int); typedef void (*btf_trace_ext4_journalled_invalidate_folio)(void *, struct folio *, size_t, size_t); typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); typedef void (*btf_trace_ext4_read_folio)(void *, struct inode *, struct folio *); typedef void (*btf_trace_ext4_release_folio)(void *, struct inode *, struct folio *); typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); typedef void 
(*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); typedef void (*btf_trace_ext4_update_sb)(void *, struct super_block *, ext4_fsblk_t, unsigned int); typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int); typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); typedef void (*btf_trace_finish_task_reaping)(void *, int); typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); typedef void (*btf_trace_folio_wait_writeback)(void *, struct folio *, struct address_space *); typedef void (*btf_trace_free_vmap_area_noflush)(void *, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_get_mapping_status)(void *, struct mptcp_ext *); typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_guest_halt_poll_ns)(void *, bool, unsigned int, unsigned int); typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); typedef void (*btf_trace_hugepage_set_pmd)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_hugepage_set_pud)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_hugepage_update_pmd)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_hugepage_update_pud)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long int); typedef 
void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); typedef void (*btf_trace_icmp_send)(void *, const struct sk_buff *, int, int); typedef void (*btf_trace_inet_sk_error_report)(void *, const struct sock *); typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); typedef void (*btf_trace_initcall_level)(void *, const char *); typedef void (*btf_trace_initcall_start)(void *, initcall_t); typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); typedef void (*btf_trace_io_page_fault)(void *, struct device *, long unsigned int, int); typedef void (*btf_trace_io_uring_complete)(void *, void *, void *, u64, int, unsigned int, u64, u64); typedef void (*btf_trace_io_uring_cqe_overflow)(void *, void *, long long unsigned int, s32, u32, void *); typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); typedef void (*btf_trace_io_uring_defer)(void *, struct io_kiocb *); typedef void (*btf_trace_io_uring_fail_link)(void *, struct io_kiocb *, struct io_kiocb *); typedef void (*btf_trace_io_uring_file_get)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_link)(void *, struct io_kiocb *, struct io_kiocb *); typedef void (*btf_trace_io_uring_local_work_run)(void *, void *, int, unsigned int); typedef void (*btf_trace_io_uring_poll_arm)(void *, struct io_kiocb *, int, int); typedef void (*btf_trace_io_uring_queue_async_work)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, long int); typedef void (*btf_trace_io_uring_req_failed)(void *, const struct io_uring_sqe *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_short_write)(void *, void *, u64, u64, u64); typedef void (*btf_trace_io_uring_submit_req)(void *, struct io_kiocb *); typedef void (*btf_trace_io_uring_task_add)(void *, struct io_kiocb *, int); typedef void (*btf_trace_io_uring_task_work_run)(void *, void *, unsigned int); typedef void (*btf_trace_iomap_dio_complete)(void *, struct kiocb *, int, ssize_t); typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_dio_rw_begin)(void *, struct kiocb *, struct iov_iter *, unsigned int, size_t); typedef void (*btf_trace_iomap_dio_rw_queued)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_invalidate_folio)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_iter)(void *, struct iomap_iter *, const void *, long unsigned int); typedef void (*btf_trace_iomap_iter_dstmap)(void *, struct inode *, struct iomap *); typedef void (*btf_trace_iomap_iter_srcmap)(void *, struct inode *, struct iomap *); typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); typedef void (*btf_trace_iomap_release_folio)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, loff_t, u64); typedef void (*btf_trace_iomap_writepage_map)(void *, struct inode *, u64, unsigned int, struct iomap *); typedef void 
(*btf_trace_ipi_entry)(void *, const char *); typedef void (*btf_trace_ipi_exit)(void *, const char *); typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); typedef void (*btf_trace_ipi_send_cpu)(void *, const unsigned int, long unsigned int, void *); typedef void (*btf_trace_ipi_send_cpumask)(void *, const struct cpumask *, long unsigned int, void *); typedef void (*btf_trace_irq_disable)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_irq_enable)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); typedef void (*btf_trace_irq_matrix_alloc)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_alloc_managed)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_alloc_reserved)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_assign)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_assign_system)(void *, int, struct irq_matrix *); typedef void (*btf_trace_irq_matrix_free)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_offline)(void *, struct irq_matrix *); typedef void (*btf_trace_irq_matrix_online)(void *, struct irq_matrix *); typedef void (*btf_trace_irq_matrix_remove_managed)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_matrix_remove_reserved)(void *, struct irq_matrix *); typedef void (*btf_trace_irq_matrix_reserve)(void *, struct irq_matrix *); typedef void (*btf_trace_irq_matrix_reserve_managed)(void *, int, unsigned int, struct irq_matrix *, struct cpumap *); typedef void (*btf_trace_irq_work_entry)(void *, int); typedef void (*btf_trace_irq_work_exit)(void *, int); typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, tid_t, struct transaction_chp_stats_s *); typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int); typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, tid_t, unsigned int, unsigned int, int); typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, tid_t, unsigned int, unsigned int, int); typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, tid_t, unsigned int, unsigned int, int, int, int, int); typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, tid_t, struct transaction_run_stats_s *); typedef void (*btf_trace_jbd2_shrink_checkpoint_list)(void *, journal_t *, tid_t, tid_t, tid_t, long unsigned int, tid_t); typedef 
void (*btf_trace_jbd2_shrink_count)(void *, journal_t *, long unsigned int, long unsigned int); typedef void (*btf_trace_jbd2_shrink_scan_enter)(void *, journal_t *, long unsigned int, long unsigned int); typedef void (*btf_trace_jbd2_shrink_scan_exit)(void *, journal_t *, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, blk_opf_t); typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *, enum skb_drop_reason, struct sock *); typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, struct kmem_cache *, gfp_t, int); typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const struct kmem_cache *); typedef void (*btf_trace_ksm_advisor)(void *, s64, long unsigned int, unsigned int); typedef void (*btf_trace_ksm_enter)(void *, void *); typedef void (*btf_trace_ksm_exit)(void *, void *); typedef void (*btf_trace_ksm_merge_one_page)(void *, long unsigned int, void *, void *, int); typedef void (*btf_trace_ksm_merge_with_ksm_page)(void *, void *, long unsigned int, void *, void *, int); typedef void (*btf_trace_ksm_remove_ksm_page)(void *, long unsigned int); typedef void (*btf_trace_ksm_remove_rmap_item)(void *, long unsigned int, void *, void *); typedef void (*btf_trace_ksm_start_scan)(void *, int, u32); typedef void (*btf_trace_ksm_stop_scan)(void *, int, u32); typedef void (*btf_trace_kyber_adjust)(void *, dev_t, const char *, unsigned int); typedef void (*btf_trace_kyber_latency)(void *, dev_t, const char *, const char *, unsigned int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_kyber_throttled)(void *, dev_t, const char *); typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lease *, struct file_lease *); typedef void (*btf_trace_local_timer_entry)(void *, int); typedef void (*btf_trace_local_timer_exit)(void *, int); typedef void (*btf_trace_lock_acquire)(void *, struct lockdep_map *, unsigned int, int, int, int, struct lockdep_map *, long unsigned int); typedef void (*btf_trace_lock_release)(void *, struct lockdep_map *, long unsigned int); typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_ma_op)(void *, const char *, struct ma_state *); typedef void (*btf_trace_ma_read)(void *, const char *, struct ma_state *); typedef void (*btf_trace_ma_write)(void *, const char *, struct ma_state *, long unsigned int, void *); typedef void (*btf_trace_map)(void *, long unsigned int, phys_addr_t, size_t); typedef void (*btf_trace_mark_victim)(void *, struct task_struct *, uid_t); typedef void (*btf_trace_mc_event)(void *, const unsigned int, const char *, const char *, const int, const u8, const s8, const s8, const s8, long unsigned int, const u8, long unsigned int, const char *); typedef void (*btf_trace_mce_record)(void *, struct mce *); typedef void (*btf_trace_mei_pci_cfg_read)(void *, const struct 
device *, const char *, u32, u32); typedef void (*btf_trace_mei_reg_read)(void *, const struct device *, const char *, u32, u32); typedef void (*btf_trace_mei_reg_write)(void *, const struct device *, const char *, u32, u32); typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); typedef void (*btf_trace_memory_failure_event)(void *, long unsigned int, int, int); typedef void (*btf_trace_mm_alloc_contig_migrate_range_info)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); typedef void (*btf_trace_mm_collapse_huge_page)(void *, struct mm_struct *, int, int); typedef void (*btf_trace_mm_collapse_huge_page_isolate)(void *, struct page *, int, int, bool, int); typedef void (*btf_trace_mm_collapse_huge_page_swapin)(void *, struct mm_struct *, int, int, int); typedef void (*btf_trace_mm_compaction_begin)(void *, struct compact_control *, long unsigned int, long unsigned int, bool); typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); typedef void (*btf_trace_mm_compaction_end)(void *, struct compact_control *, long unsigned int, long unsigned int, bool, int); typedef void (*btf_trace_mm_compaction_fast_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); typedef void (*btf_trace_mm_compaction_migratepages)(void *, unsigned int, unsigned int); typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct folio *); typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct folio *); typedef void (*btf_trace_mm_filemap_fault)(void *, struct address_space *, long unsigned int); typedef void (*btf_trace_mm_filemap_get_pages)(void *, struct address_space *, long unsigned int, long unsigned int); typedef void (*btf_trace_mm_filemap_map_pages)(void *, struct address_space *, long unsigned int, long unsigned int); typedef void (*btf_trace_mm_khugepaged_collapse_file)(void *, struct mm_struct *, struct folio *, long unsigned int, bool, long unsigned int, struct file *, int, int); typedef void (*btf_trace_mm_khugepaged_scan_file)(void *, struct mm_struct *, struct folio *, struct file *, int, int, int); typedef void (*btf_trace_mm_khugepaged_scan_pmd)(void *, struct mm_struct *, struct page *, bool, int, int, int, int); typedef void (*btf_trace_mm_lru_activate)(void *, 
struct folio *); typedef void (*btf_trace_mm_lru_insertion)(void *, struct folio *); typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int, int); typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); typedef void (*btf_trace_mm_vmscan_throttled)(void *, int, int, int, int); typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); typedef void (*btf_trace_mm_vmscan_write_folio)(void *, struct folio *); typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); typedef void (*btf_trace_module_free)(void *, struct module *); typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); typedef void (*btf_trace_module_load)(void *, struct module *); typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); typedef void (*btf_trace_mptcp_sendmsg_frag)(void *, struct mptcp_ext *); typedef void (*btf_trace_mptcp_subflow_get_send)(void *, struct mptcp_subflow_context *); typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); 
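/*
 * Sketch (hypothetical example, not part of the generated header; kept
 * inside a comment so the header payload stays valid C): how a
 * btf_trace_* prototype such as btf_trace_netif_receive_skb below maps
 * onto a libbpf raw-tracepoint program.  The kernel supplies the leading
 * "void *" context argument; libbpf's BPF_PROG macro (from
 * bpf_tracing.h) hides it, leaving only the tracepoint's own parameters:
 *
 *   #include "vmlinux.h"
 *   #include <bpf/bpf_helpers.h>
 *   #include <bpf/bpf_tracing.h>
 *
 *   // Attach to the netif_receive_skb tracepoint via BTF; skb is a
 *   // trusted BTF pointer, so fields can be read directly.
 *   SEC("tp_btf/netif_receive_skb")
 *   int BPF_PROG(trace_rx, struct sk_buff *skb)
 *   {
 *           bpf_printk("rx skb len %u", skb->len);
 *           return 0;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */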
typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); typedef void (*btf_trace_netif_rx_exit)(void *, int); typedef void (*btf_trace_netlink_extack)(void *, const char *); typedef void (*btf_trace_nmi_handler)(void *, void *, s64, int); typedef void (*btf_trace_non_standard_event)(void *, const guid_t *, const guid_t *, const char *, const u8, const u8 *, const u32); typedef void (*btf_trace_notifier_register)(void *, void *); typedef void (*btf_trace_notifier_run)(void *, void *); typedef void (*btf_trace_notifier_unregister)(void *, void *); typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); typedef void (*btf_trace_page_fault_kernel)(void *, long unsigned int, struct pt_regs *, long unsigned int); typedef void (*btf_trace_page_fault_user)(void *, long unsigned int, struct pt_regs *, long unsigned int); typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, netmem_ref, u32); typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, netmem_ref, u32); typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_hw_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); typedef void (*btf_trace_percpu_alloc_percpu)(void *, long unsigned int, bool, bool, size_t, size_t, void *, int, void *, size_t, gfp_t); typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, 
size_t); typedef void (*btf_trace_percpu_create_chunk)(void *, void *); typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); typedef void (*btf_trace_pm_qos_add_request)(void *, s32); typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); typedef void (*btf_trace_pm_qos_update_request)(void *, s32); typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); typedef void (*btf_trace_purge_vmap_area_lazy)(void *, long unsigned int, long unsigned int, unsigned int); typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); typedef void (*btf_trace_qdisc_enqueue)(void *, struct Qdisc *, const struct netdev_queue *, struct sk_buff *); typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); typedef void (*btf_trace_rcu_barrier)(void *, const char *, const char *, int, int, long unsigned int); typedef void (*btf_trace_rcu_batch_end)(void *, const char *, int, char, char, char, char); typedef void (*btf_trace_rcu_batch_start)(void *, const char *, long int, long int); typedef void (*btf_trace_rcu_callback)(void *, const char *, struct callback_head *, long int); typedef void (*btf_trace_rcu_exp_funnel_lock)(void *, const char *, u8, int, int, const char *); typedef void (*btf_trace_rcu_exp_grace_period)(void *, const char *, long unsigned int, const char *); typedef void (*btf_trace_rcu_fqs)(void *, const char *, long unsigned int, int, const char *); typedef void (*btf_trace_rcu_future_grace_period)(void *, const char *, long unsigned int, long unsigned int, u8, int, int, const char *); typedef void (*btf_trace_rcu_grace_period)(void *, const char *, long unsigned int, const char *); typedef void (*btf_trace_rcu_grace_period_init)(void *, const char *, long unsigned int, u8, int, int, long unsigned int); typedef void (*btf_trace_rcu_invoke_callback)(void *, const char *, struct callback_head *); typedef void (*btf_trace_rcu_invoke_kfree_bulk_callback)(void *, const char *, long unsigned int, void **); typedef void (*btf_trace_rcu_invoke_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int); typedef void (*btf_trace_rcu_kvfree_callback)(void *, const char *, struct callback_head *, long unsigned int, long int); typedef void (*btf_trace_rcu_preempt_task)(void *, const char *, int, long unsigned int); typedef void (*btf_trace_rcu_quiescent_state_report)(void *, const char *, long unsigned int, long unsigned int, long unsigned int, u8, int, int, int); typedef void (*btf_trace_rcu_segcb_stats)(void *, struct rcu_segcblist *, const char *); typedef void (*btf_trace_rcu_sr_normal)(void *, const char *, struct callback_head *, const char *); typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); typedef void (*btf_trace_rcu_torture_read)(void *, const char *, 
struct callback_head *, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_rcu_unlock_preempted_task)(void *, const char *, long unsigned int, int); typedef void (*btf_trace_rcu_utilization)(void *, const char *); typedef void (*btf_trace_rcu_watching)(void *, const char *, long int, long int, int); typedef void (*btf_trace_rdpmc)(void *, unsigned int, u64, int); typedef void (*btf_trace_read_msr)(void *, unsigned int, u64, int); typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); typedef void (*btf_trace_remove_migration_pmd)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_remove_migration_pte)(void *, long unsigned int, long unsigned int, int); typedef void (*btf_trace_reschedule_entry)(void *, int); typedef void (*btf_trace_reschedule_exit)(void *, int); typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); typedef void (*btf_trace_rpm_status)(void *, struct device *, enum rpm_status); typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int); typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); typedef void (*btf_trace_sched_compute_energy_tp)(void *, struct task_struct *, int, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); typedef void (*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); typedef void (*btf_trace_sched_prepare_exec)(void *, struct task_struct *, struct linux_binprm *); typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); typedef void (*btf_trace_sched_skip_vma_numa)(void *, struct mm_struct *, struct 
vm_area_struct *, enum numa_vmaskip_reason); typedef void (*btf_trace_sched_stat_blocked)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_iowait)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_sleep)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stat_wait)(void *, struct task_struct *, u64); typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *, unsigned int); typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); typedef void (*btf_trace_selinux_audited)(void *, struct selinux_audit_data *, char *, char *, const char *); typedef void (*btf_trace_set_migration_pmd)(void *, long unsigned int, long unsigned int); typedef void (*btf_trace_set_migration_pte)(void *, long unsigned int, long unsigned int, int); typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); typedef void (*btf_trace_sk_data_ready)(void *, const struct sock *); typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); typedef void (*btf_trace_skip_task_reaping)(void *, int); typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); typedef void (*btf_trace_sock_recv_length)(void *, struct sock *, int, int); typedef void (*btf_trace_sock_send_length)(void *, struct sock *, int, int); typedef void (*btf_trace_softirq_entry)(void *, unsigned int); typedef void (*btf_trace_softirq_exit)(void *, unsigned int); typedef void (*btf_trace_softirq_raise)(void *, unsigned int); typedef void (*btf_trace_spurious_apic_entry)(void *, int); typedef void (*btf_trace_spurious_apic_exit)(void *, int); typedef void (*btf_trace_start_task_reaping)(void *, int); typedef void (*btf_trace_subflow_check_data_avail)(void *, __u8, struct sk_buff *); typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t); typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); typedef void (*btf_trace_tasklet_entry)(void *, struct tasklet_struct *, void *); typedef void (*btf_trace_tasklet_exit)(void *, struct tasklet_struct *, void *); typedef void 
(*btf_trace_tcp_ao_handshake_failure)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); typedef void (*btf_trace_tcp_ao_key_not_found)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); typedef void (*btf_trace_tcp_ao_mismatch)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); typedef void (*btf_trace_tcp_ao_rcv_sne_update)(void *, const struct sock *, __u32); typedef void (*btf_trace_tcp_ao_rnext_request)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); typedef void (*btf_trace_tcp_ao_snd_sne_update)(void *, const struct sock *, __u32); typedef void (*btf_trace_tcp_ao_synack_no_key)(void *, const struct sock *, const __u8, const __u8); typedef void (*btf_trace_tcp_ao_wrong_maclen)(void *, const struct sock *, const struct sk_buff *, const __u8, const __u8, const __u8); typedef void (*btf_trace_tcp_bad_csum)(void *, const struct sk_buff *); typedef void (*btf_trace_tcp_cong_state_set)(void *, struct sock *, const u8); typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); typedef void (*btf_trace_tcp_hash_ao_required)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_hash_bad_header)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_hash_md5_mismatch)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_hash_md5_required)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_hash_md5_unexpected)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *, const enum sk_rst_reason); typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); typedef void (*btf_trace_thermal_apic_entry)(void *, int); typedef void (*btf_trace_thermal_apic_exit)(void *, int); typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); typedef void (*btf_trace_threshold_apic_entry)(void *, int); typedef void (*btf_trace_threshold_apic_exit)(void *, int); typedef void (*btf_trace_tick_stop)(void *, int, int); typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lease *); typedef void (*btf_trace_timer_base_idle)(void *, bool, unsigned int); typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); typedef void (*btf_trace_timer_init)(void *, struct timer_list *); typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int); typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); typedef void (*btf_trace_tls_device_decrypted)(void *, struct sock *, u32, u8 *, u32, bool, bool); typedef void 
(*btf_trace_tls_device_offload_set)(void *, struct sock *, int, u32, u8 *, int); typedef void (*btf_trace_tls_device_rx_resync_nh_delay)(void *, struct sock *, u32, u32); typedef void (*btf_trace_tls_device_rx_resync_nh_schedule)(void *, struct sock *); typedef void (*btf_trace_tls_device_rx_resync_send)(void *, struct sock *, u32, u8 *, int); typedef void (*btf_trace_tls_device_tx_resync_req)(void *, struct sock *, u32, u32); typedef void (*btf_trace_tls_device_tx_resync_send)(void *, struct sock *, u32, u8 *); typedef void (*btf_trace_tmigr_connect_child_parent)(void *, struct tmigr_group *); typedef void (*btf_trace_tmigr_connect_cpu_parent)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_cpu_active)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_cpu_idle)(void *, struct tmigr_cpu *, u64); typedef void (*btf_trace_tmigr_cpu_new_timer)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_cpu_new_timer_idle)(void *, struct tmigr_cpu *, u64); typedef void (*btf_trace_tmigr_cpu_offline)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_cpu_online)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_group_set)(void *, struct tmigr_group *); typedef void (*btf_trace_tmigr_group_set_cpu_active)(void *, struct tmigr_group *, union tmigr_state, u32); typedef void (*btf_trace_tmigr_group_set_cpu_inactive)(void *, struct tmigr_group *, union tmigr_state, u32); typedef void (*btf_trace_tmigr_handle_remote)(void *, struct tmigr_group *); typedef void (*btf_trace_tmigr_handle_remote_cpu)(void *, struct tmigr_cpu *); typedef void (*btf_trace_tmigr_update_events)(void *, struct tmigr_group *, struct tmigr_group *, union tmigr_state, union tmigr_state, u64); typedef void (*btf_trace_track_foreign_dirty)(void *, struct folio *, struct bdi_writeback *); typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *, struct sk_buff *); typedef void (*btf_trace_unmap)(void *, long unsigned int, size_t, size_t); typedef void (*btf_trace_vector_activate)(void *, unsigned int, bool, bool, bool); typedef void (*btf_trace_vector_alloc)(void *, unsigned int, unsigned int, bool, int); typedef void (*btf_trace_vector_alloc_managed)(void *, unsigned int, unsigned int, int); typedef void (*btf_trace_vector_clear)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_vector_config)(void *, unsigned int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_vector_deactivate)(void *, unsigned int, bool, bool, bool); typedef void (*btf_trace_vector_free_moved)(void *, unsigned int, unsigned int, unsigned int, bool); typedef void (*btf_trace_vector_reserve)(void *, unsigned int, int); typedef void (*btf_trace_vector_reserve_managed)(void *, unsigned int, int); typedef void (*btf_trace_vector_setup)(void *, unsigned int, bool, int); typedef void (*btf_trace_vector_teardown)(void *, unsigned int, bool, bool); typedef void (*btf_trace_vector_update)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); typedef void (*btf_trace_virtio_transport_alloc_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32, bool); typedef void (*btf_trace_virtio_transport_recv_pkt)(void *, __u32, __u32, __u32, __u32, __u32, __u16, __u16, __u32, __u32, __u32); typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); typedef void (*btf_trace_vma_mas_szero)(void *, struct maple_tree *, long unsigned int, long unsigned int); typedef void 
(*btf_trace_vma_store)(void *, struct maple_tree *, struct vm_area_struct *); typedef void (*btf_trace_wake_reaper)(void *, int); typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); typedef void (*btf_trace_workqueue_queue_work)(void *, int, struct pool_workqueue *, struct work_struct *); typedef void (*btf_trace_write_msr)(void *, unsigned int, u64, int); typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); typedef void (*btf_trace_writeback_dirty_folio)(void *, struct folio *, struct address_space *); typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); typedef void (*btf_trace_writeback_pages_written)(void *, long int); typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); typedef void (*btf_trace_x86_fpu_after_restore)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_after_save)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_before_restore)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_before_save)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_copy_dst)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_copy_src)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_dropped)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_init_state)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_regs_activated)(void *, struct fpu *); typedef void (*btf_trace_x86_fpu_regs_deactivated)(void *, struct fpu *); typedef void 
(*btf_trace_x86_fpu_xstate_check_failed)(void *, struct fpu *); typedef void (*btf_trace_x86_platform_ipi_entry)(void *, int); typedef void (*btf_trace_x86_platform_ipi_exit)(void *, int); typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); typedef bool (*check_reserved_t)(u64, u64, enum e820_type); typedef void cleanup_cb_t(struct rq_wait *, void *); typedef int (*cmp_r_func_t)(const void *, const void *, const void *); typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *, void *); typedef void (*codel_skb_drop_t)(struct sk_buff *, void *); typedef u32 (*codel_skb_len_t)(const struct sk_buff *); typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *); typedef bool (*cond_update_fn_t)(struct trace_array *, void *); typedef int (*cppc_mode_transition_fn)(int); typedef void * (*devcon_match_fn_t)(const struct fwnode_handle *, const char *, void *); typedef int (*device_match_t)(struct device *, const void *); typedef int devlink_chunk_fill_t(void *, u8 *, u32, u64, struct netlink_ext_ack *); typedef int devlink_nl_dump_one_func_t(struct sk_buff *, struct devlink *, struct netlink_callback *, int); typedef int (*dr_match_t)(struct device *, void *, void *); typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *, ...); typedef int (*dynevent_check_arg_fn_t)(void *); typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool); typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); typedef void (*exitcall_t)(void); typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); typedef void ext4_update_sb_callback(struct ext4_super_block *, const void *); typedef int filler_t(struct file *, struct folio *); typedef bool (*filter_func_t)(struct uprobe_consumer *, struct mm_struct *); typedef void fn_handler_fn(struct vc_data *); typedef void free_folio_t(struct folio *, long unsigned int); typedef int (*ftrace_mapper_func)(void *); typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int); typedef u32 inet6_ehashfn_t(const struct net *, const struct in6_addr *, const u16, const struct in6_addr *, const __be16); typedef u32 inet_ehashfn_t(const struct net *, const __be32, const __u16, const 
__be32, const __be16); typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); typedef void (*iomap_punch_t)(struct inode *, loff_t, loff_t, struct iomap *); typedef size_t (*iov_step_f)(void *, size_t, size_t, void *, void *); typedef size_t (*iov_ustep_f)(void *, size_t, size_t, void *, void *); typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); typedef void k_handler_fn(struct vc_data *, unsigned char, char); typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *); typedef int mh_filter_t(struct sock *, struct sk_buff *); typedef void (*move_fn_t)(struct lruvec *, struct folio *); typedef int (*netlink_filter_fn)(struct sock *, struct sk_buff *, void *); typedef struct folio *new_folio_t(struct folio *, long unsigned int); typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, long unsigned int); typedef void (*nmi_shootdown_cb)(int, struct pt_regs *); typedef struct ns_common *ns_get_path_helper_t(void *); typedef int (*objpool_init_obj_cb)(void *, void *); typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); typedef int (*parse_unknown_fn)(char *, char *, const char *, void *); typedef int (*pcie_callback_t)(struct pcie_device *); typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); typedef int pcpu_fc_cpu_to_node_fn_t(int); typedef void perf_iterate_f(struct perf_event *, void *); typedef int perf_snapshot_branch_stack_t(struct perf_branch_entry *, unsigned int); typedef int (*pm_callback_t)(struct device *); typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); typedef int (*pp_nl_fill_cb)(struct sk_buff *, const struct page_pool *, const struct genl_info *); typedef int (*proc_visitor)(struct task_struct *, void *); typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); typedef void (*rethook_handler_t)(struct rethook_node *, void *, long unsigned int, struct pt_regs *); typedef bool (*ring_buffer_cond_fn)(void *); typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); typedef int (*sendmsg_func)(struct sock *, struct msghdr *); typedef void (*serial8250_isa_config_fn)(int, struct uart_port *, u32 *); typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); typedef void sg_free_fn(struct scatterlist *, unsigned int); typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); typedef void sha256_block_fn(struct sha256_state *, const u8 *, int); typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); typedef bool (*smp_cond_func_t)(int, void *); typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); typedef void (*swap_r_func_t)(void *, void *, int, const void *); typedef long int (*sys_call_ptr_t)(const struct pt_regs *); typedef int (*task_call_f)(struct 
task_struct *, void *); typedef void (*task_work_func_t)(struct callback_head *); typedef void text_poke_f(void *, const void *, size_t); typedef int (*tg_visitor)(struct task_group *, void *); typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *); typedef int wait_bit_action_f(struct wait_bit_key *, int); typedef int (*writepage_t)(struct folio *, struct writeback_control *, void *); struct net_bridge; struct nf_bridge_frag_data; struct bpf_iter; struct creds; struct fscrypt_inode_info; /* BPF kfuncs */ #ifndef BPF_NO_KFUNC_PROTOTYPES extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __weak __ksym; extern void cgroup_rstat_flush(struct cgroup *cgrp) __weak __ksym; extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __weak __ksym; extern struct bpf_key *bpf_lookup_system_key(u64 id) __weak __ksym; extern void bpf_key_put(struct bpf_key *bkey) __weak __ksym; extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p, struct bpf_dynptr *sig_p, struct bpf_key *trusted_keyring) __weak __ksym; extern bool bpf_session_is_return(void) __weak __ksym; extern __u64 *bpf_session_cookie(void) __weak __ksym; extern void crash_kexec(struct pt_regs *regs) __weak __ksym; extern void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) __weak __ksym; extern void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) __weak __ksym; extern void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) __weak __ksym; extern void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) __weak __ksym; extern void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) __weak __ksym; extern int bpf_list_push_front_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta__ign, u64 off) __weak __ksym; extern int bpf_list_push_back_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta__ign, u64 off) __weak __ksym; extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __weak __ksym; extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __weak __ksym; extern struct task_struct *bpf_task_acquire(struct task_struct *p) __weak __ksym; extern void bpf_task_release(struct task_struct *p) __weak __ksym; extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, struct bpf_rb_node *node) __weak __ksym; extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, bool (*less)(struct bpf_rb_node *, const struct bpf_rb_node *), void *meta__ign, u64 off) __weak __ksym; extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __weak __ksym; extern struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __weak __ksym; extern void bpf_cgroup_release(struct cgroup *cgrp) __weak __ksym; extern struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __weak __ksym; extern struct cgroup *bpf_cgroup_from_id(u64 cgid) __weak __ksym; extern long int bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __weak __ksym; extern struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __weak __ksym; extern struct task_struct *bpf_task_from_pid(s32 pid) __weak __ksym; extern void bpf_throw(u64 cookie) __weak __ksym; extern void *bpf_cast_to_kern_ctx(void *obj) __weak __ksym; extern void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) __weak __ksym; extern void bpf_rcu_read_lock(void) __weak __ksym; extern 
void bpf_rcu_read_unlock(void) __weak __ksym; extern void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) __weak __ksym; extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) __weak __ksym; extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym; extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym; extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym; extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, struct task_struct *task, u64 addr) __weak __ksym; extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __weak __ksym; extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __weak __ksym; extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it, struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym; extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym; extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym; extern int bpf_iter_css_new(struct bpf_iter_css *it, struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym; extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym; extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; extern int bpf_iter_task_new(struct bpf_iter_task *it, struct task_struct *task__nullable, unsigned int flags) __weak __ksym; extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym; extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym; extern int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) __weak __ksym; extern bool bpf_dynptr_is_null(const struct bpf_dynptr *p) __weak __ksym; extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) __weak __ksym; extern __u32 bpf_dynptr_size(const struct bpf_dynptr *p) __weak __ksym; extern int bpf_dynptr_clone(const struct bpf_dynptr *p, struct bpf_dynptr *clone__uninit) __weak __ksym; extern int bpf_modify_return_test_tp(int nonce) __weak __ksym; extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, int (*callback_fn)(void *, int *, void *), unsigned int flags, void *aux__ign) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; extern void bpf_preempt_disable(void) __weak __ksym; extern void bpf_preempt_enable(void) __weak __ksym; extern int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __weak __ksym; extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __weak __ksym; extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __weak __ksym; extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym; extern s64 bpf_map_sum_elem_count(const struct bpf_map *map) __weak __ksym; extern void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt, int node_id, u64 flags) __weak __ksym; extern void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt) __weak __ksym; extern struct bpf_cpumask *bpf_cpumask_create(void) __weak __ksym; extern void bpf_cpumask_release(struct bpf_cpumask *cpumask) __weak __ksym; extern struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __weak __ksym; extern u32 bpf_cpumask_first(const struct cpumask 
*cpumask) __weak __ksym; extern u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __weak __ksym; extern u32 bpf_cpumask_first_and(const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __weak __ksym; extern void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __weak __ksym; extern bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __weak __ksym; extern bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __weak __ksym; extern bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __weak __ksym; extern void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __weak __ksym; extern void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __weak __ksym; extern bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern bool bpf_cpumask_empty(const struct cpumask *cpumask) __weak __ksym; extern bool bpf_cpumask_full(const struct cpumask *cpumask) __weak __ksym; extern void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __weak __ksym; extern u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __weak __ksym; extern u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __weak __ksym; extern u32 bpf_cpumask_weight(const struct cpumask *cpumask) __weak __ksym; extern struct bpf_crypto_ctx *bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz, int *err) __weak __ksym; extern void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __weak __ksym; extern struct bpf_crypto_ctx *bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) __weak __ksym; extern int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src, const struct bpf_dynptr *dst, const struct bpf_dynptr *siv__nullable) __weak __ksym; extern int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src, const struct bpf_dynptr *dst, const struct bpf_dynptr *siv__nullable) __weak __ksym; extern int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_p) __weak __ksym; extern struct file *bpf_get_task_exe_file(struct task_struct *task) __weak __ksym; extern void bpf_put_file(struct file *file) __weak __ksym; extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __weak __ksym; extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str, struct bpf_dynptr *value_p) __weak __ksym; extern int bpf_get_file_xattr(struct file *file, const char *name__str, struct bpf_dynptr *value_p) __weak __ksym; extern int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags, struct bpf_dynptr *ptr__uninit) __weak __ksym; extern int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags, struct bpf_dynptr *ptr__uninit) __weak __ksym; extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, const u8 *sun_path, u32 sun_path__sz) 
__weak __ksym; extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk, struct bpf_tcp_req_attrs *attrs, int attrs__sz) __weak __ksym; extern int bpf_sock_destroy(struct sock_common *sock) __weak __ksym; extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) __weak __ksym; extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) __weak __ksym; extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, u16 *vlan_tci) __weak __ksym; extern int bpf_modify_return_test(int a, int *b) __weak __ksym; extern int bpf_modify_return_test2(int a, int *b, short int c, int d, void *e, char f, int g) __weak __ksym; extern int bpf_fentry_test1(int a) __weak __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __weak __ksym; extern void bpf_kfunc_call_memb_release(struct prog_test_member *p) __weak __ksym; extern struct nf_conn___init *bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz) __weak __ksym; extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz) __weak __ksym; extern struct nf_conn___init *bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz) __weak __ksym; extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple, u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz) __weak __ksym; extern struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i) __weak __ksym; extern void bpf_ct_release(struct nf_conn *nfct) __weak __ksym; extern void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout) __weak __ksym; extern int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout) __weak __ksym; extern int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status) __weak __ksym; extern int bpf_ct_change_status(struct nf_conn *nfct, u32 status) __weak __ksym; extern int bpf_ct_set_nat_info(struct nf_conn___init *nfct, union nf_inet_addr *addr, int port, enum nf_nat_manip_type manip) __weak __ksym; extern struct flow_offload_tuple_rhash *bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple, struct bpf_flowtable_opts *opts, u32 opts_len) __weak __ksym; extern int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx, struct bpf_fou_encap *encap, int type) __weak __ksym; extern int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, struct bpf_fou_encap *encap) __weak __ksym; extern void bbr_init(struct sock *sk) __weak __ksym; extern void bbr_main(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs) __weak __ksym; extern u32 bbr_sndbuf_expand(struct sock *sk) __weak __ksym; extern u32 bbr_undo_cwnd(struct sock *sk) __weak __ksym; extern void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) __weak __ksym; extern u32 bbr_ssthresh(struct sock *sk) __weak __ksym; extern u32 bbr_min_tso_segs(struct sock *sk) __weak __ksym; extern void bbr_set_state(struct sock *sk, u8 new_state) __weak __ksym; extern void cubictcp_init(struct sock *sk) __weak __ksym; extern u32 cubictcp_recalc_ssthresh(struct sock *sk) __weak __ksym; extern void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) __weak __ksym; extern void cubictcp_state(struct sock *sk, u8 new_state) __weak __ksym; extern void cubictcp_cwnd_event(struct sock *sk, enum 
tcp_ca_event event) __weak __ksym; extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __weak __ksym; extern void dctcp_init(struct sock *sk) __weak __ksym; extern void dctcp_update_alpha(struct sock *sk, u32 flags) __weak __ksym; extern void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) __weak __ksym; extern u32 dctcp_ssthresh(struct sock *sk) __weak __ksym; extern u32 dctcp_cwnd_undo(struct sock *sk) __weak __ksym; extern void dctcp_state(struct sock *sk, u8 new_state) __weak __ksym; extern u32 tcp_reno_ssthresh(struct sock *sk) __weak __ksym; extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) __weak __ksym; extern u32 tcp_reno_undo_cwnd(struct sock *sk) __weak __ksym; extern u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) __weak __ksym; extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) __weak __ksym; extern int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to) __weak __ksym; extern int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bpf_xfrm_info *from) __weak __ksym; extern struct xfrm_state *bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts, u32 opts__sz) __weak __ksym; extern void bpf_xdp_xfrm_state_release(struct xfrm_state *x) __weak __ksym; #endif #ifndef BPF_NO_PRESERVE_ACCESS_INDEX #pragma clang attribute pop #endif #endif /* __VMLINUX_H__ */ xdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/action.yml0000644000175100001660000000205314706536574025153 0ustar runnerdockername: 'build-selftests' description: 'Build BPF selftests' inputs: repo-path: description: 'where is the source code' required: true kernel: description: 'kernel version or LATEST' required: true default: 'LATEST' vmlinux: description: 'where is vmlinux file' required: true default: '${{ github.workspace }}/vmlinux' llvm-version: description: 'llvm version' required: true runs: using: "composite" steps: - shell: bash run: | source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh foldable start "Setup Env" sudo apt-get install -y qemu-kvm zstd binutils-dev elfutils libcap-dev libelf-dev libdw-dev python3-docutils foldable end - shell: bash run: | export KERNEL=${{ inputs.kernel }} export REPO_ROOT="${{ github.workspace }}" export REPO_PATH="${{ inputs.repo-path }}" export VMLINUX_BTF="${{ inputs.vmlinux }}" export LLVM_VERSION="${{ inputs.llvm-version }}" ${{ github.action_path }}/build_selftests.sh xdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/prepare_selftests-4.9.0.sh0000755000175100001660000000025014706536574027707 0ustar runnerdocker#!/bin/bash printf "all:\n\ttouch bpf_testmod.ko\n\nclean:\n" > bpf_testmod/Makefile printf "all:\n\ttouch bpf_test_no_cfi.ko\n\nclean:\n" > bpf_test_no_cfi/Makefile xdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/build_selftests.sh0000755000175100001660000000263514706536574026713 0ustar runnerdocker#!/bin/bash set -euo pipefail THISDIR="$(cd $(dirname $0) && pwd)" source ${THISDIR}/helpers.sh foldable start prepare_selftests "Building selftests" LIBBPF_PATH="${REPO_ROOT}" llvm_latest_version() { echo "19" } if [[ "${LLVM_VERSION}" == $(llvm_latest_version) ]]; then REPO_DISTRO_SUFFIX="" else REPO_DISTRO_SUFFIX="-${LLVM_VERSION}" fi DISTRIB_CODENAME="noble" test -f /etc/lsb-release && . 
/etc/lsb-release echo "${DISTRIB_CODENAME}" echo "deb https://apt.llvm.org/${DISTRIB_CODENAME}/ llvm-toolchain-${DISTRIB_CODENAME}${REPO_DISTRO_SUFFIX} main" \ | sudo tee /etc/apt/sources.list.d/llvm.list PREPARE_SELFTESTS_SCRIPT=${THISDIR}/prepare_selftests-${KERNEL}.sh if [ -f "${PREPARE_SELFTESTS_SCRIPT}" ]; then (cd "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" && ${PREPARE_SELFTESTS_SCRIPT}) fi if [[ "${KERNEL}" = 'LATEST' ]]; then VMLINUX_H= else VMLINUX_H=${THISDIR}/vmlinux.h fi cd ${REPO_ROOT}/${REPO_PATH} make headers make \ CLANG=clang-${LLVM_VERSION} \ LLC=llc-${LLVM_VERSION} \ LLVM_STRIP=llvm-strip-${LLVM_VERSION} \ VMLINUX_BTF="${VMLINUX_BTF}" \ VMLINUX_H=${VMLINUX_H} \ -C "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" \ -j $((4*$(nproc))) > /dev/null cd - mkdir ${LIBBPF_PATH}/selftests cp -R "${REPO_ROOT}/${REPO_PATH}/tools/testing/selftests/bpf" \ ${LIBBPF_PATH}/selftests cd ${LIBBPF_PATH} rm selftests/bpf/.gitignore git add selftests foldable end prepare_selftests xdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/helpers.sh0000644000175100001660000000115014706536574025146 0ustar runnerdocker# shellcheck shell=bash # $1 - start or end # $2 - fold identifier, no spaces # $3 - fold section description foldable() { local YELLOW='\033[1;33m' local NOCOLOR='\033[0m' if [ $1 = "start" ]; then line="::group::$2" if [ ! -z "${3:-}" ]; then line="$line - ${YELLOW}$3${NOCOLOR}" fi else line="::endgroup::" fi echo -e "$line" } __print() { local TITLE="" if [[ -n $2 ]]; then TITLE=" title=$2" fi echo "::$1${TITLE}::$3" } # $1 - title # $2 - message print_error() { __print error $1 $2 } # $1 - title # $2 - message print_notice() { __print notice $1 $2 } xdp-tools-1.5.4/lib/libbpf/.github/actions/build-selftests/prepare_selftests-5.5.0.sh0000755000175100001660000000025014706536574027704 0ustar runnerdocker#!/bin/bash printf "all:\n\ttouch bpf_testmod.ko\n\nclean:\n" > bpf_testmod/Makefile printf "all:\n\ttouch bpf_test_no_cfi.ko\n\nclean:\n" > bpf_test_no_cfi/Makefile xdp-tools-1.5.4/lib/libbpf/.github/actions/vmtest/0000755000175100001660000000000014706536574021364 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/vmtest/action.yml0000644000175100001660000000775314706536574023400 0ustar runnerdockername: 'vmtest' description: 'Build + run vmtest' inputs: kernel: description: 'kernel version or LATEST' required: true default: 'LATEST' arch: description: 'what arch to test' required: true default: 'x86_64' pahole: description: 'pahole rev or master' required: true default: 'master' llvm-version: description: 'llvm version' required: false default: '17' runs: using: "composite" steps: # Allow CI user to access /dev/kvm (via qemu) w/o group change/relogin # by changing permissions set by udev. - name: Set /dev/kvm permissions shell: bash run: | if [ -e /dev/kvm ]; then echo "/dev/kvm exists" if [ $(id -u) != 0 ]; then echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' \ | sudo tee /etc/udev/rules.d/99-kvm4all.rules > /dev/null sudo udevadm control --reload-rules sudo udevadm trigger --name-match=kvm fi else echo "/dev/kvm does not exist" fi # setup environment - name: Setup environment uses: libbpf/ci/setup-build-env@main with: pahole: ${{ inputs.pahole }} arch: ${{ inputs.arch }} llvm-version: ${{ inputs.llvm-version }} # 1. 
download CHECKPOINT kernel source - name: Get checkpoint commit shell: bash run: | cat CHECKPOINT-COMMIT echo "CHECKPOINT=$(cat CHECKPOINT-COMMIT)" >> $GITHUB_ENV - name: Get kernel source at checkpoint uses: libbpf/ci/get-linux-source@main with: repo: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git' rev: ${{ env.CHECKPOINT }} dest: '${{ github.workspace }}/.kernel' - name: Patch kernel source uses: libbpf/ci/patch-kernel@main with: patches-root: '${{ github.workspace }}/ci/diffs' repo-root: '.kernel' - name: Prepare to build BPF selftests shell: bash run: | source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh foldable start "Prepare building selftests" cd .kernel cat tools/testing/selftests/bpf/config \ tools/testing/selftests/bpf/config.${{ inputs.arch }} > .config # this file might or might not exist depending on kernel version cat tools/testing/selftests/bpf/config.vm >> .config || : make olddefconfig && make prepare cd - foldable end # 2. if kernel == LATEST, build kernel image from tree - name: Build kernel image if: ${{ inputs.kernel == 'LATEST' }} shell: bash run: | source $GITHUB_ACTION_PATH/../../../ci/vmtest/helpers.sh foldable start "Build Kernel Image" cd .kernel make -j $((4*$(nproc))) all > /dev/null cp vmlinux ${{ github.workspace }} cd - foldable end # else, just download prebuilt kernel image - name: Download prebuilt kernel if: ${{ inputs.kernel != 'LATEST' }} uses: libbpf/ci/download-vmlinux@main with: kernel: ${{ inputs.kernel }} arch: ${{ inputs.arch }} # 3. build selftests - name: Build BPF selftests uses: ./.github/actions/build-selftests with: repo-path: '.kernel' kernel: ${{ inputs.kernel }} llvm-version: ${{ inputs.llvm-version }} # 4. prepare rootfs - name: prepare rootfs uses: libbpf/ci/prepare-rootfs@main env: KBUILD_OUTPUT: '.kernel' with: project-name: 'libbpf' arch: ${{ inputs.arch }} kernel: ${{ inputs.kernel }} kernel-root: '.kernel' kbuild-output: ${{ env.KBUILD_OUTPUT }} image-output: '/tmp/root.img' # 5.
run selftest in QEMU - name: Run selftests env: KERNEL: ${{ inputs.kernel }} REPO_ROOT: ${{ github.workspace }} uses: libbpf/ci/run-qemu@main with: arch: ${{ inputs.arch }} img: '/tmp/root.img' vmlinuz: 'vmlinuz' kernel-root: '.kernel' xdp-tools-1.5.4/lib/libbpf/.github/actions/debian/0000755000175100001660000000000014706536574021264 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/debian/action.yml0000644000175100001660000000055714706536574023273 0ustar runnerdockername: 'debian' description: 'Build' inputs: target: description: 'Run target' required: true runs: using: "composite" steps: - run: | source /tmp/ci_setup bash -x $CI_ROOT/managers/debian.sh SETUP bash -x $CI_ROOT/managers/debian.sh ${{ inputs.target }} bash -x $CI_ROOT/managers/debian.sh CLEANUP shell: bash xdp-tools-1.5.4/lib/libbpf/.github/actions/setup/0000755000175100001660000000000014706536574021202 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/actions/setup/action.yml0000644000175100001660000000202714706536574023203 0ustar runnerdockername: 'setup' description: 'setup env, create /tmp/ci_setup' runs: using: "composite" steps: - id: variables run: | export REPO_ROOT=$GITHUB_WORKSPACE export CI_ROOT=$REPO_ROOT/ci # this is somewhat ugly, but that is the easiest way to share this code with # arch specific docker echo 'echo ::group::Env setup' > /tmp/ci_setup echo export DEBIAN_FRONTEND=noninteractive >> /tmp/ci_setup echo sudo apt-get update >> /tmp/ci_setup echo sudo apt-get install -y aptitude qemu-kvm zstd binutils-dev elfutils libcap-dev libelf-dev libdw-dev libguestfs-tools >> /tmp/ci_setup echo export PROJECT_NAME='libbpf' >> /tmp/ci_setup echo export AUTHOR_EMAIL="$(git log -1 --pretty=\"%aE\")" >> /tmp/ci_setup echo export REPO_ROOT=$GITHUB_WORKSPACE >> /tmp/ci_setup echo export CI_ROOT=$REPO_ROOT/ci >> /tmp/ci_setup echo export VMTEST_ROOT=$CI_ROOT/vmtest >> /tmp/ci_setup echo 'echo ::endgroup::' >> /tmp/ci_setup shell: bash xdp-tools-1.5.4/lib/libbpf/.github/workflows/0000755000175100001660000000000014706536574020437 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/.github/workflows/lint.yml0000644000175100001660000000054014706536574022127 0ustar runnerdockername: "lint" on: pull_request: push: branches: - master jobs: shellcheck: name: ShellCheck runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Run ShellCheck uses: ludeeus/action-shellcheck@master env: SHELLCHECK_OPTS: --severity=error xdp-tools-1.5.4/lib/libbpf/.github/workflows/build.yml0000644000175100001660000000453014706536574022263 0ustar runnerdockername: libbpf-build on: pull_request: push: schedule: - cron: '0 18 * * *' concurrency: group: ci-build-${{ github.head_ref }} cancel-in-progress: true jobs: debian: runs-on: ubuntu-latest name: Debian Build (${{ matrix.name }}) strategy: fail-fast: false matrix: include: - name: default target: RUN - name: ASan+UBSan target: RUN_ASAN - name: clang ASan+UBSan target: RUN_CLANG_ASAN - name: gcc-10 ASan+UBSan target: RUN_GCC10_ASAN - name: clang target: RUN_CLANG - name: clang-14 target: RUN_CLANG14 - name: clang-15 target: RUN_CLANG15 - name: clang-16 target: RUN_CLANG16 - name: gcc-10 target: RUN_GCC10 - name: gcc-11 target: RUN_GCC11 - name: gcc-12 target: RUN_GCC12 steps: - uses: actions/checkout@v4 name: Checkout - uses: ./.github/actions/setup name: Setup - uses: ./.github/actions/debian name: Build with: target: ${{ matrix.target }} ubuntu: runs-on: ubuntu-latest name: Ubuntu Build (${{ matrix.arch }}) strategy: fail-fast: false matrix: include: - 
arch: aarch64 - arch: ppc64le - arch: s390x - arch: x86 steps: - uses: actions/checkout@v4 name: Checkout - uses: ./.github/actions/setup name: Pre-Setup - run: source /tmp/ci_setup && sudo -E $CI_ROOT/managers/ubuntu.sh if: matrix.arch == 'x86' name: Setup - uses: uraimo/run-on-arch-action@v2.8.1 name: Build in docker if: matrix.arch != 'x86' with: distro: ubuntu22.04 arch: ${{ matrix.arch }} setup: cp /tmp/ci_setup $GITHUB_WORKSPACE dockerRunArgs: | --volume "${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE}" shell: /bin/bash install: | export DEBIAN_FRONTEND=noninteractive export TZ="America/Los_Angeles" apt-get update -y apt-get install -y tzdata build-essential sudo run: source ${GITHUB_WORKSPACE}/ci_setup && $CI_ROOT/managers/ubuntu.sh xdp-tools-1.5.4/lib/libbpf/.github/workflows/pahole.yml0000644000175100001660000000061014706536574022427 0ustar runnerdockername: pahole-staging on: workflow_dispatch: schedule: - cron: '0 18 * * *' jobs: vmtest: runs-on: ubuntu-20.04 name: Kernel LATEST + staging pahole env: STAGING: tmp.master steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup - uses: ./.github/actions/vmtest with: kernel: LATEST pahole: $STAGING xdp-tools-1.5.4/lib/libbpf/.github/workflows/codeql.yml0000644000175100001660000000176014706536574022435 0ustar runnerdocker--- # vi: ts=2 sw=2 et: name: "CodeQL" on: push: branches: - master pull_request: branches: - master permissions: contents: read jobs: analyze: name: Analyze runs-on: ubuntu-latest concurrency: group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }} cancel-in-progress: true permissions: actions: read security-events: write strategy: fail-fast: false matrix: language: ['cpp', 'python'] steps: - name: Checkout repository uses: actions/checkout@v4 - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} queries: +security-extended,security-and-quality - name: Setup uses: ./.github/actions/setup - name: Build run: | source /tmp/ci_setup make -C ./src - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 xdp-tools-1.5.4/lib/libbpf/.github/workflows/cifuzz.yml0000644000175100001660000000225114706536574022474 0ustar runnerdocker--- # https://google.github.io/oss-fuzz/getting-started/continuous-integration/ name: CIFuzz on: push: branches: - master pull_request: branches: - master jobs: Fuzzing: runs-on: ubuntu-latest if: github.repository == 'libbpf/libbpf' strategy: fail-fast: false matrix: sanitizer: [address, undefined, memory] steps: - name: Build Fuzzers (${{ matrix.sanitizer }}) id: build uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master with: oss-fuzz-project-name: 'libbpf' dry-run: false allowed-broken-targets-percentage: 0 sanitizer: ${{ matrix.sanitizer }} - name: Run Fuzzers (${{ matrix.sanitizer }}) uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master with: oss-fuzz-project-name: 'libbpf' fuzz-seconds: 300 dry-run: false sanitizer: ${{ matrix.sanitizer }} - name: Upload Crash uses: actions/upload-artifact@v4 if: failure() && steps.build.outcome == 'success' with: name: ${{ matrix.sanitizer }}-artifacts path: ./out/artifacts xdp-tools-1.5.4/lib/libbpf/.github/workflows/ondemand.yml0000644000175100001660000000215314706536574022750 0ustar runnerdockername: ondemand on: workflow_dispatch: inputs: kernel-origin: description: 'git repo for linux kernel' default: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git' required: true kernel-rev: description: 'rev/tag/branch for linux kernel' default: "master" 
required: true pahole-origin: description: 'git repo for pahole' default: 'https://git.kernel.org/pub/scm/devel/pahole/pahole.git' required: true pahole-rev: description: 'ref/tag/branch for pahole' default: "master" required: true jobs: vmtest: runs-on: ubuntu-latest name: vmtest with customized pahole/Kernel steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup - uses: ./.github/actions/vmtest with: kernel: 'LATEST' kernel-rev: ${{ github.event.inputs.kernel-rev }} kernel-origin: ${{ github.event.inputs.kernel-origin }} pahole: ${{ github.event.inputs.pahole-rev }} pahole-origin: ${{ github.event.inputs.pahole-origin }} xdp-tools-1.5.4/lib/libbpf/.github/workflows/test.yml0000644000175100001660000000160414706536574022142 0ustar runnerdockername: libbpf-ci on: pull_request: push: schedule: - cron: '0 18 * * *' concurrency: group: ci-test-${{ github.head_ref }} cancel-in-progress: true jobs: vmtest: runs-on: ${{ matrix.runs_on }} name: Kernel ${{ matrix.kernel }} on ${{ matrix.arch }} + selftests strategy: fail-fast: false matrix: include: - kernel: 'LATEST' runs_on: ubuntu-24.04 arch: 'x86_64' - kernel: '5.5.0' runs_on: ubuntu-24.04 arch: 'x86_64' - kernel: '4.9.0' runs_on: ubuntu-24.04 arch: 'x86_64' steps: - uses: actions/checkout@v4 name: Checkout - uses: ./.github/actions/setup name: Setup - uses: ./.github/actions/vmtest name: vmtest with: kernel: ${{ matrix.kernel }} arch: ${{ matrix.arch }} xdp-tools-1.5.4/lib/libbpf/.github/workflows/coverity.yml0000644000175100001660000000176214706536574023034 0ustar runnerdockername: libbpf-ci-coverity on: schedule: - cron: '0 18 * * *' jobs: coverity: runs-on: ubuntu-latest if: github.repository == 'libbpf/libbpf' name: Coverity steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup - name: Run coverity run: | source "${GITHUB_WORKSPACE}"/ci/vmtest/helpers.sh foldable start "Setup CI env" source /tmp/ci_setup export COVERITY_SCAN_NOTIFICATION_EMAIL="${AUTHOR_EMAIL}" export COVERITY_SCAN_BRANCH_PATTERN=${GITHUB_REF##refs/*/} export TRAVIS_BRANCH=${COVERITY_SCAN_BRANCH_PATTERN} foldable end scripts/coverity.sh env: COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }} COVERITY_SCAN_PROJECT_NAME: libbpf COVERITY_SCAN_BUILD_COMMAND_PREPEND: 'cd src/' COVERITY_SCAN_BUILD_COMMAND: 'make' - name: SCM log run: cat /home/runner/work/libbpf/libbpf/src/cov-int/scm_log.txt xdp-tools-1.5.4/lib/libbpf/.mailmap0000644000175100001660000000235514706536574016470 0ustar runnerdockerAlexei Starovoitov Antoine Tenart Benjamin Tissoires Björn Töpel Changbin Du Colin Ian King Dan Carpenter Geliang Tang Herbert Xu Jakub Kicinski Kees Cook Leo Yan Mark Starovoytov Maxim Mikityanskiy Maxim Mikityanskiy Puranjay Mohan Quentin Monnet Quentin Monnet Stanislav Fomichev Vadim Fedorenko Vadim Fedorenko xdp-tools-1.5.4/lib/libbpf/LICENSE0000644000175100001660000000003114706536574016041 0ustar runnerdockerLGPL-2.1 OR BSD-2-Clause xdp-tools-1.5.4/lib/libbpf/LICENSE.BSD-2-Clause0000644000175100001660000000316114706536574020030 0ustar runnerdockerValid-License-Identifier: BSD-2-Clause SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html Usage-Guide: To use the BSD 2-clause "Simplified" License put the following SPDX tag/value pair into a comment according to the placement guidelines in the licensing rules documentation: SPDX-License-Identifier: BSD-2-Clause License-Text: Copyright (c) 2015 The Libbpf Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xdp-tools-1.5.4/lib/libbpf/LICENSE.LGPL-2.10000644000175100001660000006542514706536574017116 0ustar runnerdockerValid-License-Identifier: LGPL-2.1 Valid-License-Identifier: LGPL-2.1+ SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html Usage-Guide: To use this license in source code, put one of the following SPDX tag/value pairs into a comment according to the placement guidelines in the licensing rules documentation. For 'GNU Lesser General Public License (LGPL) version 2.1 only' use: SPDX-License-Identifier: LGPL-2.1 For 'GNU Lesser General Public License (LGPL) version 2.1 or any later version' use: SPDX-License-Identifier: LGPL-2.1+ License-Text: GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. 
To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. 
c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. 
A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. 
Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. one line to give the library's name and an idea of what it does. Copyright (C) year name of author This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. signature of Ty Coon, 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
xdp-tools-1.5.4/lib/libbpf/include/0000755000175100001660000000000014706536574016465 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/include/uapi/0000755000175100001660000000000014706536574017423 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/include/uapi/linux/0000755000175100001660000000000014706536574020562 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/include/uapi/linux/if_link.h0000644000175100001660000010457114706536574022346 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_IF_LINK_H #define _UAPI_LINUX_IF_LINK_H #include <linux/types.h> #include <linux/netlink.h> /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; __u32 tx_packets; __u32 rx_bytes; __u32 tx_bytes; __u32 rx_errors; __u32 tx_errors; __u32 rx_dropped; __u32 tx_dropped; __u32 multicast; __u32 collisions; /* detailed rx_errors: */ __u32 rx_length_errors; __u32 rx_over_errors; __u32 rx_crc_errors; __u32 rx_frame_errors; __u32 rx_fifo_errors; __u32 rx_missed_errors; /* detailed tx_errors */ __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; }; /** * struct rtnl_link_stats64 - The main device statistics structure. * * @rx_packets: Number of good packets received by the interface. * For hardware interfaces counts all good packets received from the device * by the host, including packets which host had to drop at various stages * of processing (even in the driver). * * @tx_packets: Number of packets successfully transmitted. * For hardware interfaces counts packets which host was able to successfully * hand over to the device, which does not necessarily mean that packets * had been successfully transmitted out of the device, only that device * acknowledged it copied them out of host memory. * * @rx_bytes: Number of good received bytes, corresponding to @rx_packets. * * For IEEE 802.3 devices should count the length of Ethernet Frames * excluding the FCS. * * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets. * * For IEEE 802.3 devices should count the length of Ethernet Frames * excluding the FCS. * * @rx_errors: Total number of bad packets received on this network device. * This counter must include events counted by @rx_length_errors, * @rx_crc_errors, @rx_frame_errors and other errors not otherwise * counted. * * @tx_errors: Total number of transmit problems. * This counter must include events counted by @tx_aborted_errors, * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors, * @tx_window_errors and other errors not otherwise counted. * * @rx_dropped: Number of packets received but not processed, * e.g. due to lack of resources or unsupported protocol. * For hardware interfaces this counter may include packets discarded * due to L2 address filtering but should not include packets dropped * by the device due to buffer exhaustion which are counted separately in * @rx_missed_errors (since procfs folds those two counters together). * * @tx_dropped: Number of packets dropped on their way to transmission, * e.g. due to lack of resources. * * @multicast: Multicast packets received. * For hardware interfaces this statistic is commonly calculated * at the device level (unlike @rx_packets) and therefore may include * packets which did not reach the host. 
* * For IEEE 802.3 devices this counter may be equivalent to: * * - 30.3.1.1.21 aMulticastFramesReceivedOK * * @collisions: Number of collisions during packet transmissions. * * @rx_length_errors: Number of packets dropped due to invalid length. * Part of aggregate "frame" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter should be equivalent to a sum * of the following attributes: * * - 30.3.1.1.23 aInRangeLengthErrors * - 30.3.1.1.24 aOutOfRangeLengthField * - 30.3.1.1.25 aFrameTooLongErrors * * @rx_over_errors: Receiver FIFO overflow event counter. * * Historically the count of overflow events. Such events may be * reported in the receive descriptors or via interrupts, and may * not correspond one-to-one with dropped packets. * * The recommended interpretation for high speed interfaces is - * number of packets dropped because they did not fit into buffers * provided by the host, e.g. packets larger than MTU or next buffer * in the ring was not available for a scatter transfer. * * Part of aggregate "frame" errors in `/proc/net/dev`. * * This statistic was historically used interchangeably with * @rx_fifo_errors. * * This statistic corresponds to hardware events and is not commonly used * on software devices. * * @rx_crc_errors: Number of packets received with a CRC error. * Part of aggregate "frame" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.6 aFrameCheckSequenceErrors * * @rx_frame_errors: Receiver frame alignment errors. * Part of aggregate "frame" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter should be equivalent to: * * - 30.3.1.1.7 aAlignmentErrors * * @rx_fifo_errors: Receiver FIFO error counter. * * Historically the count of overflow events. Those events may be * reported in the receive descriptors or via interrupts, and may * not correspond one-to-one with dropped packets. * * This statistic was used interchangeably with @rx_over_errors. * Not recommended for use in drivers for high speed interfaces. * * This statistic is used on software devices, e.g. to count software * packet queue overflow (can) or sequencing errors (GRE). * * @rx_missed_errors: Count of packets missed by the host. * Folded into the "drop" counter in `/proc/net/dev`. * * Counts the number of packets dropped by the device due to lack * of buffer space. This usually indicates that the host interface * is slower than the network interface, or host is not keeping up * with the receive packet rate. * * This statistic corresponds to hardware events and is not used * on software devices. * * @tx_aborted_errors: * Part of aggregate "carrier" errors in `/proc/net/dev`. * For IEEE 802.3 devices capable of half-duplex operation this counter * must be equivalent to: * * - 30.3.1.1.11 aFramesAbortedDueToXSColls * * High speed interfaces may use this counter as a general device * discard counter. * * @tx_carrier_errors: Number of frame transmission errors due to loss * of carrier during transmission. * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.13 aCarrierSenseErrors * * @tx_fifo_errors: Number of frame transmission errors due to device * FIFO underrun / underflow. This condition occurs when the device * begins transmission of a frame but is unable to deliver the * entire frame to the transmitter in time for transmission. * Part of aggregate "carrier" errors in `/proc/net/dev`. 
* * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for * old half-duplex Ethernet. * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices possibly equivalent to: * * - 30.3.2.1.4 aSQETestErrors * * @tx_window_errors: Number of frame transmission errors due * to late collisions (for Ethernet - after the first 64B of transmission). * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.10 aLateCollisions * * @rx_compressed: Number of correctly received compressed packets. * This counter is only meaningful for interfaces which support * packet compression (e.g. CSLIP, PPP). * * @tx_compressed: Number of transmitted compressed packets. * This counter is only meaningful for interfaces which support * packet compression (e.g. CSLIP, PPP). * * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). * * @rx_otherhost_dropped: Number of packets dropped due to mismatch * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; /* detailed tx_errors */ __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; __u64 rx_otherhost_dropped; }; /* Subset of link stats useful for in-HW collection. Meaning of the fields is as * for struct rtnl_link_stats64. */ struct rtnl_hw_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; }; /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; /* * IFLA_AF_SPEC * Contains nested attributes for address family specific attributes. * Each address family may create an attribute with the address family * number as type and create its own attribute structure in it. 
* * Example: * [IFLA_AF_SPEC] = { * [AF_INET] = { * [IFLA_INET_CONF] = ..., * }, * [AF_INET6] = { * [IFLA_INET6_FLAGS] = ..., * [IFLA_INET6_CONF] = ..., * } * } */ enum { IFLA_UNSPEC, IFLA_ADDRESS, IFLA_BROADCAST, IFLA_IFNAME, IFLA_MTU, IFLA_LINK, IFLA_QDISC, IFLA_STATS, IFLA_COST, #define IFLA_COST IFLA_COST IFLA_PRIORITY, #define IFLA_PRIORITY IFLA_PRIORITY IFLA_MASTER, #define IFLA_MASTER IFLA_MASTER IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */ #define IFLA_WIRELESS IFLA_WIRELESS IFLA_PROTINFO, /* Protocol specific information for a link */ #define IFLA_PROTINFO IFLA_PROTINFO IFLA_TXQLEN, #define IFLA_TXQLEN IFLA_TXQLEN IFLA_MAP, #define IFLA_MAP IFLA_MAP IFLA_WEIGHT, #define IFLA_WEIGHT IFLA_WEIGHT IFLA_OPERSTATE, IFLA_LINKMODE, IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, IFLA_IFALIAS, IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ IFLA_VFINFO_LIST, IFLA_STATS64, IFLA_VF_PORTS, IFLA_PORT_SELF, IFLA_AF_SPEC, IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */ #define IFLA_PROMISCUITY IFLA_PROMISCUITY IFLA_NUM_TX_QUEUES, IFLA_NUM_RX_QUEUES, IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, IFLA_PHYS_PORT_NAME, IFLA_PROTO_DOWN, IFLA_GSO_MAX_SEGS, IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, IFLA_MIN_MTU, IFLA_MAX_MTU, IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, IFLA_PROTO_DOWN_REASON, /* device (sysfs) name as parent, used instead * of IFLA_LINK where there's no parent netdev */ IFLA_PARENT_DEV_NAME, IFLA_PARENT_DEV_BUS_NAME, IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ IFLA_DEVLINK_PORT, IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, IFLA_DPLL_PIN, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) enum { IFLA_PROTO_DOWN_REASON_UNSPEC, IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ __IFLA_PROTO_DOWN_REASON_CNT, IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 }; /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) #endif enum { IFLA_INET_UNSPEC, IFLA_INET_CONF, __IFLA_INET_MAX, }; #define IFLA_INET_MAX (__IFLA_INET_MAX - 1) /* ifi_flags. IFF_* flags. The only change is: IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT can no longer be changed by the user. They describe link media characteristics and are set by the device driver. Comments: - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid - If none of these three flags is set, the interface is NBMA. - IFF_MULTICAST does not mean anything special: multicasts can be used on all not-NBMA links. IFF_MULTICAST means that this media uses special encapsulation for multicast frames. Apparently, all IFF_POINTOPOINT and IFF_BROADCAST devices are able to use multicasts too. */ /* IFLA_LINK. For usual devices it is equal to ifi_index. If it is a "virtual interface" (e.g. a tunnel), ifi_link can point to the real physical interface (e.g. 
for bandwidth calculations), or maybe 0, which means that the real media is unknown (usual for IPIP tunnels, where the route to the endpoint is allowed to change) */ /* Subtype attributes for IFLA_PROTINFO */ enum { IFLA_INET6_UNSPEC, IFLA_INET6_FLAGS, /* link flags */ IFLA_INET6_CONF, /* sysctl parameters */ IFLA_INET6_STATS, /* statistics */ IFLA_INET6_MCAST, /* MC things. What of them? */ IFLA_INET6_CACHEINFO, /* time values and max reasm size */ IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ IFLA_INET6_RA_MTU, /* mtu carried in the RA message */ __IFLA_INET6_MAX }; #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1) enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ enum { IFLA_BR_UNSPEC, IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, IFLA_BR_AGEING_TIME, IFLA_BR_STP_STATE, IFLA_BR_PRIORITY, IFLA_BR_VLAN_FILTERING, IFLA_BR_VLAN_PROTOCOL, IFLA_BR_GROUP_FWD_MASK, IFLA_BR_ROOT_ID, IFLA_BR_BRIDGE_ID, IFLA_BR_ROOT_PORT, IFLA_BR_ROOT_PATH_COST, IFLA_BR_TOPOLOGY_CHANGE, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, IFLA_BR_HELLO_TIMER, IFLA_BR_TCN_TIMER, IFLA_BR_TOPOLOGY_CHANGE_TIMER, IFLA_BR_GC_TIMER, IFLA_BR_GROUP_ADDR, IFLA_BR_FDB_FLUSH, IFLA_BR_MCAST_ROUTER, IFLA_BR_MCAST_SNOOPING, IFLA_BR_MCAST_QUERY_USE_IFADDR, IFLA_BR_MCAST_QUERIER, IFLA_BR_MCAST_HASH_ELASTICITY, IFLA_BR_MCAST_HASH_MAX, IFLA_BR_MCAST_LAST_MEMBER_CNT, IFLA_BR_MCAST_STARTUP_QUERY_CNT, IFLA_BR_MCAST_LAST_MEMBER_INTVL, IFLA_BR_MCAST_MEMBERSHIP_INTVL, IFLA_BR_MCAST_QUERIER_INTVL, IFLA_BR_MCAST_QUERY_INTVL, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, IFLA_BR_NF_CALL_IPTABLES, IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_PAD, IFLA_BR_VLAN_STATS_ENABLED, IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, IFLA_BR_MCAST_QUERIER_STATE, __IFLA_BR_MAX, }; #define IFLA_BR_MAX (__IFLA_BR_MAX - 1) struct ifla_bridge_id { __u8 prio[2]; __u8 addr[6]; /* ETH_ALEN */ }; enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, }; enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ IFLA_BRPORT_PRIORITY, /* " priority */ IFLA_BRPORT_COST, /* " cost */ IFLA_BRPORT_MODE, /* mode (hairpin) */ IFLA_BRPORT_GUARD, /* bpdu guard */ IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ IFLA_BRPORT_ROOT_ID, /* designated root */ IFLA_BRPORT_BRIDGE_ID, /* designated bridge */ IFLA_BRPORT_DESIGNATED_PORT, IFLA_BRPORT_DESIGNATED_COST, IFLA_BRPORT_ID, IFLA_BRPORT_NO, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, IFLA_BRPORT_CONFIG_PENDING, IFLA_BRPORT_MESSAGE_AGE_TIMER, IFLA_BRPORT_FORWARD_DELAY_TIMER, IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_FLUSH, IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, 
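/* Like the other attribute enums in this uapi header, new IFLA_BRPORT_*
 * values are only ever appended here, before __IFLA_BRPORT_MAX: the
 * attribute numbers are kernel ABI and must stay stable for existing
 * userspace binaries. */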
IFLA_BRPORT_LOCKED, IFLA_BRPORT_MAB, IFLA_BRPORT_MCAST_N_GROUPS, IFLA_BRPORT_MCAST_MAX_GROUPS, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; /* ipv6InterfaceTable updated timestamp */ __u32 reachable_time; __u32 retrans_time; }; enum { IFLA_INFO_UNSPEC, IFLA_INFO_KIND, IFLA_INFO_DATA, IFLA_INFO_XSTATS, IFLA_INFO_SLAVE_KIND, IFLA_INFO_SLAVE_DATA, __IFLA_INFO_MAX, }; #define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1) /* VLAN section */ enum { IFLA_VLAN_UNSPEC, IFLA_VLAN_ID, IFLA_VLAN_FLAGS, IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_INGRESS_QOS, IFLA_VLAN_PROTOCOL, __IFLA_VLAN_MAX, }; #define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1) struct ifla_vlan_flags { __u32 flags; __u32 mask; }; enum { IFLA_VLAN_QOS_UNSPEC, IFLA_VLAN_QOS_MAPPING, __IFLA_VLAN_QOS_MAX }; #define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1) struct ifla_vlan_qos_mapping { __u32 from; __u32 to; }; /* MACVLAN section */ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, IFLA_MACVLAN_MACADDR_MODE, IFLA_MACVLAN_MACADDR, IFLA_MACVLAN_MACADDR_DATA, IFLA_MACVLAN_MACADDR_COUNT, IFLA_MACVLAN_BC_QUEUE_LEN, IFLA_MACVLAN_BC_QUEUE_LEN_USED, IFLA_MACVLAN_BC_CUTOFF, __IFLA_MACVLAN_MAX, }; #define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1) enum macvlan_mode { MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ }; enum macvlan_macaddr_mode { MACVLAN_MACADDR_ADD, MACVLAN_MACADDR_DEL, MACVLAN_MACADDR_FLUSH, MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 #define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */ /* VRF section */ enum { IFLA_VRF_UNSPEC, IFLA_VRF_TABLE, __IFLA_VRF_MAX }; #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) enum { IFLA_VRF_PORT_UNSPEC, IFLA_VRF_PORT_TABLE, __IFLA_VRF_PORT_MAX }; #define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) /* MACSEC section */ enum { IFLA_MACSEC_UNSPEC, IFLA_MACSEC_SCI, IFLA_MACSEC_PORT, IFLA_MACSEC_ICV_LEN, IFLA_MACSEC_CIPHER_SUITE, IFLA_MACSEC_WINDOW, IFLA_MACSEC_ENCODING_SA, IFLA_MACSEC_ENCRYPT, IFLA_MACSEC_PROTECT, IFLA_MACSEC_INC_SCI, IFLA_MACSEC_ES, IFLA_MACSEC_SCB, IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, IFLA_MACSEC_OFFLOAD, __IFLA_MACSEC_MAX, }; #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) /* XFRM section */ enum { IFLA_XFRM_UNSPEC, IFLA_XFRM_LINK, IFLA_XFRM_IF_ID, IFLA_XFRM_COLLECT_METADATA, __IFLA_XFRM_MAX }; #define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, MACSEC_VALIDATE_STRICT = 2, __MACSEC_VALIDATE_END, MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; enum macsec_offload { MACSEC_OFFLOAD_OFF = 0, MACSEC_OFFLOAD_PHY = 1, MACSEC_OFFLOAD_MAC = 2, __MACSEC_OFFLOAD_END, MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, }; /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, IFLA_IPVLAN_MODE, IFLA_IPVLAN_FLAGS, __IFLA_IPVLAN_MAX }; #define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) enum ipvlan_mode { IPVLAN_MODE_L2 = 0, IPVLAN_MODE_L3, IPVLAN_MODE_L3S, IPVLAN_MODE_MAX }; #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 /* Tunnel RTM header */ struct tunnel_msg { __u8 family; __u8 flags; __u16 reserved2; __u32 ifindex; }; /* netkit section */ enum 
netkit_action { NETKIT_NEXT = -1, NETKIT_PASS = 0, NETKIT_DROP = 2, NETKIT_REDIRECT = 7, }; enum netkit_mode { NETKIT_L2, NETKIT_L3, }; enum { IFLA_NETKIT_UNSPEC, IFLA_NETKIT_PEER_INFO, IFLA_NETKIT_PRIMARY, IFLA_NETKIT_POLICY, IFLA_NETKIT_PEER_POLICY, IFLA_NETKIT_MODE, __IFLA_NETKIT_MAX, }; #define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) /* VXLAN section */ /* include statistics in the dump */ #define TUNNEL_MSG_FLAG_STATS 0x01 #define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS /* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ enum { VNIFILTER_ENTRY_STATS_UNSPEC, VNIFILTER_ENTRY_STATS_RX_BYTES, VNIFILTER_ENTRY_STATS_RX_PKTS, VNIFILTER_ENTRY_STATS_RX_DROPS, VNIFILTER_ENTRY_STATS_RX_ERRORS, VNIFILTER_ENTRY_STATS_TX_BYTES, VNIFILTER_ENTRY_STATS_TX_PKTS, VNIFILTER_ENTRY_STATS_TX_DROPS, VNIFILTER_ENTRY_STATS_TX_ERRORS, VNIFILTER_ENTRY_STATS_PAD, __VNIFILTER_ENTRY_STATS_MAX }; #define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) enum { VXLAN_VNIFILTER_ENTRY_UNSPEC, VXLAN_VNIFILTER_ENTRY_START, VXLAN_VNIFILTER_ENTRY_END, VXLAN_VNIFILTER_ENTRY_GROUP, VXLAN_VNIFILTER_ENTRY_GROUP6, VXLAN_VNIFILTER_ENTRY_STATS, __VXLAN_VNIFILTER_ENTRY_MAX }; #define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) enum { VXLAN_VNIFILTER_UNSPEC, VXLAN_VNIFILTER_ENTRY, __VXLAN_VNIFILTER_MAX }; #define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, /* group or remote address */ IFLA_VXLAN_LINK, IFLA_VXLAN_LOCAL, IFLA_VXLAN_TTL, IFLA_VXLAN_TOS, IFLA_VXLAN_LEARNING, IFLA_VXLAN_AGEING, IFLA_VXLAN_LIMIT, IFLA_VXLAN_PORT_RANGE, /* source port */ IFLA_VXLAN_PROXY, IFLA_VXLAN_RSC, IFLA_VXLAN_L2MISS, IFLA_VXLAN_L3MISS, IFLA_VXLAN_PORT, /* destination port */ IFLA_VXLAN_GROUP6, IFLA_VXLAN_LOCAL6, IFLA_VXLAN_UDP_CSUM, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, IFLA_VXLAN_REMCSUM_TX, IFLA_VXLAN_REMCSUM_RX, IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) struct ifla_vxlan_port_range { __be16 low; __be16 high; }; enum ifla_vxlan_df { VXLAN_DF_UNSET = 0, VXLAN_DF_SET, VXLAN_DF_INHERIT, __VXLAN_DF_END, VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, IFLA_GENEVE_TTL, IFLA_GENEVE_TOS, IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, IFLA_GENEVE_REMOTE6, IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) enum ifla_geneve_df { GENEVE_DF_UNSET = 0, GENEVE_DF_SET, GENEVE_DF_INHERIT, __GENEVE_DF_END, GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; /* Bareudp section */ enum { IFLA_BAREUDP_UNSPEC, IFLA_BAREUDP_PORT, IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, __IFLA_BAREUDP_MAX }; #define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1) /* PPP section */ enum { IFLA_PPP_UNSPEC, IFLA_PPP_DEV_FD, __IFLA_PPP_MAX }; #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) /* GTP section */ enum ifla_gtp_role { GTP_ROLE_GGSN = 0, GTP_ROLE_SGSN, }; enum { IFLA_GTP_UNSPEC, IFLA_GTP_FD0, IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, IFLA_GTP_CREATE_SOCKETS, IFLA_GTP_RESTART_COUNT, 
__IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) /* Bonding section */ enum { IFLA_BOND_UNSPEC, IFLA_BOND_MODE, IFLA_BOND_ACTIVE_SLAVE, IFLA_BOND_MIIMON, IFLA_BOND_UPDELAY, IFLA_BOND_DOWNDELAY, IFLA_BOND_USE_CARRIER, IFLA_BOND_ARP_INTERVAL, IFLA_BOND_ARP_IP_TARGET, IFLA_BOND_ARP_VALIDATE, IFLA_BOND_ARP_ALL_TARGETS, IFLA_BOND_PRIMARY, IFLA_BOND_PRIMARY_RESELECT, IFLA_BOND_FAIL_OVER_MAC, IFLA_BOND_XMIT_HASH_POLICY, IFLA_BOND_RESEND_IGMP, IFLA_BOND_NUM_PEER_NOTIF, IFLA_BOND_ALL_SLAVES_ACTIVE, IFLA_BOND_MIN_LINKS, IFLA_BOND_LP_INTERVAL, IFLA_BOND_PACKETS_PER_SLAVE, IFLA_BOND_AD_LACP_RATE, IFLA_BOND_AD_SELECT, IFLA_BOND_AD_INFO, IFLA_BOND_AD_ACTOR_SYS_PRIO, IFLA_BOND_AD_USER_PORT_KEY, IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, IFLA_BOND_PEER_NOTIF_DELAY, IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, IFLA_BOND_NS_IP6_TARGET, IFLA_BOND_COUPLED_CONTROL, __IFLA_BOND_MAX, }; #define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1) enum { IFLA_BOND_AD_INFO_UNSPEC, IFLA_BOND_AD_INFO_AGGREGATOR, IFLA_BOND_AD_INFO_NUM_PORTS, IFLA_BOND_AD_INFO_ACTOR_KEY, IFLA_BOND_AD_INFO_PARTNER_KEY, IFLA_BOND_AD_INFO_PARTNER_MAC, __IFLA_BOND_AD_INFO_MAX, }; #define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1) enum { IFLA_BOND_SLAVE_UNSPEC, IFLA_BOND_SLAVE_STATE, IFLA_BOND_SLAVE_MII_STATUS, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, IFLA_BOND_SLAVE_PERM_HWADDR, IFLA_BOND_SLAVE_QUEUE_ID, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, IFLA_BOND_SLAVE_PRIO, __IFLA_BOND_SLAVE_MAX, }; #define IFLA_BOND_SLAVE_MAX (__IFLA_BOND_SLAVE_MAX - 1) /* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, IFLA_VF_INFO, __IFLA_VF_INFO_MAX, }; #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) enum { IFLA_VF_UNSPEC, IFLA_VF_MAC, /* Hardware queue specific attributes */ IFLA_VF_VLAN, /* VLAN ID and QoS */ IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query * on/off switch */ IFLA_VF_STATS, /* network device statistics */ IFLA_VF_TRUST, /* Trust VF */ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ IFLA_VF_BROADCAST, /* VF broadcast */ __IFLA_VF_MAX, }; #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) struct ifla_vf_mac { __u32 vf; __u8 mac[32]; /* MAX_ADDR_LEN */ }; struct ifla_vf_broadcast { __u8 broadcast[32]; }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; }; enum { IFLA_VF_VLAN_INFO_UNSPEC, IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */ __IFLA_VF_VLAN_INFO_MAX, }; #define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1) #define MAX_VLAN_LIST_LEN 1 struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ }; struct ifla_vf_rate { __u32 vf; __u32 min_tx_rate; /* Min Bandwidth in Mbps */ __u32 max_tx_rate; /* Max Bandwidth in Mbps */ }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; enum { IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ IFLA_VF_LINK_STATE_ENABLE, /* 
link always up */ IFLA_VF_LINK_STATE_DISABLE, /* link always down */ __IFLA_VF_LINK_STATE_MAX, }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; enum { IFLA_VF_STATS_RX_PACKETS, IFLA_VF_STATS_TX_PACKETS, IFLA_VF_STATS_RX_BYTES, IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, IFLA_VF_STATS_RX_DROPPED, IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) struct ifla_vf_trust { __u32 vf; __u32 setting; }; /* VF ports management section * * Nested layout of set/get msg is: * * [IFLA_NUM_VF] * [IFLA_VF_PORTS] * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * ... * [IFLA_PORT_SELF] * [IFLA_PORT_*], ... */ enum { IFLA_VF_PORT_UNSPEC, IFLA_VF_PORT, /* nest */ __IFLA_VF_PORT_MAX, }; #define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1) enum { IFLA_PORT_UNSPEC, IFLA_PORT_VF, /* __u32 */ IFLA_PORT_PROFILE, /* string */ IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */ IFLA_PORT_INSTANCE_UUID, /* binary UUID */ IFLA_PORT_HOST_UUID, /* binary UUID */ IFLA_PORT_REQUEST, /* __u8 */ IFLA_PORT_RESPONSE, /* __u16, output only */ __IFLA_PORT_MAX, }; #define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1) #define PORT_PROFILE_MAX 40 #define PORT_UUID_MAX 16 #define PORT_SELF_VF -1 enum { PORT_REQUEST_PREASSOCIATE = 0, PORT_REQUEST_PREASSOCIATE_RR, PORT_REQUEST_ASSOCIATE, PORT_REQUEST_DISASSOCIATE, }; enum { PORT_VDP_RESPONSE_SUCCESS = 0, PORT_VDP_RESPONSE_INVALID_FORMAT, PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, PORT_VDP_RESPONSE_UNUSED_VTID, PORT_VDP_RESPONSE_VTID_VIOLATION, PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, PORT_VDP_RESPONSE_OUT_OF_SYNC, /* 0x08-0xFF reserved for future VDP use */ PORT_PROFILE_RESPONSE_SUCCESS = 0x100, PORT_PROFILE_RESPONSE_INPROGRESS, PORT_PROFILE_RESPONSE_INVALID, PORT_PROFILE_RESPONSE_BADSTATE, PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, PORT_PROFILE_RESPONSE_ERROR, }; struct ifla_port_vsi { __u8 vsi_mgr_id; __u8 vsi_type_id[3]; __u8 vsi_type_version; __u8 pad[3]; }; /* IPoIB section */ enum { IFLA_IPOIB_UNSPEC, IFLA_IPOIB_PKEY, IFLA_IPOIB_MODE, IFLA_IPOIB_UMCAST, __IFLA_IPOIB_MAX }; enum { IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */ IPOIB_MODE_CONNECTED = 1, /* using connected QPs */ }; #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) /* HSR/PRP section, both use the same interface */ /* Different redundancy protocols for hsr device */ enum { HSR_PROTOCOL_HSR, HSR_PROTOCOL_PRP, HSR_PROTOCOL_MAX, }; enum { IFLA_HSR_UNSPEC, IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ IFLA_HSR_PROTOCOL, /* Indicates a different protocol than * HSR. For example PRP. */ __IFLA_HSR_MAX, }; #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1) /* STATS section */ struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; /* A stats attribute can be netdev specific or a global stat. 
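 * A dump request selects attribute groups via the filter_mask in
 * struct if_stats_msg above; IFLA_STATS_FILTER_BIT(ATTR), defined below
 * as (1 << (ATTR - 1)), maps an attribute to its mask bit, e.g.
 * IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64) == 1 << 0 and
 * IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_XSTATS) == 1 << 1.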
* For netdev stats, let's use the prefix IFLA_STATS_LINK_* */ enum { IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ IFLA_STATS_LINK_64, IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; #define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1) #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) enum { IFLA_STATS_GETSET_UNSPEC, IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with * a filter mask for the corresponding group. */ IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ __IFLA_STATS_GETSET_MAX, }; #define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] * -> [rtnl link type specific attributes] */ enum { LINK_XSTATS_TYPE_UNSPEC, LINK_XSTATS_TYPE_BRIDGE, LINK_XSTATS_TYPE_BOND, __LINK_XSTATS_TYPE_MAX }; #define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) /* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */ enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) enum { IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, }; #define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) #define XDP_FLAGS_REPLACE (1U << 4) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ XDP_FLAGS_MODES | XDP_FLAGS_REPLACE) /* These are stored into IFLA_XDP_ATTACHED on dump. */ enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, XDP_ATTACHED_MULTI, }; enum { IFLA_XDP_UNSPEC, IFLA_XDP_FD, IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID, IFLA_XDP_EXPECTED_FD, __IFLA_XDP_MAX, }; #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) enum { IFLA_EVENT_NONE, IFLA_EVENT_REBOOT, /* internal reset / reboot */ IFLA_EVENT_FEATURES, /* change in offload features */ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. 
arp/ndisc */ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; /* tun section */ enum { IFLA_TUN_UNSPEC, IFLA_TUN_OWNER, IFLA_TUN_GROUP, IFLA_TUN_TYPE, IFLA_TUN_PI, IFLA_TUN_VNET_HDR, IFLA_TUN_PERSIST, IFLA_TUN_MULTI_QUEUE, IFLA_TUN_NUM_QUEUES, IFLA_TUN_NUM_DISABLED_QUEUES, __IFLA_TUN_MAX, }; #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) /* rmnet section */ #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC, IFLA_RMNET_MUX_ID, IFLA_RMNET_FLAGS, __IFLA_RMNET_MAX, }; #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) struct ifla_rmnet_flags { __u32 flags; __u32 mask; }; /* MCTP section */ enum { IFLA_MCTP_UNSPEC, IFLA_MCTP_NET, __IFLA_MCTP_MAX, }; #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) /* DSA section */ enum { IFLA_DSA_UNSPEC, IFLA_DSA_MASTER, __IFLA_DSA_MAX, }; #define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) #endif /* _UAPI_LINUX_IF_LINK_H */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/pkt_cls.h0000644000175100001660000003041114706536574022371 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __LINUX_PKT_CLS_H #define __LINUX_PKT_CLS_H #include <linux/types.h> #include <linux/pkt_sched.h> #define TC_COOKIE_MAX_SIZE 16 /* Action attributes */ enum { TCA_ACT_UNSPEC, TCA_ACT_KIND, TCA_ACT_OPTIONS, TCA_ACT_INDEX, TCA_ACT_STATS, TCA_ACT_PAD, TCA_ACT_COOKIE, __TCA_ACT_MAX }; #define TCA_ACT_MAX __TCA_ACT_MAX #define TCA_OLD_COMPAT (TCA_ACT_MAX+1) #define TCA_ACT_MAX_PRIO 32 #define TCA_ACT_BIND 1 #define TCA_ACT_NOBIND 0 #define TCA_ACT_UNBIND 1 #define TCA_ACT_NOUNBIND 0 #define TCA_ACT_REPLACE 1 #define TCA_ACT_NOREPLACE 0 #define TC_ACT_UNSPEC (-1) #define TC_ACT_OK 0 #define TC_ACT_RECLASSIFY 1 #define TC_ACT_SHOT 2 #define TC_ACT_PIPE 3 #define TC_ACT_STOLEN 4 #define TC_ACT_QUEUED 5 #define TC_ACT_REPEAT 6 #define TC_ACT_REDIRECT 7 #define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu" * and don't further process the frame * in hardware. For sw path, this is * equivalent of TC_ACT_STOLEN - drop * the skb and act like everything * is alright. */ #define TC_ACT_VALUE_MAX TC_ACT_TRAP /* There is a special kind of action called an "extended action", * which needs a value parameter. These have a local opcode located in * the highest nibble, starting from 1. The rest of the bits * are used to carry the value. These two parts together make * a combined opcode. 
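 * For example, with __TC_ACT_EXT_SHIFT == 28 as defined below,
 * TC_ACT_GOTO_CHAIN is __TC_ACT_EXT(2) == 0x20000000, so "goto chain 5"
 * is encoded as TC_ACT_GOTO_CHAIN | 5 == 0x20000005;
 * TC_ACT_EXT_OPCODE() masks off the low 28 value bits to recover the
 * opcode, and those value bits carry the chain index.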
*/ #define __TC_ACT_EXT_SHIFT 28 #define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT) #define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1) #define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK)) #define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode) #define TC_ACT_JUMP __TC_ACT_EXT(1) #define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2) #define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN /* Action type identifiers*/ enum { TCA_ID_UNSPEC=0, TCA_ID_POLICE=1, /* other actions go here */ __TCA_ID_MAX=255 }; #define TCA_ID_MAX __TCA_ID_MAX struct tc_police { __u32 index; int action; #define TC_POLICE_UNSPEC TC_ACT_UNSPEC #define TC_POLICE_OK TC_ACT_OK #define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY #define TC_POLICE_SHOT TC_ACT_SHOT #define TC_POLICE_PIPE TC_ACT_PIPE __u32 limit; __u32 burst; __u32 mtu; struct tc_ratespec rate; struct tc_ratespec peakrate; int refcnt; int bindcnt; __u32 capab; }; struct tcf_t { __u64 install; __u64 lastuse; __u64 expires; __u64 firstuse; }; struct tc_cnt { int refcnt; int bindcnt; }; #define tc_gen \ __u32 index; \ __u32 capab; \ int action; \ int refcnt; \ int bindcnt enum { TCA_POLICE_UNSPEC, TCA_POLICE_TBF, TCA_POLICE_RATE, TCA_POLICE_PEAKRATE, TCA_POLICE_AVRATE, TCA_POLICE_RESULT, TCA_POLICE_TM, TCA_POLICE_PAD, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) /* tca flags definitions */ #define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */ #define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */ #define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */ #define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */ #define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */ /* U32 filters */ #define TC_U32_HTID(h) ((h)&0xFFF00000) #define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20) #define TC_U32_HASH(h) (((h)>>12)&0xFF) #define TC_U32_NODE(h) ((h)&0xFFF) #define TC_U32_KEY(h) ((h)&0xFFFFF) #define TC_U32_UNSPEC 0 #define TC_U32_ROOT (0xFFF00000) enum { TCA_U32_UNSPEC, TCA_U32_CLASSID, TCA_U32_HASH, TCA_U32_LINK, TCA_U32_DIVISOR, TCA_U32_SEL, TCA_U32_POLICE, TCA_U32_ACT, TCA_U32_INDEV, TCA_U32_PCNT, TCA_U32_MARK, TCA_U32_FLAGS, TCA_U32_PAD, __TCA_U32_MAX }; #define TCA_U32_MAX (__TCA_U32_MAX - 1) struct tc_u32_key { __be32 mask; __be32 val; int off; int offmask; }; struct tc_u32_sel { unsigned char flags; unsigned char offshift; unsigned char nkeys; __be16 offmask; __u16 off; short offoff; short hoff; __be32 hmask; struct tc_u32_key keys[]; }; struct tc_u32_mark { __u32 val; __u32 mask; __u32 success; }; struct tc_u32_pcnt { __u64 rcnt; __u64 rhit; __u64 kcnts[]; }; /* Flags */ #define TC_U32_TERMINAL 1 #define TC_U32_OFFSET 2 #define TC_U32_VAROFFSET 4 #define TC_U32_EAT 8 #define TC_U32_MAXDEPTH 8 /* ROUTE filter */ enum { TCA_ROUTE4_UNSPEC, TCA_ROUTE4_CLASSID, TCA_ROUTE4_TO, TCA_ROUTE4_FROM, TCA_ROUTE4_IIF, TCA_ROUTE4_POLICE, TCA_ROUTE4_ACT, __TCA_ROUTE4_MAX }; #define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1) /* FW filter */ enum { TCA_FW_UNSPEC, TCA_FW_CLASSID, TCA_FW_POLICE, TCA_FW_INDEV, TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */ TCA_FW_MASK, __TCA_FW_MAX }; #define TCA_FW_MAX (__TCA_FW_MAX - 1) /* Flow filter */ enum { FLOW_KEY_SRC, FLOW_KEY_DST, FLOW_KEY_PROTO, FLOW_KEY_PROTO_SRC, FLOW_KEY_PROTO_DST, FLOW_KEY_IIF, FLOW_KEY_PRIORITY, FLOW_KEY_MARK, FLOW_KEY_NFCT, FLOW_KEY_NFCT_SRC, FLOW_KEY_NFCT_DST, FLOW_KEY_NFCT_PROTO_SRC, FLOW_KEY_NFCT_PROTO_DST, FLOW_KEY_RTCLASSID, FLOW_KEY_SKUID, FLOW_KEY_SKGID, 
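/* Each FLOW_KEY_* id names one packet or conntrack field that the flow
 * classifier can feed into its key; TCA_FLOW_MODE below selects whether
 * the resulting key is mapped or hashed onto a class id. */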
FLOW_KEY_VLAN_TAG, FLOW_KEY_RXHASH, __FLOW_KEY_MAX, }; #define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1) enum { FLOW_MODE_MAP, FLOW_MODE_HASH, }; enum { TCA_FLOW_UNSPEC, TCA_FLOW_KEYS, TCA_FLOW_MODE, TCA_FLOW_BASECLASS, TCA_FLOW_RSHIFT, TCA_FLOW_ADDEND, TCA_FLOW_MASK, TCA_FLOW_XOR, TCA_FLOW_DIVISOR, TCA_FLOW_ACT, TCA_FLOW_POLICE, TCA_FLOW_EMATCHES, TCA_FLOW_PERTURB, __TCA_FLOW_MAX }; #define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1) /* Basic filter */ enum { TCA_BASIC_UNSPEC, TCA_BASIC_CLASSID, TCA_BASIC_EMATCHES, TCA_BASIC_ACT, TCA_BASIC_POLICE, __TCA_BASIC_MAX }; #define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1) /* Cgroup classifier */ enum { TCA_CGROUP_UNSPEC, TCA_CGROUP_ACT, TCA_CGROUP_POLICE, TCA_CGROUP_EMATCHES, __TCA_CGROUP_MAX, }; #define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1) /* BPF classifier */ #define TCA_BPF_FLAG_ACT_DIRECT (1 << 0) enum { TCA_BPF_UNSPEC, TCA_BPF_ACT, TCA_BPF_POLICE, TCA_BPF_CLASSID, TCA_BPF_OPS_LEN, TCA_BPF_OPS, TCA_BPF_FD, TCA_BPF_NAME, TCA_BPF_FLAGS, TCA_BPF_FLAGS_GEN, TCA_BPF_TAG, TCA_BPF_ID, __TCA_BPF_MAX, }; #define TCA_BPF_MAX (__TCA_BPF_MAX - 1) /* Flower classifier */ enum { TCA_FLOWER_UNSPEC, TCA_FLOWER_CLASSID, TCA_FLOWER_INDEV, TCA_FLOWER_ACT, TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */ TCA_FLOWER_KEY_IP_PROTO, /* u8 */ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */ TCA_FLOWER_KEY_IPV4_DST, /* be32 */ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */ TCA_FLOWER_KEY_TCP_SRC, /* be16 */ TCA_FLOWER_KEY_TCP_DST, /* be16 */ TCA_FLOWER_KEY_UDP_SRC, /* be16 */ TCA_FLOWER_KEY_UDP_DST, /* be16 */ TCA_FLOWER_FLAGS, TCA_FLOWER_KEY_VLAN_ID, /* be16 */ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */ TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */ TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */ TCA_FLOWER_KEY_SCTP_SRC, /* be16 */ TCA_FLOWER_KEY_SCTP_DST, /* be16 */ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */ TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */ TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */ TCA_FLOWER_KEY_FLAGS, /* be32 */ TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */ TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */ TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */ TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */ TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */ TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */ TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */ TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */ TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */ TCA_FLOWER_KEY_ARP_SIP, /* be32 */ TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */ TCA_FLOWER_KEY_ARP_TIP, /* be32 */ TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */ TCA_FLOWER_KEY_ARP_OP, /* u8 */ TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 
*/ TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */ TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */ TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */ TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */ TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */ TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */ TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */ TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */ TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */ TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */ TCA_FLOWER_KEY_IP_TOS, /* u8 */ TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */ TCA_FLOWER_KEY_IP_TTL, /* u8 */ TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ TCA_FLOWER_KEY_CVLAN_ID, /* be16 */ TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */ TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */ TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */ TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */ TCA_FLOWER_KEY_ENC_OPTS, TCA_FLOWER_KEY_ENC_OPTS_MASK, TCA_FLOWER_IN_HW_COUNT, __TCA_FLOWER_MAX, }; #define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1) enum { TCA_FLOWER_KEY_ENC_OPTS_UNSPEC, TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested * TCA_FLOWER_KEY_ENC_OPT_GENEVE_ * attributes */ __TCA_FLOWER_KEY_ENC_OPTS_MAX, }; #define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1) enum { TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */ TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */ TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */ __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, }; #define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \ (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1) enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), }; /* Match-all classifier */ enum { TCA_MATCHALL_UNSPEC, TCA_MATCHALL_CLASSID, TCA_MATCHALL_ACT, TCA_MATCHALL_FLAGS, __TCA_MATCHALL_MAX, }; #define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1) /* Extended Matches */ struct tcf_ematch_tree_hdr { __u16 nmatches; __u16 progid; }; enum { TCA_EMATCH_TREE_UNSPEC, TCA_EMATCH_TREE_HDR, TCA_EMATCH_TREE_LIST, __TCA_EMATCH_TREE_MAX }; #define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1) struct tcf_ematch_hdr { __u16 matchid; __u16 kind; __u16 flags; __u16 pad; /* currently unused */ }; /* 0 1 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * +-----------------------+-+-+---+ * | Unused |S|I| R | * +-----------------------+-+-+---+ * * R(2) ::= relation to next ematch * where: 0 0 END (last ematch) * 0 1 AND * 1 0 OR * 1 1 Unused (invalid) * I(1) ::= invert result * S(1) ::= simple payload */ #define TCF_EM_REL_END 0 #define TCF_EM_REL_AND (1<<0) #define TCF_EM_REL_OR (1<<1) #define TCF_EM_INVERT (1<<2) #define TCF_EM_SIMPLE (1<<3) #define TCF_EM_REL_MASK 3 #define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK) enum { TCF_LAYER_LINK, TCF_LAYER_NETWORK, TCF_LAYER_TRANSPORT, __TCF_LAYER_MAX }; #define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1) /* Ematch type assignments * 1..32767 Reserved for ematches inside kernel tree * 32768..65535 Free to use, not reliable */ #define TCF_EM_CONTAINER 0 #define TCF_EM_CMP 1 #define TCF_EM_NBYTE 2 #define TCF_EM_U32 3 #define TCF_EM_META 4 #define TCF_EM_TEXT 5 #define TCF_EM_VLAN 6 #define TCF_EM_CANID 7 #define TCF_EM_IPSET 8 #define TCF_EM_IPT 9 #define TCF_EM_MAX 9 enum { TCF_EM_PROG_TC }; enum { TCF_EM_OPND_EQ, TCF_EM_OPND_GT, TCF_EM_OPND_LT }; #endif xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/if_xdp.h0000644000175100001660000001162014706536574022204 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * if_xdp: XDP socket user-space interface * Copyright(c) 2018 Intel Corporation. 
* * Author(s): Björn Töpel * Magnus Karlsson */ #ifndef _LINUX_IF_XDP_H #define _LINUX_IF_XDP_H #include /* Options for the sxdp_flags field */ #define XDP_SHARED_UMEM (1 << 0) #define XDP_COPY (1 << 1) /* Force copy-mode */ #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ /* If this option is set, the driver might go sleep and in that case * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be * set. If it is set, the application need to explicitly wake up the * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are * running the driver and the application on the same core, you should * use this option so that the kernel will yield to the user space * application. */ #define XDP_USE_NEED_WAKEUP (1 << 3) /* By setting this option, userspace application indicates that it can * handle multiple descriptors per packet thus enabling AF_XDP to split * multi-buffer XDP frames into multiple Rx descriptors. Without this set * such frames will be dropped. */ #define XDP_USE_SG (1 << 4) /* Flags for xsk_umem_config flags */ #define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) /* Force checksum calculation in software. Can be used for testing or * working around potential HW issues. This option causes performance * degradation and only works in XDP_COPY mode. */ #define XDP_UMEM_TX_SW_CSUM (1 << 1) /* Request to reserve tx_metadata_len bytes of per-chunk metadata. */ #define XDP_UMEM_TX_METADATA_LEN (1 << 2) struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; __u32 sxdp_ifindex; __u32 sxdp_queue_id; __u32 sxdp_shared_umem_fd; }; /* XDP_RING flags */ #define XDP_RING_NEED_WAKEUP (1 << 0) struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; __u64 flags; }; struct xdp_mmap_offsets { struct xdp_ring_offset rx; struct xdp_ring_offset tx; struct xdp_ring_offset fr; /* Fill */ struct xdp_ring_offset cr; /* Completion */ }; /* XDP socket options */ #define XDP_MMAP_OFFSETS 1 #define XDP_RX_RING 2 #define XDP_TX_RING 3 #define XDP_UMEM_REG 4 #define XDP_UMEM_FILL_RING 5 #define XDP_UMEM_COMPLETION_RING 6 #define XDP_STATISTICS 7 #define XDP_OPTIONS 8 struct xdp_umem_reg { __u64 addr; /* Start of packet data area */ __u64 len; /* Length of packet data area */ __u32 chunk_size; __u32 headroom; __u32 flags; __u32 tx_metadata_len; }; struct xdp_statistics { __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 rx_ring_full; /* Dropped due to rx ring being full */ __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { __u32 flags; }; /* Flags for the flags field of struct xdp_options */ #define XDP_OPTIONS_ZEROCOPY (1 << 0) /* Pgoff for mmaping the rings */ #define XDP_PGOFF_RX_RING 0 #define XDP_PGOFF_TX_RING 0x80000000 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL /* Masks for unaligned chunks mode */ #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 #define XSK_UNALIGNED_BUF_ADDR_MASK \ ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) /* Request transmit timestamp. Upon completion, put it into tx_timestamp * field of union xsk_tx_metadata. */ #define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0) /* Request transmit checksum offload. Checksum start position and offset * are communicated via csum_start and csum_offset fields of union * xsk_tx_metadata. 
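 *
 * A minimal TX-side sketch (an illustrative addition, not part of the
 * original header text; it assumes the umem was registered with
 * XDP_UMEM_TX_METADATA_LEN so the metadata sits immediately in front
 * of the packet data, and that "umem_area", "eth_hlen" and "ip_hlen"
 * are provided by the caller):
 *
 *	struct xsk_tx_metadata *meta = umem_area + desc->addr -
 *				       sizeof(struct xsk_tx_metadata);
 *
 *	meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
 *	meta->request.csum_start = eth_hlen + ip_hlen;
 *	meta->request.csum_offset = offsetof(struct udphdr, check);
 *	desc->options |= XDP_TX_METADATA;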
*/ #define XDP_TXMD_FLAGS_CHECKSUM (1 << 1) /* AF_XDP offloads request. 'request' union member is consumed by the driver * when the packet is being transmitted. 'completion' union member is * filled by the driver when the transmit completion arrives. */ struct xsk_tx_metadata { __u64 flags; union { struct { /* XDP_TXMD_FLAGS_CHECKSUM */ /* Offset from desc->addr where checksumming should start. */ __u16 csum_start; /* Offset from csum_start where checksum should be stored. */ __u16 csum_offset; } request; struct { /* XDP_TXMD_FLAGS_TIMESTAMP */ __u64 tx_timestamp; } completion; }; }; /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; __u32 len; __u32 options; }; /* UMEM descriptor is __u64 */ /* Flag indicating that the packet continues with the buffer pointed out by the * next frame in the ring. The end of the packet is signalled by setting this * bit to zero. For single buffer packets, every descriptor has 'options' set * to 0 and this maintains backward compatibility. */ #define XDP_PKT_CONTD (1 << 0) /* TX packet carries valid metadata. */ #define XDP_TX_METADATA (1 << 1) #endif /* _LINUX_IF_XDP_H */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/perf_event.h0000644000175100001660000012470414706536574023100 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. * * Started by: Thomas Gleixner and Ingo Molnar * * For licencing details see kernel-base/COPYING */ #ifndef _UAPI_LINUX_PERF_EVENT_H #define _UAPI_LINUX_PERF_EVENT_H #include #include #include /* * User-space ABI bits: */ /* * attr.type */ enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX, /* non-ABI */ }; /* * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA * AA: hardware event ID * EEEEEEEE: PMU type ID * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB * BB: hardware cache ID * CC: hardware cache op ID * DD: hardware cache op result ID * EEEEEEEE: PMU type ID * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. 
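 *
 * (An illustrative addition.) A cycles event on a specific PMU, whose
 * numeric type id ("pmu_type" below, an assumed caller-provided value
 * read from sysfs) goes into the high bits, can thus be encoded as:
 *
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *		      PERF_COUNT_HW_CPU_CYCLES;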
*/ #define PERF_PMU_TYPE_SHIFT 32 #define PERF_HW_EVENT_MASK 0xffffffff /* * Generalized performance event event_id types, used by the * attr.event_id parameter of the sys_perf_event_open() * syscall: */ enum perf_hw_id { /* * Common hardware events, generalized by the kernel: */ PERF_COUNT_HW_CPU_CYCLES = 0, PERF_COUNT_HW_INSTRUCTIONS = 1, PERF_COUNT_HW_CACHE_REFERENCES = 2, PERF_COUNT_HW_CACHE_MISSES = 3, PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX, /* non-ABI */ }; /* * Generalized hardware cache events: * * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x * { read, write, prefetch } x * { accesses, misses } */ enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_L1D = 0, PERF_COUNT_HW_CACHE_L1I = 1, PERF_COUNT_HW_CACHE_LL = 2, PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ }; enum perf_hw_cache_op_id { PERF_COUNT_HW_CACHE_OP_READ = 0, PERF_COUNT_HW_CACHE_OP_WRITE = 1, PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ }; enum perf_hw_cache_op_result_id { PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, PERF_COUNT_HW_CACHE_RESULT_MISS = 1, PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ }; /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various * physical and sw events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, PERF_COUNT_SW_CONTEXT_SWITCHES = 3, PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX, /* non-ABI */ }; /* * Bits that can be set in attr.sample_type to request information * in the overflow packets. */ enum perf_event_sample_format { PERF_SAMPLE_IP = 1U << 0, PERF_SAMPLE_TID = 1U << 1, PERF_SAMPLE_TIME = 1U << 2, PERF_SAMPLE_ADDR = 1U << 3, PERF_SAMPLE_READ = 1U << 4, PERF_SAMPLE_CALLCHAIN = 1U << 5, PERF_SAMPLE_ID = 1U << 6, PERF_SAMPLE_CPU = 1U << 7, PERF_SAMPLE_PERIOD = 1U << 8, PERF_SAMPLE_STREAM_ID = 1U << 9, PERF_SAMPLE_RAW = 1U << 10, PERF_SAMPLE_BRANCH_STACK = 1U << 11, PERF_SAMPLE_REGS_USER = 1U << 12, PERF_SAMPLE_STACK_USER = 1U << 13, PERF_SAMPLE_WEIGHT = 1U << 14, PERF_SAMPLE_DATA_SRC = 1U << 15, PERF_SAMPLE_IDENTIFIER = 1U << 16, PERF_SAMPLE_TRANSACTION = 1U << 17, PERF_SAMPLE_REGS_INTR = 1U << 18, PERF_SAMPLE_PHYS_ADDR = 1U << 19, PERF_SAMPLE_AUX = 1U << 20, PERF_SAMPLE_CGROUP = 1U << 21, PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22, PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23, PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24, PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */ }; #define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) /* * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do * not have to match. Branch priv level is checked for permissions. 
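 *
 * A usage sketch (an illustrative addition): sampling only user-space
 * call branches could be requested with
 *
 *	attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
 *				  PERF_SAMPLE_BRANCH_ANY_CALL;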
* * The branch types can be combined, however BRANCH_ANY covers all types * of branches and therefore it supersedes all the other types. */ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */ PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */ PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* * Common flow change classification */ enum { PERF_BR_UNKNOWN = 0, /* unknown */ PERF_BR_COND = 1, /* conditional */ PERF_BR_UNCOND = 2, /* unconditional */ PERF_BR_IND = 3, /* indirect */ PERF_BR_CALL = 4, /* function call */ PERF_BR_IND_CALL = 5, /* indirect function call */ PERF_BR_RET = 6, /* function return */ PERF_BR_SYSCALL = 7, /* syscall */ PERF_BR_SYSRET = 8, /* syscall return */ PERF_BR_COND_CALL = 9, /* conditional function call */ PERF_BR_COND_RET = 10, /* conditional function return */ PERF_BR_ERET = 11, /* exception return */ PERF_BR_IRQ = 12, /* irq */ PERF_BR_SERROR = 13, /* system 
error */ PERF_BR_NO_TX = 14, /* not in transaction */ PERF_BR_EXTEND_ABI = 15, /* extend ABI */ PERF_BR_MAX, }; /* * Common branch speculation outcome classification */ enum { PERF_BR_SPEC_NA = 0, /* Not available */ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ PERF_BR_SPEC_MAX, }; enum { PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ PERF_BR_NEW_MAX, }; enum { PERF_BR_PRIV_UNKNOWN = 0, PERF_BR_PRIV_USER = 1, PERF_BR_PRIV_KERNEL = 2, PERF_BR_PRIV_HV = 3, }; #define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 #define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 #define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 #define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 #define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ PERF_SAMPLE_BRANCH_KERNEL|\ PERF_SAMPLE_BRANCH_HV) /* * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { PERF_SAMPLE_REGS_ABI_NONE = 0, PERF_SAMPLE_REGS_ABI_32 = 1, PERF_SAMPLE_REGS_ABI_64 = 2, }; /* * Values for the memory transaction event qualifier, mostly for * abort events. Multiple bits can be set. */ enum { PERF_TXN_ELISION = (1 << 0), /* From elision */ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ PERF_TXN_RETRY = (1 << 4), /* Retry possible */ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ PERF_TXN_MAX = (1 << 8), /* non-ABI */ /* bits 32..63 are reserved for the abort code */ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), PERF_TXN_ABORT_SHIFT = 32, }; /* * The format of the data returned by read() on a perf event fd, * as specified by attr.read_format: * * struct read_format { * { u64 value; * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 id; } && PERF_FORMAT_ID * { u64 lost; } && PERF_FORMAT_LOST * } && !PERF_FORMAT_GROUP * * { u64 nr; * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 value; * { u64 id; } && PERF_FORMAT_ID * { u64 lost; } && PERF_FORMAT_LOST * } cntr[nr]; * } && PERF_FORMAT_GROUP * }; */ enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_GROUP = 1U << 3, PERF_FORMAT_LOST = 1U << 4, PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ #define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ /* add: sample_stack_user */ #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ #define 
PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ #define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ /* * Hardware event_id to monitor via a performance monitoring event: * * @sample_max_stack: Max number of frame pointers in a callchain, * should be < /proc/sys/kernel/perf_event_max_stack */ struct perf_event_attr { /* * Major type: hardware/software/tracepoint/etc. */ __u32 type; /* * Size of the attr structure, for fwd/bwd compat. */ __u32 size; /* * Type specific configuration information. */ __u64 config; union { __u64 sample_period; __u64 sample_freq; }; __u64 sample_type; __u64 read_format; __u64 disabled : 1, /* off by default */ inherit : 1, /* children inherit it */ pinned : 1, /* must always be on PMU */ exclusive : 1, /* only group on PMU */ exclude_user : 1, /* don't count user */ exclude_kernel : 1, /* ditto kernel */ exclude_hv : 1, /* ditto hypervisor */ exclude_idle : 1, /* don't count when idle */ mmap : 1, /* include mmap data */ comm : 1, /* include comm data */ freq : 1, /* use freq, not period */ inherit_stat : 1, /* per task counts */ enable_on_exec : 1, /* next exec enables */ task : 1, /* trace fork/exit */ watermark : 1, /* wakeup_watermark */ /* * precise_ip: * * 0 - SAMPLE_IP can have arbitrary skid * 1 - SAMPLE_IP must have constant skid * 2 - SAMPLE_IP requested to have 0 skid * 3 - SAMPLE_IP must have 0 skid * * See also PERF_RECORD_MISC_EXACT_IP */ precise_ip : 2, /* skid constraint */ mmap_data : 1, /* non-exec mmap data */ sample_id_all : 1, /* sample_type all events */ exclude_host : 1, /* don't count in host */ exclude_guest : 1, /* don't count in guest */ exclude_callchain_kernel : 1, /* exclude kernel callchains */ exclude_callchain_user : 1, /* exclude user callchains */ mmap2 : 1, /* include mmap with inode data */ comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ write_backward : 1, /* Write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ bpf_event : 1, /* include bpf events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ build_id : 1, /* use build id in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ sigtrap : 1, /* send synchronous SIGTRAP on event */ __reserved_1 : 26; union { __u32 wakeup_events; /* wakeup every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; __u32 bp_type; union { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ __u64 config1; /* extension of config */ }; union { __u64 bp_len; __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ /* * Defines set of user regs to dump on samples. * See asm/perf_regs.h for details. */ __u64 sample_regs_user; /* * Defines size of the user stack to dump on samples. */ __u32 sample_stack_user; __s32 clockid; /* * Defines set of regs to dump for each sample * state captured on: * - precise = 0: PMU interrupt * - precise > 0: sampled instruction * * See asm/perf_regs.h for details. 
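 *
 * (An illustrative addition.) The mask is a bitmap over the registers
 * enumerated in asm/perf_regs.h (an architecture header outside this
 * file); on x86, requesting the instruction and stack pointers might
 * look like:
 *
 *	attr.sample_type |= PERF_SAMPLE_REGS_INTR;
 *	attr.sample_regs_intr = (1ULL << PERF_REG_X86_IP) |
 *				(1ULL << PERF_REG_X86_SP);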
*/ __u64 sample_regs_intr; /* * Wakeup watermark for AUX area */ __u32 aux_watermark; __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; /* * User provided data if sigtrap=1, passed back to user via * siginfo_t::si_perf_data, e.g. to permit user to identify the event. * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be * truncated accordingly on 32 bit architectures. */ __u64 sig_data; __u64 config3; /* extension of config2 */ }; /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command * to query bpf programs attached to the same perf tracepoint * as the given perf event. */ struct perf_event_query_bpf { /* * The below ids array length */ __u32 ids_len; /* * Set by the kernel to indicate the number of * available programs */ __u32 prog_cnt; /* * User provided buffer to store program ids */ __u32 ids[]; }; /* * Ioctls that can be done on a perf event fd: */ #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) #define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) #define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, }; /* * Structure of the page that can be mapped via mmap */ struct perf_event_mmap_page { __u32 version; /* version number of this structure */ __u32 compat_version; /* lowest version this is compat with */ /* * Bits needed to read the hw events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; * u64 cyc, time_offset; * s64 pmc = 0; * * do { * seq = pc->lock; * barrier() * * enabled = pc->time_enabled; * running = pc->time_running; * * if (pc->cap_usr_time && enabled != running) { * cyc = rdtsc(); * time_offset = pc->time_offset; * time_mult = pc->time_mult; * time_shift = pc->time_shift; * } * * index = pc->index; * count = pc->offset; * if (pc->cap_user_rdpmc && index) { * width = pc->pmc_width; * pmc = rdpmc(index - 1); * } * * barrier(); * } while (pc->lock != seq); * * NOTE: for obvious reason this only works on self-monitoring * processes. */ __u32 lock; /* seqlock for synchronization */ __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ __u64 time_running; /* time event on cpu */ union { __u64 capabilities; struct { __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */ cap_user_time_zero : 1, /* The time_zero field is used */ cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */ cap_____res : 58; }; }; /* * If cap_user_rdpmc this field provides the bit-width of the value * read using the rdpmc() or equivalent instruction. 
This can be used * to sign extend the result like: * * pmc <<= 64 - width; * pmc >>= 64 - width; // signed shift right * count += pmc; */ __u16 pmc_width; /* * If cap_usr_time the below fields can be used to compute the time * delta since time_enabled (in ns) using rdtsc or similar. * * u64 quot, rem; * u64 delta; * * quot = (cyc >> time_shift); * rem = cyc & (((u64)1 << time_shift) - 1); * delta = time_offset + quot * time_mult + * ((rem * time_mult) >> time_shift); * * Where time_offset,time_mult,time_shift and cyc are read in the * seqcount loop described above. This delta can then be added to * enabled and possible running (if index), improving the scaling: * * enabled += delta; * if (index) * running += delta; * * quot = count / running; * rem = count % running; * count = quot * enabled + (rem * enabled) / running; */ __u16 time_shift; __u32 time_mult; __u64 time_offset; /* * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated * from sample timestamps. * * time = timestamp - time_zero; * quot = time / time_mult; * rem = time % time_mult; * cyc = (quot << time_shift) + (rem << time_shift) / time_mult; * * And vice versa: * * quot = cyc >> time_shift; * rem = cyc & (((u64)1 << time_shift) - 1); * timestamp = time_zero + quot * time_mult + * ((rem * time_mult) >> time_shift); */ __u64 time_zero; __u32 size; /* Header size up to __reserved[] fields. */ __u32 __reserved_1; /* * If cap_usr_time_short, the hardware clock is less than 64bit wide * and we must compute the 'cyc' value, as used by cap_usr_time, as: * * cyc = time_cycles + ((cyc - time_cycles) & time_mask) * * NOTE: this form is explicitly chosen such that cap_usr_time_short * is a correction on top of cap_usr_time, and code that doesn't * know about cap_usr_time_short still works under the assumption * the counter doesn't wrap. */ __u64 time_cycles; __u64 time_mask; /* * Hole for extension of the self monitor capabilities */ __u8 __reserved[116*8]; /* align to 1k. */ /* * Control data for the mmap() data buffer. * * User-space reading the @data_head value should issue an smp_rmb(), * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be * written by userspace to reflect the last read data, after issueing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * * See perf_output_put_handle() for the data ordering. * * data_{offset,size} indicate the location and size of the perf record * buffer within the mmapped area. */ __u64 data_head; /* head in the data section */ __u64 data_tail; /* user-space written tail */ __u64 data_offset; /* where the buffer starts */ __u64 data_size; /* data buffer size */ /* * AUX area is defined by aux_{offset,size} fields that should be set * by the userspace, so that * * aux_offset >= data_offset + data_size * * prior to mmap()ing it. Size of the mmap()ed area should be aux_size. * * Ring buffer pointers aux_{head,tail} have the same semantics as * data_{head,tail} and same ordering rules apply. 
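 *
 * A simplified consumer sketch for the data ring (an illustrative
 * addition; it assumes a PROT_WRITE mapping, a power-of-two data_size,
 * records that do not wrap around the ring boundary, and "base"
 * pointing at the start of the data area):
 *
 *	__u64 tail = pc->data_tail;
 *	__u64 head = pc->data_head;
 *	smp_rmb();
 *	while (tail != head) {
 *		struct perf_event_header *eh =
 *			base + (tail & (pc->data_size - 1));
 *		/* consume the record at eh */
 *		tail += eh->size;
 *	}
 *	smp_mb();	/* order the data reads before the tail store */
 *	pc->data_tail = tail;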
*/ __u64 aux_head; __u64 aux_tail; __u64 aux_offset; __u64 aux_size; }; /* * The current state of perf_event_header::misc bits usage: * ('|' used bit, '-' unused bit) * * 012 CDEF * |||---------|||| * * Where: * 0-2 CPUMODE_MASK * * C PROC_MAP_PARSE_TIMEOUT * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT * F (reserved) */ #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0) #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) #define PERF_RECORD_MISC_KERNEL (1 << 0) #define PERF_RECORD_MISC_USER (2 << 0) #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0) #define PERF_RECORD_MISC_GUEST_USER (5 << 0) /* * Indicates that /proc/PID/maps parsing are truncated by time out. */ #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12) /* * Following PERF_RECORD_MISC_* are used on different * events, so can reuse the same bit position: * * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event * PERF_RECORD_MISC_FORK_EXEC - PERF_RECORD_FORK event (perf internal) * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events */ #define PERF_RECORD_MISC_MMAP_DATA (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_FORK_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* * These PERF_RECORD_MISC_* flags below are safely reused * for the following events: * * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events * PERF_RECORD_MISC_MMAP_BUILD_ID - PERF_RECORD_MMAP2 event * * * PERF_RECORD_MISC_EXACT_IP: * Indicates that the content of PERF_SAMPLE_IP points to * the actual instruction that triggered the event. See also * perf_event_attr::precise_ip. * * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: * Indicates that thread was preempted in TASK_RUNNING state. * * PERF_RECORD_MISC_MMAP_BUILD_ID: * Indicates that mmap2 event carries build id data. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) #define PERF_RECORD_MISC_MMAP_BUILD_ID (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { __u32 type; __u16 misc; __u16 size; }; struct perf_ns_link_info { __u64 dev; __u64 ino; }; enum { NET_NS_INDEX = 0, UTS_NS_INDEX = 1, IPC_NS_INDEX = 2, PID_NS_INDEX = 3, USER_NS_INDEX = 4, MNT_NS_INDEX = 5, CGROUP_NS_INDEX = 6, NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { /* * If perf_event_attr.sample_id_all is set then all event types will * have the sample_type selected fields related to where/when * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU, * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed * just after the perf_event_header and the fields already present for * the existing fields, i.e. at the end of the payload. That way a newer * perf.data file will be supported by older perf tools, with these new * optional fields being ignored. * * struct sample_id { * { u32 pid, tid; } && PERF_SAMPLE_TID * { u64 time; } && PERF_SAMPLE_TIME * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. 
The * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed * relative to header.size. */ /* * The MMAP events record the PROT_EXEC mappings so that we can * correlate userspace IPs to code. They have the following structure: * * struct { * struct perf_event_header header; * * u32 pid, tid; * u64 addr; * u64 len; * u64 pgoff; * char filename[]; * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, /* * struct { * struct perf_event_header header; * u64 id; * u64 lost; * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, /* * struct { * struct perf_event_header header; * * u32 pid, tid; * char comm[]; * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, /* * struct { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; * u64 time; * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, /* * struct { * struct perf_event_header header; * u64 time; * u64 id; * u64 stream_id; * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, PERF_RECORD_UNTHROTTLE = 6, /* * struct { * struct perf_event_header header; * u32 pid, ppid; * u32 tid, ptid; * u64 time; * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, /* * struct { * struct perf_event_header header; * u32 pid, tid; * * struct read_format values; * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, /* * struct { * struct perf_event_header header; * * # * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position * # is fixed relative to header. * # * * { u64 id; } && PERF_SAMPLE_IDENTIFIER * { u64 ip; } && PERF_SAMPLE_IP * { u32 pid, tid; } && PERF_SAMPLE_TID * { u64 time; } && PERF_SAMPLE_TIME * { u64 addr; } && PERF_SAMPLE_ADDR * { u64 id; } && PERF_SAMPLE_ID * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 period; } && PERF_SAMPLE_PERIOD * * { struct read_format values; } && PERF_SAMPLE_READ * * { u64 nr, * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN * * # * # The RAW record below is opaque data wrt the ABI * # * # That is, the ABI doesn't make any promises wrt to * # the stability of its content, it may vary depending * # on event, hardware, kernel version and phase of * # the moon. * # * # In other words, PERF_SAMPLE_RAW contents are not an ABI. * # * * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW * * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; * # * # The format of the counters is decided by the * # "branch_counter_nr" and "branch_counter_width", * # which are defined in the ABI. 
* # * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * * { u64 size; * char data[size]; * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { union perf_sample_weight * { * u64 full; && PERF_SAMPLE_WEIGHT * #if defined(__LITTLE_ENDIAN_BITFIELD) * struct { * u32 var1_dw; * u16 var2_w; * u16 var3_w; * } && PERF_SAMPLE_WEIGHT_STRUCT * #elif defined(__BIG_ENDIAN_BITFIELD) * struct { * u16 var3_w; * u16 var2_w; * u32 var1_dw; * } && PERF_SAMPLE_WEIGHT_STRUCT * #endif * } * } * { u64 data_src; } && PERF_SAMPLE_DATA_SRC * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR * { u64 size; * char data[size]; } && PERF_SAMPLE_AUX * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE * }; */ PERF_RECORD_SAMPLE = 9, /* * The MMAP2 records are an augmented version of MMAP, they add * maj, min, ino numbers to be used to uniquely identify each mapping * * struct { * struct perf_event_header header; * * u32 pid, tid; * u64 addr; * u64 len; * u64 pgoff; * union { * struct { * u32 maj; * u32 min; * u64 ino; * u64 ino_generation; * }; * struct { * u8 build_id_size; * u8 __reserved_1; * u16 __reserved_2; * u8 build_id[20]; * }; * }; * u32 prot, flags; * char filename[]; * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, /* * Records that new data landed in the AUX buffer part. * * struct { * struct perf_event_header header; * * u64 aux_offset; * u64 aux_size; * u64 flags; * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, /* * Indicates that instruction trace has started * * struct { * struct perf_event_header header; * u32 pid; * u32 tid; * struct sample_id sample_id; * }; */ PERF_RECORD_ITRACE_START = 12, /* * Records the dropped/lost sample number. * * struct { * struct perf_event_header header; * * u64 lost; * struct sample_id sample_id; * }; */ PERF_RECORD_LOST_SAMPLES = 13, /* * Records a context switch in or out (flagged by * PERF_RECORD_MISC_SWITCH_OUT). See also * PERF_RECORD_SWITCH_CPU_WIDE. * * struct { * struct perf_event_header header; * struct sample_id sample_id; * }; */ PERF_RECORD_SWITCH = 14, /* * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and * next_prev_tid that are the next (switching out) or previous * (switching in) pid/tid. 
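 *
 * (An illustrative note.) For both switch record types the direction
 * is carried in the header, not in the payload:
 *
 *	bool switch_out = eh->misc & PERF_RECORD_MISC_SWITCH_OUT;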
* * struct { * struct perf_event_header header; * u32 next_prev_pid; * u32 next_prev_tid; * struct sample_id sample_id; * }; */ PERF_RECORD_SWITCH_CPU_WIDE = 15, /* * struct { * struct perf_event_header header; * u32 pid; * u32 tid; * u64 nr_namespaces; * { u64 dev, inode; } [nr_namespaces]; * struct sample_id sample_id; * }; */ PERF_RECORD_NAMESPACES = 16, /* * Record ksymbol register/unregister events: * * struct { * struct perf_event_header header; * u64 addr; * u32 len; * u16 ksym_type; * u16 flags; * char name[]; * struct sample_id sample_id; * }; */ PERF_RECORD_KSYMBOL = 17, /* * Record bpf events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, * PERF_BPF_EVENT_PROG_UNLOAD = 2, * }; * * struct { * struct perf_event_header header; * u16 type; * u16 flags; * u32 id; * u8 tag[BPF_TAG_SIZE]; * struct sample_id sample_id; * }; */ PERF_RECORD_BPF_EVENT = 18, /* * struct { * struct perf_event_header header; * u64 id; * char path[]; * struct sample_id sample_id; * }; */ PERF_RECORD_CGROUP = 19, /* * Records changes to kernel text i.e. self-modified code. 'old_len' is * the number of old bytes, 'new_len' is the number of new bytes. Either * 'old_len' or 'new_len' may be zero to indicate, for example, the * addition or removal of a trampoline. 'bytes' contains the old bytes * followed immediately by the new bytes. * * struct { * struct perf_event_header header; * u64 addr; * u16 old_len; * u16 new_len; * u8 bytes[]; * struct sample_id sample_id; * }; */ PERF_RECORD_TEXT_POKE = 20, /* * Data written to the AUX area by hardware due to aux_output, may need * to be matched to the event by an architecture-specific hardware ID. * This records the hardware ID, but requires sample_id to provide the * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT * records from multiple events. * * struct { * struct perf_event_header header; * u64 hw_id; * struct sample_id sample_id; * }; */ PERF_RECORD_AUX_OUTPUT_HW_ID = 21, PERF_RECORD_MAX, /* non-ABI */ }; enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, /* * Out of line code such as kprobe-replaced instructions or optimized * kprobes or ftrace trampolines. 
*/ PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ }; #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { PERF_BPF_EVENT_UNKNOWN = 0, PERF_BPF_EVENT_PROG_LOAD = 1, PERF_BPF_EVENT_PROG_UNLOAD = 2, PERF_BPF_EVENT_MAX, /* non-ABI */ }; #define PERF_MAX_STACK_DEPTH 127 #define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, PERF_CONTEXT_KERNEL = (__u64)-128, PERF_CONTEXT_USER = (__u64)-512, PERF_CONTEXT_GUEST = (__u64)-2048, PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, PERF_CONTEXT_GUEST_USER = (__u64)-2560, PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ #define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ #define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ #define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ #define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ /* CoreSight PMU AUX buffer formats */ #define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ #define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ #define PERF_FLAG_FD_NO_GROUP (1UL << 0) #define PERF_FLAG_FD_OUTPUT (1UL << 1) #define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ #define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { __u64 mem_op:5, /* type of opcode */ mem_lvl:14, /* memory hierarchy level */ mem_snoop:5, /* snoop mode */ mem_lock:2, /* lock instr */ mem_dtlb:7, /* tlb access */ mem_lvl_num:4, /* memory hierarchy level number */ mem_remote:1, /* remote */ mem_snoopx:2, /* snoop mode, ext */ mem_blk:3, /* access blocked */ mem_hops:3, /* hop level */ mem_rsvd:18; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { __u64 mem_rsvd:18, mem_hops:3, /* hop level */ mem_blk:3, /* access blocked */ mem_snoopx:2, /* snoop mode, ext */ mem_remote:1, /* remote */ mem_lvl_num:4, /* memory hierarchy level number */ mem_dtlb:7, /* tlb access */ mem_lock:2, /* lock instr */ mem_snoop:5, /* snoop mode */ mem_lvl:14, /* memory hierarchy level */ mem_op:5; /* type of opcode */ }; }; #else #error "Unknown endianness" #endif /* type of opcode (load/store/prefetch,code) */ #define PERF_MEM_OP_NA 0x01 /* not available */ #define PERF_MEM_OP_LOAD 0x02 /* load instruction */ #define PERF_MEM_OP_STORE 0x04 /* store instruction */ #define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ #define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ #define PERF_MEM_OP_SHIFT 0 /* * PERF_MEM_LVL_* namespace being depricated to some extent in the * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields. * Supporting this namespace inorder to not break defined ABIs. 
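 *
 * (An illustrative addition.) Both the old and the new namespace are
 * composed with the PERF_MEM_S() helper defined at the end of this
 * block; a load that hit in L1 could be described as:
 *
 *	data_src.val = PERF_MEM_S(OP, LOAD) |
 *		       PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *		       PERF_MEM_S(LVLNUM, L1);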
* * memory hierarchy (memory level, hit or miss) */ #define PERF_MEM_LVL_NA 0x01 /* not available */ #define PERF_MEM_LVL_HIT 0x02 /* hit level */ #define PERF_MEM_LVL_MISS 0x04 /* miss level */ #define PERF_MEM_LVL_L1 0x08 /* L1 */ #define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ #define PERF_MEM_LVL_L2 0x20 /* L2 */ #define PERF_MEM_LVL_L3 0x40 /* L3 */ #define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ #define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ #define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ #define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ #define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ #define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ #define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ #define PERF_MEM_LVL_SHIFT 5 #define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ #define PERF_MEM_REMOTE_SHIFT 37 #define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ #define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ #define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ #define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ #define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ #define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ /* 0x7 available */ #define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ #define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ #define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ #define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ #define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ #define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ #define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ #define PERF_MEM_LVLNUM_SHIFT 33 /* snoop mode */ #define PERF_MEM_SNOOP_NA 0x01 /* not available */ #define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ #define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ #define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ #define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ #define PERF_MEM_SNOOP_SHIFT 19 #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ #define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */ #define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ #define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ #define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ #define PERF_MEM_TLB_NA 0x01 /* not available */ #define PERF_MEM_TLB_HIT 0x02 /* hit level */ #define PERF_MEM_TLB_MISS 0x04 /* miss level */ #define PERF_MEM_TLB_L1 0x08 /* L1 */ #define PERF_MEM_TLB_L2 0x10 /* L2 */ #define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ #define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ #define PERF_MEM_TLB_SHIFT 26 /* Access blocked */ #define PERF_MEM_BLK_NA 0x01 /* not available */ #define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ #define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ #define PERF_MEM_BLK_SHIFT 40 /* hop level */ #define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ #define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ #define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ #define PERF_MEM_HOPS_3 0x04 /* remote board */ /* 5-7 available */ #define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* * single taken branch record layout: * * from: source instruction (may not always be a branch insn) * to: branch target * mispred: branch target was mispredicted * predicted: branch target was predicted * * support for mispred, predicted is optional. In case it * is not supported mispred = predicted = 0. 
* * in_tx: running in a hardware transaction * abort: aborting a hardware transaction * cycles: cycles from last branch (or 0 if not supported) * type: branch type * spec: branch speculation info (or 0 if not supported) */ struct perf_branch_entry { __u64 from; __u64 to; __u64 mispred:1, /* target mispredicted */ predicted:1,/* target predicted */ in_tx:1, /* in transaction */ abort:1, /* transaction abort */ cycles:16, /* cycle count to last branch */ type:4, /* branch type */ spec:2, /* branch speculation info */ new_type:4, /* additional branch type */ priv:3, /* privilege level */ reserved:31; }; /* Size of used info bits in struct perf_branch_entry */ #define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) struct { __u32 var1_dw; __u16 var2_w; __u16 var3_w; }; #elif defined(__BIG_ENDIAN_BITFIELD) struct { __u16 var3_w; __u16 var2_w; __u32 var1_dw; }; #else #error "Unknown endianness" #endif }; #endif /* _UAPI_LINUX_PERF_EVENT_H */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/btf.h0000644000175100001660000001274614706536574021520 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2018 Facebook */ #ifndef _UAPI__LINUX_BTF_H__ #define _UAPI__LINUX_BTF_H__ #include #define BTF_MAGIC 0xeB9F #define BTF_VERSION 1 struct btf_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; /* All offsets are in bytes relative to the end of this header */ __u32 type_off; /* offset of type section */ __u32 type_len; /* length of type section */ __u32 str_off; /* offset of string section */ __u32 str_len; /* length of string section */ }; /* Max # of type identifier */ #define BTF_MAX_TYPE 0x000fffff /* Max offset into the string section */ #define BTF_MAX_NAME_OFFSET 0x00ffffff /* Max # of struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff struct btf_type { __u32 name_off; /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused * bits 24-28: kind (e.g. int, ptr, array...etc) * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union, enum, fwd and enum64 */ __u32 info; /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64. * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG. * "type" is a type_id referring to another type. 
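 *
 * A decoding sketch (an illustrative addition, using the accessor
 * macros defined right below, for a given const struct btf_type *t):
 *
 *	__u16 vlen = BTF_INFO_VLEN(t->info);
 *
 *	if (BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT) {
 *		const struct btf_member *m = (const void *)(t + 1);
 *		/* m[0] .. m[vlen - 1] are the struct's members */
 *	}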
*/ union { __u32 size; __u32 type; }; }; #define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f) #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) enum { BTF_KIND_UNKN = 0, /* Unknown */ BTF_KIND_INT = 1, /* Integer */ BTF_KIND_PTR = 2, /* Pointer */ BTF_KIND_ARRAY = 3, /* Array */ BTF_KIND_STRUCT = 4, /* Struct */ BTF_KIND_UNION = 5, /* Union */ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */ BTF_KIND_FWD = 7, /* Forward */ BTF_KIND_TYPEDEF = 8, /* Typedef */ BTF_KIND_VOLATILE = 9, /* Volatile */ BTF_KIND_CONST = 10, /* Const */ BTF_KIND_RESTRICT = 11, /* Restrict */ BTF_KIND_FUNC = 12, /* Function */ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ BTF_KIND_VAR = 14, /* Variable */ BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ BTF_KIND_DECL_TAG = 17, /* Decl Tag */ BTF_KIND_TYPE_TAG = 18, /* Type Tag */ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, }; /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. */ /* BTF_KIND_INT is followed by a u32 and the following * is the 32 bits arrangement: */ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) /* Attributes stored in the BTF_INT_ENCODING */ #define BTF_INT_SIGNED (1 << 0) #define BTF_INT_CHAR (1 << 1) #define BTF_INT_BOOL (1 << 2) /* BTF_KIND_ENUM is followed by multiple "struct btf_enum". * The exact number of btf_enum is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_enum { __u32 name_off; __s32 val; }; /* BTF_KIND_ARRAY is followed by one "struct btf_array" */ struct btf_array { __u32 type; __u32 index_type; __u32 nelems; }; /* BTF_KIND_STRUCT and BTF_KIND_UNION are followed * by multiple "struct btf_member". The exact number * of btf_member is stored in the vlen (of the info in * "struct btf_type"). */ struct btf_member { __u32 name_off; __u32 type; /* If the type info kind_flag is set, the btf_member offset * contains both member bitfield size and bit offset. The * bitfield size is set for bitfield members. If the type * info kind_flag is not set, the offset contains only bit * offset. */ __u32 offset; }; /* If the struct/union type info kind_flag is set, the * following two macros are used to access bitfield_size * and bit_offset from btf_member.offset. */ #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) /* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". * The exact number of btf_param is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_param { __u32 name_off; __u32 type; }; enum { BTF_VAR_STATIC = 0, BTF_VAR_GLOBAL_ALLOCATED = 1, BTF_VAR_GLOBAL_EXTERN = 2, }; enum btf_func_linkage { BTF_FUNC_STATIC = 0, BTF_FUNC_GLOBAL = 1, BTF_FUNC_EXTERN = 2, }; /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe * additional information related to the variable such as its linkage. */ struct btf_var { __u32 linkage; }; /* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo" * to describe all BTF_KIND_VAR types it contains along with it's * in-section offset as well as size. */ struct btf_var_secinfo { __u32 type; __u32 offset; __u32 size; }; /* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe * additional information related to the tag applied location. 
* If component_idx == -1, the tag is applied to a struct, union, * variable or function. Otherwise, it is applied to a struct/union * member or a func argument, and component_idx indicates which member * or argument (0 ... vlen-1). */ struct btf_decl_tag { __s32 component_idx; }; /* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64". * The exact number of btf_enum64 is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_enum64 { __u32 name_off; __u32 val_lo32; __u32 val_hi32; }; #endif /* _UAPI__LINUX_BTF_H__ */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/bpf_common.h0000644000175100001660000000254614706536574023061 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__LINUX_BPF_COMMON_H__ #define _UAPI__LINUX_BPF_COMMON_H__ /* Instruction classes */ #define BPF_CLASS(code) ((code) & 0x07) #define BPF_LD 0x00 #define BPF_LDX 0x01 #define BPF_ST 0x02 #define BPF_STX 0x03 #define BPF_ALU 0x04 #define BPF_JMP 0x05 #define BPF_RET 0x06 #define BPF_MISC 0x07 /* ld/ldx fields */ #define BPF_SIZE(code) ((code) & 0x18) #define BPF_W 0x00 /* 32-bit */ #define BPF_H 0x08 /* 16-bit */ #define BPF_B 0x10 /* 8-bit */ /* eBPF BPF_DW 0x18 64-bit */ #define BPF_MODE(code) ((code) & 0xe0) #define BPF_IMM 0x00 #define BPF_ABS 0x20 #define BPF_IND 0x40 #define BPF_MEM 0x60 #define BPF_LEN 0x80 #define BPF_MSH 0xa0 /* alu/jmp fields */ #define BPF_OP(code) ((code) & 0xf0) #define BPF_ADD 0x00 #define BPF_SUB 0x10 #define BPF_MUL 0x20 #define BPF_DIV 0x30 #define BPF_OR 0x40 #define BPF_AND 0x50 #define BPF_LSH 0x60 #define BPF_RSH 0x70 #define BPF_NEG 0x80 #define BPF_MOD 0x90 #define BPF_XOR 0xa0 #define BPF_JA 0x00 #define BPF_JEQ 0x10 #define BPF_JGT 0x20 #define BPF_JGE 0x30 #define BPF_JSET 0x40 #define BPF_SRC(code) ((code) & 0x08) #define BPF_K 0x00 #define BPF_X 0x08 #ifndef BPF_MAXINSNS #define BPF_MAXINSNS 4096 #endif #endif /* _UAPI__LINUX_BPF_COMMON_H__ */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/bpf.h0000644000175100001660000104277214706536574021517 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ #ifndef _UAPI__LINUX_BPF_H__ #define _UAPI__LINUX_BPF_H__ #include <linux/types.h> #include <linux/bpf_common.h> /* Extended instruction set based on top of classic BPF */ /* instruction classes */ #define BPF_JMP32 0x06 /* jmp mode in word width */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ #define BPF_MEMSX 0x80 /* load with sign extension */ #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ #define BPF_XADD 0xc0 /* exclusive add - legacy name */ /* alu/jmp fields */ #define BPF_MOV 0xb0 /* mov reg to reg */ #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ /* change endianness of a register */ #define BPF_END 0xd0 /* flags for endianness conversion: */ #define BPF_TO_LE 0x00 /* convert to little-endian */ #define BPF_TO_BE 0x08 /* convert to big-endian */ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE /* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ /* atomic op type fields (stored in immediate) */ #define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ enum bpf_cond_pseudo_jmp { BPF_MAY_GOTO = 0, }; /* Register numbers */ enum { BPF_REG_0 = 0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9, BPF_REG_10, __MAX_BPF_REG, }; /* BPF has 10 general purpose 64-bit registers and stack frame. */ #define MAX_BPF_REG __MAX_BPF_REG struct bpf_insn { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; /* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for * the trailing flexible array member) instead. */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ __u8 data[0]; /* Arbitrary size */ }; /* Header for bpf_lpm_trie_key structs */ struct bpf_lpm_trie_key_hdr { __u32 prefixlen; }; /* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */ struct bpf_lpm_trie_key_u8 { union { struct bpf_lpm_trie_key_hdr hdr; __u32 prefixlen; }; __u8 data[]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ __u32 attach_type; /* program attach type (enum bpf_attach_type) */ }; enum bpf_cgroup_iter_order { BPF_CGROUP_ITER_ORDER_UNSPEC = 0, BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */ BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */ BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */ BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */ }; union bpf_iter_link_info { struct { __u32 map_fd; } map; struct { enum bpf_cgroup_iter_order order; /* At most one of cgroup_fd and cgroup_id can be non-zero. If * both are zero, the walk starts from the default cgroup v2 * root.
For walking v1 hierarchy, one should always explicitly * specify cgroup_fd. */ __u32 cgroup_fd; __u64 cgroup_id; } cgroup; /* Parameters of task iterators. */ struct { __u32 tid; __u32 pid; __u32 pid_fd; } task; }; /* BPF syscall commands, see bpf(2) man-page for more details. */ /** * DOC: eBPF Syscall Preamble * * The operation to be performed by the **bpf**\ () system call is determined * by the *cmd* argument. Each operation takes an accompanying argument, * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see * below). The size argument is the size of the union pointed to by *attr*. */ /** * DOC: eBPF Syscall Commands * * BPF_MAP_CREATE * Description * Create a map and return a file descriptor that refers to the * map. The close-on-exec file descriptor flag (see **fcntl**\ (2)) * is automatically enabled for the new file descriptor. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_MAP_CREATE** will delete the map (but see NOTES). * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_MAP_LOOKUP_ELEM * Description * Look up an element with a given *key* in the map referred to * by the file descriptor *map_fd*. * * The *flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_UPDATE_ELEM * Description * Create or update an element (key/value pair) in a specified map. * * The *flags* argument should be specified as one of the * following: * * **BPF_ANY** * Create a new element or update an existing element. * **BPF_NOEXIST** * Create a new element only if it did not exist. * **BPF_EXIST** * Update an existing element. * **BPF_F_LOCK** * Update a spin_lock-ed map element. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, * **E2BIG**, **EEXIST**, or **ENOENT**. * * **E2BIG** * The number of elements in the map reached the * *max_entries* limit specified at map creation time. * **EEXIST** * If *flags* specifies **BPF_NOEXIST** and the element * with *key* already exists in the map. * **ENOENT** * If *flags* specifies **BPF_EXIST** and the element with * *key* does not exist in the map. * * BPF_MAP_DELETE_ELEM * Description * Look up and delete an element by key in a specified map. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_GET_NEXT_KEY * Description * Look up an element by key in a specified map and return the key * of the next element. Can be used to iterate over all elements * in the map. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * The following cases can be used to iterate over all elements of * the map: * * * If *key* is not found, the operation returns zero and sets * the *next_key* pointer to the key of the first element. * * If *key* is found, the operation returns zero and sets the * *next_key* pointer to the key of the next element. * * If *key* is the last element, returns -1 and *errno* is set * to **ENOENT**. * * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or * **EINVAL** on error. 
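/* Editor's note: a minimal user-space sketch (not part of the original
 * header) of the BPF_MAP_GET_NEXT_KEY iteration pattern described above:
 * a NULL key starts the walk, and ENOENT marks the end. It assumes a map
 * with 4-byte keys whose fd is already known; error handling is brief. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

void dump_keys(int map_fd)
{
	__u32 key, next_key;
	union bpf_attr attr;
	int has_key = 0;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		/* a NULL key asks for the first key in the map */
		attr.key = has_key ? (__u64)(unsigned long)&key : 0;
		attr.next_key = (__u64)(unsigned long)&next_key;

		if (sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr) < 0) {
			if (errno != ENOENT) /* ENOENT == end of map */
				perror("BPF_MAP_GET_NEXT_KEY");
			break;
		}
		printf("key: %u\n", next_key);
		key = next_key;
		has_key = 1;
	}
}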
* * BPF_PROG_LOAD * Description * Verify and load an eBPF program, returning a new file * descriptor associated with the program. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES). * * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is * automatically enabled for the new file descriptor. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_OBJ_PIN * Description * Pin an eBPF program or map referred by the specified *bpf_fd* * to the provided *pathname* on the filesystem. * * The *pathname* argument must not contain a dot ("."). * * On success, *pathname* retains a reference to the eBPF object, * preventing deallocation of the object when the original * *bpf_fd* is closed. This allows the eBPF object to live beyond * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent * process. * * Applying **unlink**\ (2) or similar calls to the *pathname* * unpins the object from the filesystem, removing the reference. * If no other file descriptors or filesystem nodes refer to the * same object, it will be deallocated (see NOTES). * * The filesystem type for the parent directory of *pathname* must * be **BPF_FS_MAGIC**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_OBJ_GET * Description * Open a file descriptor for the eBPF object pinned to the * specified *pathname*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_PROG_ATTACH * Description * Attach an eBPF program to a *target_fd* at the specified * *attach_type* hook. * * The *attach_type* specifies the eBPF attachment point to * attach the program to, and must be one of *bpf_attach_type* * (see below). * * The *attach_bpf_fd* must be a valid file descriptor for a * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap * or sock_ops type corresponding to the specified *attach_type*. * * The *target_fd* must be a valid file descriptor for a kernel * object which depends on the attach type of *attach_bpf_fd*: * * **BPF_PROG_TYPE_CGROUP_DEVICE**, * **BPF_PROG_TYPE_CGROUP_SKB**, * **BPF_PROG_TYPE_CGROUP_SOCK**, * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, * **BPF_PROG_TYPE_CGROUP_SYSCTL**, * **BPF_PROG_TYPE_SOCK_OPS** * * Control Group v2 hierarchy with the eBPF controller * enabled. Requires the kernel to be compiled with * **CONFIG_CGROUP_BPF**. * * **BPF_PROG_TYPE_FLOW_DISSECTOR** * * Network namespace (eg /proc/self/ns/net). * * **BPF_PROG_TYPE_LIRC_MODE2** * * LIRC device path (eg /dev/lircN). Requires the kernel * to be compiled with **CONFIG_BPF_LIRC_MODE2**. * * **BPF_PROG_TYPE_SK_SKB**, * **BPF_PROG_TYPE_SK_MSG** * * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**). * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_DETACH * Description * Detach the eBPF program associated with the *target_fd* at the * hook specified by *attach_type*. The program must have been * previously attached using **BPF_PROG_ATTACH**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately.
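/* Editor's note: a hedged sketch (not in the original header) of the
 * BPF_PROG_ATTACH flow for a cgroup hook just described. It assumes
 * prog_fd refers to an already-loaded BPF_PROG_TYPE_CGROUP_SKB program
 * and that the cgroup v2 path below (hypothetical) exists. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int attach_to_cgroup(int prog_fd)
{
	union bpf_attr attr;
	int cg_fd;

	cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY); /* hypothetical path */
	if (cg_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cg_fd;       /* cgroup directory fd */
	attr.attach_bpf_fd = prog_fd; /* loaded program */
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI;

	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0) {
		perror("BPF_PROG_ATTACH");
		close(cg_fd);
		return -1;
	}
	return cg_fd; /* attachment persists even if this fd is closed */
}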
* * BPF_PROG_TEST_RUN * Description * Run the eBPF program associated with the *prog_fd* a *repeat* * number of times against a provided program context *ctx_in* and * data *data_in*, and return the modified program context * *ctx_out*, *data_out* (for example, packet data), result of the * execution *retval*, and *duration* of the test run. * * The sizes of the buffers provided as input and output * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must * be provided in the corresponding variables *ctx_size_in*, * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any * of these parameters are not provided (ie set to NULL), the * corresponding size field must be zero. * * Some program types have particular requirements: * * **BPF_PROG_TYPE_SK_LOOKUP** * *data_in* and *data_out* must be NULL. * * **BPF_PROG_TYPE_RAW_TRACEPOINT**, * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** * * *ctx_out*, *data_in* and *data_out* must be NULL. * *repeat* must be zero. * * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * **ENOSPC** * Either *data_size_out* or *ctx_size_out* is too small. * **ENOTSUPP** * This command is not supported by the program type of * the program referred to by *prog_fd*. * * BPF_PROG_GET_NEXT_ID * Description * Fetch the next eBPF program currently loaded into the kernel. * * Looks for the eBPF program with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF programs * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_MAP_GET_NEXT_ID * Description * Fetch the next eBPF map currently loaded into the kernel. * * Looks for the eBPF map with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF maps * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_PROG_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF program corresponding to * *prog_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_MAP_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF map corresponding to * *map_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_OBJ_GET_INFO_BY_FD * Description * Obtain information about the eBPF object corresponding to * *bpf_fd*. * * Populates up to *info_len* bytes of *info*, which will be in * one of the following formats depending on the eBPF object type * of *bpf_fd*: * * * **struct bpf_prog_info** * * **struct bpf_map_info** * * **struct bpf_btf_info** * * **struct bpf_link_info** * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_QUERY * Description * Obtain information about eBPF programs associated with the * specified *attach_type* hook. 
* * The *target_fd* must be a valid file descriptor for a kernel * object which depends on the attach type of *attach_bpf_fd*: * * **BPF_PROG_TYPE_CGROUP_DEVICE**, * **BPF_PROG_TYPE_CGROUP_SKB**, * **BPF_PROG_TYPE_CGROUP_SOCK**, * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, * **BPF_PROG_TYPE_CGROUP_SYSCTL**, * **BPF_PROG_TYPE_SOCK_OPS** * * Control Group v2 hierarchy with the eBPF controller * enabled. Requires the kernel to be compiled with * **CONFIG_CGROUP_BPF**. * * **BPF_PROG_TYPE_FLOW_DISSECTOR** * * Network namespace (eg /proc/self/ns/net). * * **BPF_PROG_TYPE_LIRC_MODE2** * * LIRC device path (eg /dev/lircN). Requires the kernel * to be compiled with **CONFIG_BPF_LIRC_MODE2**. * * **BPF_PROG_QUERY** always fetches the number of programs * attached and the *attach_flags* which were used to attach those * programs. Additionally, if *prog_ids* is nonzero and the number * of attached programs is less than *prog_cnt*, populates * *prog_ids* with the eBPF program ids of the programs attached * at *target_fd*. * * The following flags may alter the result: * * **BPF_F_QUERY_EFFECTIVE** * Only return information regarding programs which are * currently effective at the specified *target_fd*. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_RAW_TRACEPOINT_OPEN * Description * Attach an eBPF program to a tracepoint *name* to access kernel * internal arguments of the tracepoint in their raw form. * * The *prog_fd* must be a valid file descriptor associated with * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**. * * No ABI guarantees are made about the content of tracepoint * arguments exposed to the corresponding eBPF program. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_RAW_TRACEPOINT_OPEN** will detach the eBPF program (but see NOTES). * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_BTF_LOAD * Description * Verify and load BPF Type Format (BTF) metadata into the kernel, * returning a new file descriptor associated with the metadata. * BTF is described in more detail at * https://www.kernel.org/doc/html/latest/bpf/btf.html. * * The *btf* parameter must point to valid memory providing * *btf_size* bytes of BTF binary metadata. * * The returned file descriptor can be passed to other **bpf**\ () * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to * associate the BTF with those objects. * * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional * parameters to specify a *btf_log_buf*, *btf_log_size* and * *btf_log_level* which allow the kernel to return freeform log * output regarding the BTF verification process. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_BTF_GET_FD_BY_ID * Description * Open a file descriptor for the BPF Type Format (BTF) * corresponding to *btf_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_TASK_FD_QUERY * Description * Obtain information about eBPF programs associated with the * target process identified by *pid* and *fd*. * * If the *pid* and *fd* are associated with a tracepoint, kprobe * or uprobe perf event, then the *prog_id* and *fd_type* will * be populated with the eBPF program id and file descriptor type * of type **bpf_task_fd_type**.
If associated with a kprobe or * uprobe, the *probe_offset* and *probe_addr* will also be * populated. Optionally, if *buf* is provided, then up to * *buf_len* bytes of *buf* will be populated with the name of * the tracepoint, kprobe or uprobe. * * The resulting *prog_id* may be introspected in deeper detail * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_LOOKUP_AND_DELETE_ELEM * Description * Look up an element with the given *key* in the map referred to * by the file descriptor *fd*, and if found, delete the element. * * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map * types, the *flags* argument needs to be set to 0, but for other * map types, it may be specified as: * * **BPF_F_LOCK** * Look up and delete the value of a spin-locked map * without returning the lock. This must be specified if * the elements contain a spinlock. * * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types * implement this command as a "pop" operation, deleting the top * element rather than one corresponding to *key*. * The *key* and *key_len* parameters should be zeroed when * issuing this operation for these map types. * * This command is only valid for the following map types: * * **BPF_MAP_TYPE_QUEUE** * * **BPF_MAP_TYPE_STACK** * * **BPF_MAP_TYPE_HASH** * * **BPF_MAP_TYPE_PERCPU_HASH** * * **BPF_MAP_TYPE_LRU_HASH** * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_FREEZE * Description * Freeze the permissions of the specified map. * * Write permissions may be frozen by passing zero *flags*. * Upon success, no future syscall invocations may alter the * map state of *map_fd*. Write operations from eBPF programs * are still possible for a frozen map. * * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_BTF_GET_NEXT_ID * Description * Fetch the next BPF Type Format (BTF) object currently loaded * into the kernel. * * Looks for the BTF object with an id greater than *start_id* * and updates *next_id* on success. If no other BTF objects * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_MAP_LOOKUP_BATCH * Description * Iterate and fetch multiple elements in a map. * * Two opaque values are used to manage batch operations, * *in_batch* and *out_batch*. Initially, *in_batch* must be set * to NULL to begin the batched operation. After each subsequent * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant * *out_batch* as the *in_batch* for the next operation to * continue iteration from the current point. Both *in_batch* and * *out_batch* must point to memory large enough to hold a key, * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH, * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters * must be at least 4 bytes wide regardless of key size. * * The *keys* and *values* are output parameters which must point * to memory large enough to hold *count* items based on the key * and value size of the map *map_fd*. The *keys* buffer must be * of *key_size* * *count*. The *values* buffer must be of * *value_size* * *count*. 
* * The *elem_flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * On success, *count* elements from the map are copied into the * user buffer, with the keys copied into *keys* and the values * copied into the corresponding indices in *values*. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * May set *errno* to **ENOSPC** to indicate that *keys* or * *values* is too small to dump an entire bucket during * iteration of a hash-based map type. * * BPF_MAP_LOOKUP_AND_DELETE_BATCH * Description * Iterate and delete all elements in a map. * * This operation has the same behavior as * **BPF_MAP_LOOKUP_BATCH** with two exceptions: * * * Every element that is successfully returned is also deleted * from the map. This is at least *count* elements. Note that * *count* is both an input and an output parameter. * * Upon returning with *errno* set to **EFAULT**, up to * *count* elements may be deleted without returning the keys * and values of the deleted elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_UPDATE_BATCH * Description * Update multiple elements in a map by *key*. * * The *keys* and *values* are input parameters which must point * to memory large enough to hold *count* items based on the key * and value size of the map *map_fd*. The *keys* buffer must be * of *key_size* * *count*. The *values* buffer must be of * *value_size* * *count*. * * Each element specified in *keys* is sequentially updated to the * value in the corresponding index in *values*. The *in_batch* * and *out_batch* parameters are ignored and should be zeroed. * * The *elem_flags* argument should be specified as one of the * following: * * **BPF_ANY** * Create new elements or update existing elements. * **BPF_NOEXIST** * Create new elements only if they do not exist. * **BPF_EXIST** * Update existing elements. * **BPF_F_LOCK** * Update spin_lock-ed map elements. This must be * specified if the map value contains a spinlock. * * On success, *count* elements from the map are updated. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or * **E2BIG**. **E2BIG** indicates that the number of elements in * the map reached the *max_entries* limit specified at map * creation time. * * May set *errno* to one of the following error codes under * specific circumstances: * * **EEXIST** * If *flags* specifies **BPF_NOEXIST** and the element * with *key* already exists in the map. * **ENOENT** * If *flags* specifies **BPF_EXIST** and the element with * *key* does not exist in the map. * * BPF_MAP_DELETE_BATCH * Description * Delete multiple elements in a map by *key*. * * The *keys* parameter is an input parameter which must point * to memory large enough to hold *count* items based on the key * size of the map *map_fd*, that is, *key_size* * *count*. * * Each element specified in *keys* is sequentially deleted. The * *in_batch*, *out_batch*, and *values* parameters are ignored * and should be zeroed.
* * The *elem_flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * On success, *count* elements from the map are deleted. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. If * *errno* is **EFAULT**, up to *count* elements may have been * deleted. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_LINK_CREATE * Description * Attach an eBPF program to a *target_fd* at the specified * *attach_type* hook and return a file descriptor handle for * managing the link. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_UPDATE * Description * Update the eBPF program in the specified *link_fd* to * *new_prog_fd*. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_LINK_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF Link corresponding to * *link_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_GET_NEXT_ID * Description * Fetch the next eBPF link currently loaded into the kernel. * * Looks for the eBPF link with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF links * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_ENABLE_STATS * Description * Enable eBPF runtime statistics gathering. * * Runtime statistics gathering for the eBPF runtime is disabled * by default to minimize the corresponding performance overhead. * This command enables statistics globally. * * Multiple programs may independently enable statistics. * After gathering the desired statistics, eBPF runtime statistics * may be disabled again by calling **close**\ (2) for the file * descriptor returned by this function. Statistics will only be * disabled system-wide when all outstanding file descriptors * returned by prior calls for this subcommand are closed. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_ITER_CREATE * Description * Create an iterator on top of the specified *link_fd* (as * previously created using **BPF_LINK_CREATE**) and return a * file descriptor that can be used to trigger the iteration. * * If the resulting file descriptor is pinned to the filesystem * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls * for that path will trigger the iterator to read kernel state * using the eBPF program attached to *link_fd*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_DETACH * Description * Forcefully detach the specified *link_fd* from its * corresponding attachment point. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_BIND_MAP * Description * Bind a map to the lifetime of an eBPF program.
* * The map identified by *map_fd* is bound to the program * identified by *prog_fd* and only released when *prog_fd* is * released. This may be used in cases where metadata should be * associated with a program which otherwise does not contain any * references to the map (for example, embedded in the eBPF * program instructions). * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_TOKEN_CREATE * Description * Create BPF token with embedded information about what * BPF-related functionality it allows: * - a set of allowed bpf() syscall commands; * - a set of allowed BPF map types to be created with * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed; * - a set of allowed BPF program types and BPF program attach * types to be loaded with BPF_PROG_LOAD command, if * BPF_PROG_LOAD itself is allowed. * * BPF token is created (derived) from an instance of BPF FS, * assuming it has necessary delegation mount options specified. * This BPF token can be passed as an extra parameter to various * bpf() syscall commands to grant BPF subsystem functionality to * unprivileged processes. * * When created, BPF token is "associated" with the owning * user namespace of BPF FS instance (super block) that it was * derived from, and subsequent BPF operations performed with * BPF token would be performing capabilities checks (i.e., * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within * that user namespace. Without BPF token, such capabilities * have to be granted in init user namespace, making bpf() * syscall incompatible with user namespace, for the most part. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * NOTES * eBPF objects (maps and programs) can be shared between processes. * * * After **fork**\ (2), the child inherits file descriptors * referring to the same eBPF objects. * * File descriptors referring to eBPF objects can be transferred over * **unix**\ (7) domain sockets. * * File descriptors referring to eBPF objects can be duplicated in the * usual way, using **dup**\ (2) and similar calls. * * File descriptors referring to eBPF objects can be pinned to the * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2). * * An eBPF object is deallocated only after all file descriptors referring * to the object have been closed and no references remain pinned to the * filesystem or attached (for example, bound to a program or device). 
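/* Editor's note: an illustrative sketch (not part of the original
 * header) of the BPF_OBJ_PIN lifetime rule stated in the NOTES above:
 * after pinning, the object survives close() of the original fd until
 * the filesystem node is unlinked. Assumes a bpffs mount at /sys/fs/bpf
 * and an illustrative pin name. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int pin_and_close(int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
	attr.bpf_fd = map_fd;

	if (syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr)) < 0) {
		perror("BPF_OBJ_PIN");
		return -1;
	}
	/* the pinned path now holds a reference, so the map outlives this fd */
	close(map_fd);
	return 0;
}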
*/ enum bpf_cmd { BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM, BPF_MAP_GET_NEXT_KEY, BPF_PROG_LOAD, BPF_OBJ_PIN, BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, BPF_PROG_QUERY, BPF_RAW_TRACEPOINT_OPEN, BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, BPF_BTF_GET_NEXT_ID, BPF_MAP_LOOKUP_BATCH, BPF_MAP_LOOKUP_AND_DELETE_BATCH, BPF_MAP_UPDATE_BATCH, BPF_MAP_DELETE_BATCH, BPF_LINK_CREATE, BPF_LINK_UPDATE, BPF_LINK_GET_FD_BY_ID, BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, BPF_LINK_DETACH, BPF_PROG_BIND_MAP, BPF_TOKEN_CREATE, __MAX_BPF_CMD, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, BPF_MAP_TYPE_SOCKMAP, BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to * both cgroup-attached and other progs and supports all functionality * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark * BPF_MAP_TYPE_CGROUP_STORAGE deprecated. */ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE + * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE * deprecated. */ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_DEVMAP_HASH, BPF_MAP_TYPE_STRUCT_OPS, BPF_MAP_TYPE_RINGBUF, BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, BPF_MAP_TYPE_BLOOM_FILTER, BPF_MAP_TYPE_USER_RINGBUF, BPF_MAP_TYPE_CGRP_STORAGE, BPF_MAP_TYPE_ARENA, __MAX_BPF_MAP_TYPE }; /* Note that tracing related programs such as * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} * are not subject to a stable API since kernel internal data * structures can change from release to release and may * therefore break existing tracing BPF programs. Tracing BPF * programs correspond to /a/ specific kernel which is to be * analyzed, and not /a/ specific kernel /and/ all future ones. 
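/* Editor's note: a minimal sketch (not in the original header) of the
 * BPF_MAP_CREATE command using one of the map types enumerated above.
 * It creates a small BPF_MAP_TYPE_HASH with 4-byte keys and 8-byte
 * values; the map name is illustrative. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int create_hash_map(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u64);
	attr.max_entries = 1024;
	strncpy(attr.map_name, "example_map", BPF_OBJ_NAME_LEN - 1);

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_MAP_CREATE");
	return fd; /* close-on-exec is set automatically, per the doc above */
}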
*/ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, BPF_PROG_TYPE_TRACEPOINT, BPF_PROG_TYPE_XDP, BPF_PROG_TYPE_PERF_EVENT, BPF_PROG_TYPE_CGROUP_SKB, BPF_PROG_TYPE_CGROUP_SOCK, BPF_PROG_TYPE_LWT_IN, BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, BPF_PROG_TYPE_SK_SKB, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_RAW_TRACEPOINT, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_PROG_TYPE_TRACING, BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ BPF_PROG_TYPE_NETFILTER, __MAX_BPF_PROG_TYPE }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS, BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, BPF_SK_SKB_STREAM_PARSER, BPF_SK_SKB_STREAM_VERDICT, BPF_CGROUP_DEVICE, BPF_SK_MSG_VERDICT, BPF_CGROUP_INET4_BIND, BPF_CGROUP_INET6_BIND, BPF_CGROUP_INET4_CONNECT, BPF_CGROUP_INET6_CONNECT, BPF_CGROUP_INET4_POST_BIND, BPF_CGROUP_INET6_POST_BIND, BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, BPF_CGROUP_SYSCTL, BPF_CGROUP_UDP4_RECVMSG, BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, BPF_TRACE_RAW_TP, BPF_TRACE_FENTRY, BPF_TRACE_FEXIT, BPF_MODIFY_RETURN, BPF_LSM_MAC, BPF_TRACE_ITER, BPF_CGROUP_INET4_GETPEERNAME, BPF_CGROUP_INET6_GETPEERNAME, BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, BPF_XDP, BPF_SK_SKB_VERDICT, BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, BPF_LSM_CGROUP, BPF_STRUCT_OPS, BPF_NETFILTER, BPF_TCX_INGRESS, BPF_TCX_EGRESS, BPF_TRACE_UPROBE_MULTI, BPF_CGROUP_UNIX_CONNECT, BPF_CGROUP_UNIX_SENDMSG, BPF_CGROUP_UNIX_RECVMSG, BPF_CGROUP_UNIX_GETPEERNAME, BPF_CGROUP_UNIX_GETSOCKNAME, BPF_NETKIT_PRIMARY, BPF_NETKIT_PEER, BPF_TRACE_KPROBE_SESSION, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE /* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[] * in sync with the definitions below. */ enum bpf_link_type { BPF_LINK_TYPE_UNSPEC = 0, BPF_LINK_TYPE_RAW_TRACEPOINT = 1, BPF_LINK_TYPE_TRACING = 2, BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, BPF_LINK_TYPE_SOCKMAP = 14, __MAX_BPF_LINK_TYPE, }; #define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE enum bpf_perf_event_type { BPF_PERF_EVENT_UNSPEC = 0, BPF_PERF_EVENT_UPROBE = 1, BPF_PERF_EVENT_URETPROBE = 2, BPF_PERF_EVENT_KPROBE = 3, BPF_PERF_EVENT_KRETPROBE = 4, BPF_PERF_EVENT_TRACEPOINT = 5, BPF_PERF_EVENT_EVENT = 6, }; /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. * * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, * the program in this cgroup yields to sub-cgroup program. 
* * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, * that cgroup program gets run in addition to the program in this cgroup. * * Only one program is allowed to be attached to a cgroup with * NONE or BPF_F_ALLOW_OVERRIDE flag. * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will * release old program and attach the new one. Attach flags have to match. * * Multiple programs are allowed to be attached to a cgroup with * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order * (those that were attached first, run first) * The programs of sub-cgroup are executed first, then programs of * this cgroup and then programs of parent cgroup. * When a child program makes a decision (like picking TCP CA or sock bind), * the parent program has a chance to override it. * * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of * programs for a cgroup. Though it's possible to replace an old program at * any position by also specifying BPF_F_REPLACE flag and position itself in * replace_bpf_fd attribute. Old program at this position will be released. * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. * Ex1: * cgrp1 (MULTI progs A, B) -> * cgrp2 (OVERRIDE prog C) -> * cgrp3 (MULTI prog D) -> * cgrp4 (OVERRIDE prog E) -> * cgrp5 (NONE prog F) * the event in cgrp5 triggers execution of F,D,A,B in that order. * if prog F is detached, the execution is E,D,A,B * if prog F and D are detached, the execution is E,A,B * if prog F, E and D are detached, the execution is C,A,B * * All eligible programs are executed regardless of return code from * earlier programs. */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) /* Generic attachment flags. */ #define BPF_F_REPLACE (1U << 2) #define BPF_F_BEFORE (1U << 3) #define BPF_F_AFTER (1U << 4) #define BPF_F_ID (1U << 5) #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */ /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, * and NET_IP_ALIGN defined to 2. */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads and stores (such * as sparc and mips) the verifier validates that all loads and * stores provably follow this requirement. This flag turns that * checking and enforcement off. * * It is mostly used for testing when we want to validate the * context and memory access aspects of the verifier, but because * of an unaligned access the alignment check would trigger before * the one we are interested in. */ #define BPF_F_ANY_ALIGNMENT (1U << 1) /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. * Verifier does sub-register def/use analysis and identifies instructions whose * def only matters for low 32-bit, high 32-bit is never referenced later * through implicit zero extension. Therefore verifier notifies JIT back-ends * that it is safe to ignore clearing high 32-bit for these instructions. This * saves some back-ends a lot of code-gen. However such optimization is not * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends * hence haven't used the verifier's analysis result.
But, we really want to have a * way to be able to verify the correctness of the described optimization on * x86_64 on which testsuites are frequently exercised. * * So, this flag is introduced. Once it is set, verifier will randomize high * 32-bit for those instructions that have been identified as safe to ignore them. * Then, if verifier is not doing correct analysis, such randomization will * regress tests to expose bugs. */ #define BPF_F_TEST_RND_HI32 (1U << 2) /* The verifier internal test flag. Behavior is undefined */ #define BPF_F_TEST_STATE_FREQ (1U << 3) /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will * restrict map and helper usage for such programs. Sleepable BPF programs can * only be attached to hooks where kernel execution context allows sleeping. * Such programs are allowed to use helpers that may sleep like * bpf_copy_from_user(). */ #define BPF_F_SLEEPABLE (1U << 4) /* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program * fully supports xdp frags. */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) /* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded * program becomes device-bound but can access XDP metadata. */ #define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) /* The verifier internal test flag. Behavior is undefined */ #define BPF_F_TEST_REG_INVARIANTS (1U << 7) /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ enum { BPF_F_KPROBE_MULTI_RETURN = (1U << 0) }; /* link_create.uprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_UPROBE_MULTI attach type to create return probe. */ enum { BPF_F_UPROBE_MULTI_RETURN = (1U << 0) }; /* link_create.netfilter.flags used in LINK_CREATE command for * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. */ #define BPF_F_NETFILTER_IP_DEFRAG (1U << 0) /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] * insn[0].imm: map fd or fd_idx * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 #define BPF_PSEUDO_MAP_IDX 5 /* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE * insn[0].imm: map fd or fd_idx * insn[1].imm: offset into value * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map[0]+offset * verifier type: PTR_TO_MAP_VALUE */ #define BPF_PSEUDO_MAP_VALUE 2 #define BPF_PSEUDO_MAP_IDX_VALUE 6 /* insn[0].src_reg: BPF_PSEUDO_BTF_ID * insn[0].imm: kernel btf id of VAR * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of the kernel variable * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var * is struct/union. */ #define BPF_PSEUDO_BTF_ID 3 /* insn[0].src_reg: BPF_PSEUDO_FUNC * insn[0].imm: insn offset to the func * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of the function * verifier type: PTR_TO_FUNC.
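/* Editor's note: an illustrative sketch (not in the original header) of
 * the two-instruction ldimm64 encoding described above: loading a map
 * address into R1 by placing BPF_PSEUDO_MAP_FD in src_reg of insn[0]
 * and the map fd in insn[0].imm, with insn[1] serving as the second
 * half of the 16-byte instruction. */
#include <string.h>
#include <linux/bpf.h>

static void emit_ld_map_fd(struct bpf_insn *insn, int map_fd)
{
	memset(insn, 0, 2 * sizeof(*insn));
	/* first half: opcode, dst/src registers, low 32 bits of the immediate */
	insn[0].code = BPF_LD | BPF_DW | BPF_IMM;
	insn[0].dst_reg = BPF_REG_1;
	insn[0].src_reg = BPF_PSEUDO_MAP_FD; /* tells the verifier imm is a map fd */
	insn[0].imm = map_fd;
	/* second half: imm carries the high 32 bits, zero here */
	insn[1].imm = 0;
}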
*/ #define BPF_PSEUDO_FUNC 4 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function */ #define BPF_PSEUDO_CALL 1 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL, * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel */ #define BPF_PSEUDO_KFUNC_CALL 2 enum bpf_addr_space_cast { BPF_ADDR_SPACE_CAST = 1, }; /* flags for BPF_MAP_UPDATE_ELEM command */ enum { BPF_ANY = 0, /* create new element or update existing */ BPF_NOEXIST = 1, /* create new element if it didn't exist */ BPF_EXIST = 2, /* update existing element */ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ }; /* flags for BPF_MAP_CREATE command */ enum { BPF_F_NO_PREALLOC = (1U << 0), /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ BPF_F_NO_COMMON_LRU = (1U << 1), /* Specify numa node during map creation */ BPF_F_NUMA_NODE = (1U << 2), /* Flags for accessing BPF object from syscall side. */ BPF_F_RDONLY = (1U << 3), BPF_F_WRONLY = (1U << 4), /* Flag for stack_map, store build_id+offset instead of pointer */ BPF_F_STACK_BUILD_ID = (1U << 5), /* Zero-initialize hash function seed. This should only be used for testing. */ BPF_F_ZERO_SEED = (1U << 6), /* Flags for accessing BPF object from program side. */ BPF_F_RDONLY_PROG = (1U << 7), BPF_F_WRONLY_PROG = (1U << 8), /* Clone map from listener for newly accepted socket */ BPF_F_CLONE = (1U << 9), /* Enable memory-mapping BPF map */ BPF_F_MMAPABLE = (1U << 10), /* Share perf_event among processes */ BPF_F_PRESERVE_ELEMS = (1U << 11), /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), /* Create a map that will be registered/unregistered by the backing bpf_link */ BPF_F_LINK = (1U << 13), /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), /* Flag for value_type_btf_obj_fd, the fd is available */ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), /* BPF token FD is passed in a corresponding command's token_fd field */ BPF_F_TOKEN_FD = (1U << 16), /* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */ BPF_F_SEGV_ON_FAULT = (1U << 17), /* Do not translate kernel bpf_arena pointers to user pointers */ BPF_F_NO_USER_CONV = (1U << 18), }; /* Flags for BPF_PROG_QUERY. */ /* Query effective (directly attached + inherited from ancestor cgroups) * programs that will be executed for events within a cgroup. * attach_flags with this flag are always returned as 0.
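/* Editor's note: a short sketch (not in the original header) combining
 * the BPF_MAP_UPDATE_ELEM flags defined above: an insert-only update
 * with BPF_NOEXIST, falling back to BPF_EXIST when the key is taken.
 * Assumes a hash map fd with 4-byte keys and 8-byte values. */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int upsert(int map_fd, __u32 key, __u64 val)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&val;
	attr.flags = BPF_NOEXIST; /* fail with EEXIST if key already present */

	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) == 0)
		return 0;
	if (errno != EEXIST)
		return -1;
	attr.flags = BPF_EXIST; /* key exists: update it instead */
	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}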
*/ #define BPF_F_QUERY_EFFECTIVE (1U << 0) /* Flags for BPF_PROG_TEST_RUN */ /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ #define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { /* enable run_time_ns and run_cnt */ BPF_STATS_RUN_TIME = 0, }; enum bpf_stack_build_id_status { /* user space needs an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, /* with valid build_id and offset */ BPF_STACK_BUILD_ID_VALID = 1, /* couldn't get build_id, fallback to ip */ BPF_STACK_BUILD_ID_IP = 2, }; #define BPF_BUILD_ID_SIZE 20 struct bpf_stack_build_id { __s32 status; unsigned char build_id[BPF_BUILD_ID_SIZE]; union { __u64 offset; __u64 ip; }; }; #define BPF_OBJ_NAME_LEN 16U union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */ __u32 inner_map_fd; /* fd pointing to the inner map */ __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */ char map_name[BPF_OBJ_NAME_LEN]; __u32 map_ifindex; /* ifindex of netdev to create on */ __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- * struct stored as the * map value */ /* Any per-map-type extra fields * * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the * number of hash functions (if 0, the bloom filter will default * to using 5 hash functions). * * BPF_MAP_TYPE_ARENA - contains the address where user space * is going to mmap() the arena. It has to be page aligned. */ __u64 map_extra; __s32 value_type_btf_obj_fd; /* fd pointing to a BTF * type data for * btf_vmlinux_value_type_id. */ /* BPF token FD to use with BPF_MAP_CREATE operation. * If provided, map_flags should have BPF_F_TOKEN_FD flag set. */ __s32 map_token_fd; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; __aligned_u64 key; union { __aligned_u64 value; __aligned_u64 next_key; }; __u64 flags; }; struct { /* struct used by BPF_MAP_*_BATCH commands */ __aligned_u64 in_batch; /* start batch, * NULL to start from beginning */ __aligned_u64 out_batch; /* output: next start batch */ __aligned_u64 keys; __aligned_u64 values; __u32 count; /* input/output: * input: # of key/value * elements * output: # of filled elements */ __u32 map_fd; __u64 elem_flags; __u64 flags; } batch; struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; __aligned_u64 insns; __aligned_u64 license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; __u32 prog_ifindex; /* ifindex of netdev to prep for */ /* For some prog types expected attach type must be known at * load time to verify attach type specific parts of prog * (context accesses, allowed helpers, etc).
*/ __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ __aligned_u64 func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ union { /* valid prog_fd to attach to bpf prog */ __u32 attach_prog_fd; /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; __u32 core_relo_cnt; /* number of bpf_core_relo */ __aligned_u64 fd_array; /* array of FDs */ __aligned_u64 core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ /* output: actual total log contents size (including terminating zero). * It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). */ __u32 log_true_size; /* BPF token FD to use with BPF_PROG_LOAD operation. * If provided, prog_flags should have BPF_F_TOKEN_FD flag set. */ __s32 prog_token_fd; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ __aligned_u64 pathname; __u32 bpf_fd; __u32 file_flags; /* Same as dirfd in openat() syscall; see openat(2) * manpage for details of path FD and pathname semantics; * path_fd should be accompanied by BPF_F_PATH_FD flag set in * file_flags field, otherwise it should be set to zero; * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed. */ __s32 path_fd; }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ union { __u32 target_fd; /* target object to attach to or ... */ __u32 target_ifindex; /* target ifindex */ }; __u32 attach_bpf_fd; __u32 attach_type; __u32 attach_flags; __u32 replace_bpf_fd; union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; __u32 data_size_in; /* input: len of data_in */ __u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; /* input: len of ctx_in */ __u32 ctx_size_out; /* input/output: len of ctx_out * returns ENOSPC if ctx_out * is too small. */ __aligned_u64 ctx_in; __aligned_u64 ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; __u32 link_id; }; __u32 next_id; __u32 open_flags; }; struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; __aligned_u64 info; } info; struct { /* anonymous struct used by BPF_PROG_QUERY command */ union { __u32 target_fd; /* target object to query or ... */ __u32 target_ifindex; /* target ifindex */ }; __u32 attach_type; __u32 query_flags; __u32 attach_flags; __aligned_u64 prog_ids; union { __u32 prog_cnt; __u32 count; }; __u32 :32; /* output: per-program attach_flags. * not allowed to be set during effective query.
*/ __aligned_u64 prog_attach_flags; __aligned_u64 link_ids; __aligned_u64 link_attach_flags; __u64 revision; } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; __u32 :32; __aligned_u64 cookie; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ __aligned_u64 btf; __aligned_u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; /* output: actual total log contents size (including terminating zero). * It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). */ __u32 btf_log_true_size; __u32 btf_flags; /* BPF token FD to use with BPF_BTF_LOAD operation. * If provided, btf_flags should have BPF_F_TOKEN_FD flag set. */ __s32 btf_token_fd; }; struct { __u32 pid; /* input: pid */ __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ __aligned_u64 buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */ __u32 prog_id; /* output: prog_id */ __u32 fd_type; /* output: BPF_FD_TYPE_* */ __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; struct { /* struct used by BPF_LINK_CREATE command */ union { __u32 prog_fd; /* eBPF program to attach */ __u32 map_fd; /* struct_ops to attach */ }; union { __u32 target_fd; /* target object to attach to or ... */ __u32 target_ifindex; /* target ifindex */ }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ union { __u32 target_btf_id; /* btf_id of target to attach to */ struct { __aligned_u64 iter_info; /* extra bpf_iter_link_info */ __u32 iter_info_len; /* iter_info length */ }; struct { /* black box user-provided value passed through * to BPF program at the execution time and * accessible through bpf_get_attach_cookie() BPF helper */ __u64 bpf_cookie; } perf_event; struct { __u32 flags; __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; __aligned_u64 cookies; } kprobe_multi; struct { /* this is overlaid with the target_btf_id above. */ __u32 target_btf_id; /* black box user-provided value passed through * to BPF program at the execution time and * accessible through bpf_get_attach_cookie() BPF helper */ __u64 cookie; } tracing; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; } tcx; struct { __aligned_u64 path; __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; __aligned_u64 cookies; __u32 cnt; __u32 flags; __u32 pid; } uprobe_multi; struct { union { __u32 relative_fd; __u32 relative_id; }; __u64 expected_revision; } netkit; }; } link_create; struct { /* struct used by BPF_LINK_UPDATE command */ __u32 link_fd; /* link fd */ union { /* new program fd to update link with */ __u32 new_prog_fd; /* new struct_ops map fd to update link with */ __u32 new_map_fd; }; __u32 flags; /* extra flags */ union { /* expected link's program fd; is specified only if * BPF_F_REPLACE flag is set in flags. */ __u32 old_prog_fd; /* expected link's map fd; is specified only * if BPF_F_REPLACE flag is set.
*/ __u32 old_map_fd; }; } link_update; struct { __u32 link_fd; } link_detach; struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; struct { /* struct used by BPF_ITER_CREATE command */ __u32 link_fd; __u32 flags; } iter_create; struct { /* struct used by BPF_PROG_BIND_MAP command */ __u32 prog_fd; __u32 map_fd; __u32 flags; /* extra flags */ } prog_bind_map; struct { /* struct used by BPF_TOKEN_CREATE command */ __u32 flags; __u32 bpffs_fd; } token_create; } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF * developers about the multiple available eBPF helper functions. It can be * parsed and used to produce a manual page. The workflow is the following, * and requires the rst2man utility: * * $ ./scripts/bpf_doc.py \ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 * $ man /tmp/bpf-helpers.7 * * Note that in order to produce this external documentation, some RST * formatting is used in the descriptions to get "bold" and "italics" in * manual pages. Also note that the few trailing white spaces are * intentional, removing them would break paragraphs for rst2man. * * Start of BPF helper function descriptions: * * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) * Description * Perform a lookup in *map* for an entry associated to *key*. * Return * Map value associated to *key*, or **NULL** if no entry was * found. * * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * Flag value **BPF_NOEXIST** cannot be used for maps of types * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all * elements always exist), the helper would return an error. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. * * Generally, use **bpf_probe_read_user**\ () or * **bpf_probe_read_kernel**\ () instead. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. * Does not include time the system was suspended. * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) * Return * Current *ktime*. * * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as with all eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. * Lines are discarded while *\/sys/kernel/tracing/trace* is * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
* The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * * :: * * telnet-470 [001] .N.. 419421.045894: 0x00000001: * * In the above: * * * ``telnet`` is the name of the current task. * * ``470`` is the PID of the current task. * * ``001`` is the CPU number on which the task is * running. * * In ``.N..``, each character refers to a set of * options (whether irqs are enabled, scheduling * options, whether hard/softirqs are running, level of * preempt_disabled respectively). **N** means that * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** * are set. * * ``419421.045894`` is a timestamp. * * ``0x00000001`` is a fake value used by BPF for the * instruction pointer register. * * ``<formatted msg>`` is the message formatted with * *fmt*. * * The conversion specifiers supported by *fmt* are similar, but * more limited than for printk(). They are **%d**, **%i**, * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size * of field, padding with zeroes, etc.) is available, and the * helper will return **-EINVAL** (but print nothing) if it * encounters an unknown specifier. * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values * to user space, perf events should be preferred. * Return * The number of bytes written to the buffer, or a negative error * in case of failure. * * u32 bpf_get_prandom_u32(void) * Description * Get a pseudo-random number. * * From a security point of view, this helper uses its own * pseudo-random internal state, and cannot be used to infer the * seed of other random functions in the kernel. However, it is * essential to note that the generator used by the helper is not * cryptographically secure. * Return * A random 32-bit unsigned value. * * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that * all programs run with migration disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return * The SMP id of the processor running the program. * Attributes * __bpf_fastcall * * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the * checksum for the packet after storing the bytes) and * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g.
IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper * must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in *size*. * Alternatively, it is possible to store the difference between * the previous and the new values of the header field in *to*, by * setting *from* and *size* to 0. For both methods, *offset* * indicates the location of the IP checksum within the packet. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the * helper must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored on the lowest * four bits of *flags*. Alternatively, it is possible to store * the difference between the previous and the new values of the * header field in *to*, by setting *from* and the four lowest * bits of *flags* to 0. For both methods, *offset* indicates the * location of the IP checksum within the packet. In addition to * the size of the field, *flags* can be added (bitwise OR) actual * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack * frame is used (but values on stack and in registers for the * caller are not accessible to the callee). This mechanism allows * for program chaining, either for raising the maximum number of * available eBPF instructions, or to execute given programs in * conditional blocks. For security reasons, there is an upper * limit to the number of successive tail calls that can be * performed. 
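 *
 *		A minimal sketch of such chaining in an XDP program, assuming
 *		a **BPF_MAP_TYPE_PROG_ARRAY** map named *jmp_table* populated
 *		from user space (both the map name and the index are
 *		illustrative):
 *
 *		::
 *
 *			bpf_tail_call(ctx, &jmp_table, 0);
 *			// only reached if the tail call failed
 *			return XDP_PASS;
 *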
* * Upon call of this helper, the program attempts to jump into a * program referenced at index *index* in *prog_array_map*, a * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes * *ctx*, a pointer to the context. * * If the call succeeds, the kernel immediately runs the first * instruction of the new program. This is not a function call, * and it never returns to the previous program. If the call * fails, then the helper has no effect, and the caller continues * to run its subsequent instructions. A call can fail if the * destination program for the jump does not exist (i.e. *index* * is superior to the number of entries in *prog_array_map*), or * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), * which is currently set to 33. * Return * 0 on success, or a negative error in case of failure. * * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress * interfaces can be used for redirection. The **BPF_F_INGRESS** * value in *flags* is used to make the distinction (ingress path * is selected if the flag is present, egress path otherwise). * This is the only flag supported for now. * * In comparison with **bpf_redirect**\ () helper, * **bpf_clone_redirect**\ () has the associated cost of * duplicating the packet buffer, but this can be executed out of * the eBPF program. Conversely, **bpf_redirect**\ () is more * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. Positive * error indicates a potential drop or congestion in the target * device. The particular positive error codes are not defined. * * u64 bpf_get_current_pid_tgid(void) * Description * Get the current pid and tgid. * Return * A 64-bit integer containing the current tgid and pid, and * created as such: * *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) * Description * Get the current uid and gid. * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of * the executable (excluding the path) for the current task. The * *size_of_buf* must be strictly positive. On success, the * helper makes sure that the *buf* is NUL-terminated. On failure, * it is filled with zeroes. * Return * 0 on success, or a negative error in case of failure. * * u32 bpf_get_cgroup_classid(struct sk_buff *skb) * Description * Retrieve the classid for the current task, i.e. for the net_cls * cgroup to which *skb* belongs. * * This helper can be used on TC egress path, but not on ingress. 
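 *
 *		For example, a TC egress program might drop traffic tagged
 *		with a given classid (a sketch; the classid value is
 *		illustrative):
 *
 *		::
 *
 *			if (bpf_get_cgroup_classid(skb) == 0x100001)
 *				return TC_ACT_SHOT;	// drop tagged traffic
 *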
* * The net_cls cgroup provides an interface to tag network packets * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can * use a mixture of them, but note that the net_cls cgroup is for * cgroup v1 only. This makes it incompatible with BPF programs * run on cgroups, which is a cgroup-v2-only feature (a socket can * only hold data for one version of cgroups at a time). * * This helper is only available if the kernel was compiled with * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to * "**y**" or to "**m**". * Return * The classid, or 0 for the default unconfigured classid. * * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update * the checksum. Note that if *vlan_proto* is different from * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be * filled with tunnel metadata for the packet associated to *skb*. * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which * indicates that the tunnel is based on IPv6 protocol instead of * IPv4. * * The **struct bpf_tunnel_key** is an object that generalizes the * principal parameters used by various tunneling protocols into a * single struct. This way, it can be used to easily make a * decision based on the contents of the encapsulation header, * "summarized" in this struct. In particular, it holds the IP * address of the remote end (IPv4 or IPv6, depending on the case) * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, * this struct exposes the *key*\ **->tunnel_id**, which is * generally mapped to a VNI (Virtual Network Identifier), making * it programmable together with the **bpf_skb_set_tunnel_key**\ * () helper.
* * Let's imagine that the following code is part of a program * attached to the TC ingress interface, on one end of a GRE * tunnel, and is supposed to filter out all messages coming from * remote ends with IPv4 address other than 10.0.0.1: * * :: * * int ret; * struct bpf_tunnel_key key = {}; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices * that can operate in "collect metadata" mode: instead of having * one network device per specific configuration, the "collect * metadata" mode only requires a single device where the * configuration can be extracted from this helper. * * This can be used together with various tunnels such as VXLan, * Geneve, GRE or IP in IP (IPIP). * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The * *flags* can be set to a combination of the following values: * * **BPF_F_TUNINFO_IPV6** * Indicate that the tunnel is based on IPv6 protocol * instead of IPv4. * **BPF_F_ZERO_CSUM_TX** * For IPv4 packets, add a flag to tunnel metadata * indicating that checksum computation should be skipped * and checksum set to zeroes. * **BPF_F_DONT_FRAGMENT** * Add a flag to tunnel metadata indicating that the * packet should not be fragmented. * **BPF_F_SEQ_NUMBER** * Add a flag to tunnel metadata indicating that a * sequence number should be added to tunnel header before * sending the packet. This flag was added for GRE * encapsulation, but might be used with other protocols * as well in the future. * **BPF_F_NO_TUNNEL_KEY** * Add a flag to tunnel metadata indicating that no tunnel * key should be set in the resulting tunnel header. * * Here is a typical usage on the transmit path: * * :: * * struct bpf_tunnel_key key; * populate key ... * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); * * See also the description of the **bpf_skb_get_tunnel_key**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) * Description * Read the value of a perf event counter. This helper relies on a * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of * the perf event counter is selected when *map* is updated with * perf event file descriptors. The *map* is an array whose size * is the number of available CPUs, and each cell contains a value * relative to one CPU. The value to retrieve is indicated by * *flags*, that contains the index of the CPU to look up, masked * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * Note that before Linux 4.13, only hardware perf event can be * retrieved. * * Also, be aware that the newer helper * **bpf_perf_event_read_value**\ () is recommended over * **bpf_perf_event_read**\ () in general. The latter has some ABI * quirks where error and counter value are used as a return code * (which is wrong to do since ranges may overlap). 
This issue is * fixed with **bpf_perf_event_read_value**\ (), which at the same * time provides more features over the **bpf_perf_event_read**\ * () interface. Please refer to the description of * **bpf_perf_event_read_value**\ () for details. * Return * The value of the perf event counter read from the map, or a * negative error code in case of failure. * * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ * (), except that the packet is not cloned, which provides * increased performance. * * Except for XDP, both ingress and egress interfaces can be used * for redirection. The **BPF_F_INGRESS** value in *flags* is used * to make the distinction (ingress path is selected if the flag * is present, egress path otherwise). Currently, XDP only * supports redirection to the egress interface, and accepts no * flag at all. * * The same effect can also be attained with the more generic * **bpf_redirect_map**\ (), which uses a BPF map to store the * redirect target instead of providing it directly to the helper. * Return * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on * error. * * u32 bpf_get_route_realm(struct sk_buff *skb) * Description * Retrieve the realm of the route, that is to say the * **tclassid** field of the destination for the *skb*. The * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. * * Retrieving this identifier works with the clsact TC egress hook * (see also **tc-bpf(8)**), or alternatively on conventional * classful egress qdiscs, but not on TC ingress path. In case of * clsact TC egress hook, this has the advantage that, internally, * the destination entry has not been dropped yet in the transmit * path. Therefore, the destination entry does not need to be * artificially held via **netif_keep_dst**\ () for a classful * qdisc until the *skb* is freed. * * This helper is available only if the kernel was compiled with * **CONFIG_IP_ROUTE_CLASSID** configuration option. * Return * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*. * * The context of the program *ctx* also needs to be passed to the * helper. * * In user space, a program willing to read the values needs to * call **perf_event_open**\ () on the perf event (either for * one or for all CPUs) and to store the file descriptor into the * *map*. This must be done before the eBPF program can send data * into it.
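 *
 *		A kernel-side sketch, assuming a map *events* of type
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** and an illustrative event
 *		layout:
 *
 *		::
 *
 *			struct event { __u32 pid; __u64 ts; } e = {};
 *
 *			e.pid = bpf_get_current_pid_tgid() >> 32;
 *			e.ts = bpf_ktime_get_ns();
 *			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *					      &e, sizeof(e));
 *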
An example is available in file * *samples/bpf/trace_output_user.c* in the Linux kernel source * tree (the eBPF program counterpart is in * *samples/bpf/trace_output_kern.c*). * * **bpf_perf_event_output**\ () achieves better performance * than **bpf_trace_printk**\ () for sharing data with user * space, and is much better suited for streaming data from eBPF * programs. * * Note that this helper is not restricted to tracing use cases * and can be used with programs attached to TC or XDP as well, * where it allows for passing data to user space listeners. Data * can be: * * * Only custom structs, * * Only the packet payload, or * * A combination of both. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from * the packet associated to *skb*, into the buffer pointed by * *to*. * * Since Linux 4.7, usage of this helper has mostly been replaced * by "direct packet access", enabling packet data to be * manipulated with *skb*\ **->data** and *skb*\ **->data_end** * pointing respectively to the first byte of packet data and to * the byte after the last byte of packet data. However, it * remains useful if one wishes to read large quantities of data * at once from a packet into the eBPF stack. * Return * 0 on success, or a negative error in case of failure. * * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context * on which the tracing program is executed, and a pointer to a * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * a combination of the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_FAST_STACK_CMP** * Compare stacks by hash only. * **BPF_F_REUSE_STACKID** * If two different stacks hash into the same *stackid*, * discard the old one. * * The stack id retrieved is a 32-bit long integer handle which * can be further combined with other data (including other stack * ids) and used as a key into maps. This can be useful for * generating a variety of graphs (such as flame graphs or off-cpu * graphs). * * For walking a stack, this helper is an improvement over * **bpf_probe_read**\ (), which can be used with unrolled loops * but is not efficient and consumes a lot of eBPF instructions. * Instead, **bpf_get_stackid**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The positive or null stack id on success, or a negative error * in case of failure. * * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) * Description * Compute a checksum difference, from the raw buffer pointed by * *from*, of length *from_size* (that must be a multiple of 4), * towards the raw buffer pointed by *to*, of size *to_size* * (same remark).
An optional *seed* can be added to the value * (this can be cascaded, the seed may come from a previous call * to the helper). * * This is flexible enough to be used in several ways: * * * With *from_size* == 0, *to_size* > 0 and *seed* set to * checksum, it can be used when pushing new data. * * With *from_size* > 0, *to_size* == 0 and *seed* set to * checksum, it can be used when removing data from a packet. * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it * can be used to compute a diff. Note that *from_size* and * *to_size* do not need to be equal. * * This helper can be used in combination with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to * which one can feed in the difference computed with * **bpf_csum_diff**\ (). * Return * The checksum result, or a negative error code in case of * failure. * * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* * of *size*. * * This helper can be used with encapsulation devices that can * operate in "collect metadata" mode (please refer to the related * note in the description of **bpf_skb_get_tunnel_key**\ () for * more details). A particular example where this can be used is * in combination with the Geneve encapsulation protocol, where it * allows for pushing (with **bpf_skb_set_tunnel_opt**\ () helper) * and retrieving arbitrary TLVs (Type-Length-Value headers) from * the eBPF program. This allows for full customization of these * headers. * Return * The size of the option data retrieved. * * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. * * See also the description of the **bpf_skb_get_tunnel_opt**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to * IPv4. The helper takes care of the groundwork for the * transition, including resizing the socket buffer. The eBPF * program is expected to fill the new headers, if any, via * **bpf_skb_store_bytes**\ () and to recompute the checksums with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ * (). The main case for this helper is to perform NAT64 * operations out of an eBPF program. * * Internally, the GSO type is marked as dodgy so that headers are * checked and segments are recalculated by the GSO/GRO engine. * The size for GSO target is adapted as well. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except * the eBPF program does not have a write access to *skb*\ * **->pkt_type** beside this helper.
Using a helper here allows * for graceful handling of errors. * * The major use case is to change incoming *skb*s to * **PACKET_HOST** in a programmatic way instead of having to * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for * example. * * Note that *type* only allows certain values. At this time, they * are: * * **PACKET_HOST** * Packet is for us. * **PACKET_BROADCAST** * Send packet to all. * **PACKET_MULTICAST** * Send packet to group. * **PACKET_OTHERHOST** * Send packet to someone else. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the *skb* failed the cgroup2 descendant test. * * 1, if the *skb* succeeded the cgroup2 descendant test. * * A negative error code, if an error occurred. * * u32 bpf_get_hash_recalc(struct sk_buff *skb) * Description * Retrieve the hash of the packet, *skb*\ **->hash**. If it is * not set, in particular if the hash was cleared due to mangling, * recompute this hash. Later accesses to the hash can be done * directly with *skb*\ **->hash**. * * Calling **bpf_set_hash_invalid**\ (), changing a packet * protocol with **bpf_skb_change_proto**\ (), or calling * **bpf_skb_store_bytes**\ () with the * **BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear * the hash and to trigger a new computation for the next call to * **bpf_get_hash_recalc**\ (). * Return * The 32-bit hash. * * u64 bpf_get_current_task(void) * Description * Get the current task. * Return * A pointer to the current task struct. * * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in * user context, and *dst* must be a valid user space address. * * This helper should not be used to implement any kind of * security mechanism because of TOC-TOU attacks, but rather to * debug, divert, and manipulate execution of semi-cooperative * processes. * * Keep in mind that this feature is meant for experiments, and it * has a risk of crashing the system and running programs. * Therefore, when an eBPF program using this helper is attached, * a warning including PID and process name is printed to kernel * logs. * Return * 0 on success, or a negative error in case of failure. * * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run in the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 1, if current task belongs to the cgroup2. * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must * be left at zero.
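 *
 *		For example, trimming a reply down to a fixed size could look
 *		like this (a sketch; the target length is illustrative):
 *
 *		::
 *
 *			if (bpf_skb_change_tail(skb, 64, 0) < 0)
 *				return TC_ACT_SHOT;	// resize failed
 *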
* * The basic idea is that the helper performs the needed work to * change the size of the packet, then the eBPF program rewrites * the rest via helpers like **bpf_skb_store_bytes**\ (), * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ () * and others. This helper is a slow path utility intended for * replies with control messages. And because it is targeted for * slow path, the helper itself can afford to be slow: it * implicitly linearizes, unclones and drops offloads from the * *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes * from *skb* readable and writable. If a zero value is passed for * *len*, then all bytes in the linear part of *skb* will be made * readable and writable. * * This helper is only needed for reading and writing with direct * packet access. * * For direct packet access, testing that offsets to access * are within packet boundaries (test on *skb*\ **->data_end**) is * susceptible to fail if offsets are invalid, or if the requested * data is in non-linear parts of the *skb*. On failure the * program can just bail out, or in the case of a non-linear * buffer, use a helper to make the data available. The * **bpf_skb_load_bytes**\ () helper is a first solution to access * the data. Another one consists in using **bpf_skb_pull_data**\ () * to pull in the non-linear parts once, then retesting and * eventually accessing the data. * * At the same time, this also makes sure the *skb* is uncloned, * which is a necessary condition for direct write. As this needs * to be an invariant for the write part only, the verifier * detects writes and adds a prologue that is calling * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) * Description * Add the checksum *csum* into *skb*\ **->csum** in case the * driver has supplied a checksum for the entire packet into that * field. Return an error otherwise. This helper is intended to be * used in combination with **bpf_csum_diff**\ (), in particular * when the checksum needs to be updated after data has been * written into the packet through direct packet access. * Return * The checksum on success, or a negative error code in case of * failure. * * void bpf_set_hash_invalid(struct sk_buff *skb) * Description * Invalidate the current *skb*\ **->hash**. It can be used after * mangling on headers through direct packet access, in order to * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * Return * void.
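 *
 *		For instance, after mangling a header field through direct
 *		packet access (a sketch; *iph* stands for an already
 *		validated **struct iphdr** pointer):
 *
 *		::
 *
 *			iph->tos = 0;			// header rewritten directly
 *			bpf_set_hash_invalid(skb);	// hash is now stale
 *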
* * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA * node, when the program is attached to sockets using the * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), * but the helper is also available to other eBPF program types, * similarly to **bpf_get_smp_processor_id**\ (). * Return * The id of current NUMA node. * * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of * space. It automatically extends and reallocates memory as * required. * * This helper can be used on a layer 3 *skb* to push a MAC header * for redirection into a layer 2 device. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper * can be used to prepare the packet for pushing or popping * headers. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for * more details. * * Generally, use **bpf_probe_read_user_str**\ () or * **bpf_probe_read_kernel_str**\ () instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. * * u64 bpf_get_socket_cookie(struct sk_buff *skb) * Description * If the **struct sk_buff** pointed by *skb* has a known socket, * retrieve the cookie (generated by the kernel) of this socket. * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket * networking traffic statistics as it provides a global socket * identifier that can be assumed unique. * Return * A 8-byte long unique number on success, or 0 if the socket * field is missing inside *skb*. * * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) * Description * Equivalent to bpf_get_socket_cookie() helper that accepts * *skb*, but gets socket from **struct bpf_sock_addr** context. * Return * A 8-byte long unique number. * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * A 8-byte long unique number. 
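 *
 *		For instance, a **BPF_PROG_TYPE_SOCK_OPS** program can key
 *		per-connection state by cookie (a sketch; *stat_map* is an
 *		illustrative hash map):
 *
 *		::
 *
 *			__u64 cookie = bpf_get_socket_cookie(skops);
 *			struct stats *s = bpf_map_lookup_elem(&stat_map, &cookie);
 *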
* * u64 bpf_get_socket_cookie(struct sock *sk) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from a BTF **struct sock**. This helper * also works for sleepable programs. * Return * A 8-byte long unique number or 0 if *sk* is NULL. * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Description * Get the owner UID of the socket associated to *skb*. * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a * time-wait or a request socket instead), **overflowuid** value * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed by *optval*. * * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**, * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**, * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**, * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**, * **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports the following *optname*\ s: * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * * By default, the helper will reset any offloaded checksum * indicator of the skb to CHECKSUM_NONE. This can be avoided * by the following flag: * * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded * checksum data of the skb to CHECKSUM_NONE. * * There are two supported modes at this time: * * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer * (room space is added or removed between the layer 2 and * layer 3 headers). * * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * (room space is added or removed between the layer 3 and * layer 4 headers). * * The following flags are supported at this time: * * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * Adjusting mss in this way is not allowed for datagrams.
* * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): * Use with ENCAP_L3/L4 flags to further specify the tunnel * type; *len* is the length of the inner MAC header. * * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the * L2 type as Ethernet. * * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**, * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**: * Indicate the new IP header version after decapsulating the outer * IP header. Used when the inner and outer IP versions are different. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain * references to net devices (for forwarding packets through other * ports), or to CPUs (for redirecting XDP frames to another CPU; * but this is only implemented for native XDP (with driver * support) as of this writing). * * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be * one of the XDP program return codes up to **XDP_TX**, as chosen * by the caller. The higher bits of *flags* can be set to * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. * * With BPF_F_BROADCAST the packet will be broadcast to all the * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress * interface will be excluded when doing the broadcast. * * See also **bpf_redirect**\ (), which only supports redirecting * to an ifindex, but doesn't require a map to do so. * Return * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error.
* Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this * operation modifies the address stored in *xdp_md*\ **->data**, * so the latter must be loaded only after the helper has been * called. * * The use of *xdp_md*\ **->data_meta** is optional and programs * are not required to use it. The rationale is that when the * packet is processed with XDP (e.g. as DoS filter), it is * possible to push further meta data along with it before passing * to the stack, and to give the guarantee that an ingress eBPF * program attached as a TC classifier on the same device can pick * this up for further post-processing. Since TC works with socket * buffers, it remains possible to set from XDP the **mark** or * **priority** pointers, or other pointers for the socket buffer. * Having this scratch space generic and programmable allows for * more flexibility as the user is free to store whatever meta * data they need. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event * counter is selected when *map* is updated with perf event file * descriptors. The *map* is an array whose size is the number of * available CPUs, and each cell contains a value relative to one * CPU. The value to retrieve is indicated by *flags*, that * contains the index of the CPU to look up, masked with * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * This helper behaves in a way close to * **bpf_perf_event_read**\ () helper, save that instead of * just returning the value observed, it fills the *buf* * structure. This allows for additional data to be retrieved: in * particular, the enabled and running times (in *buf*\ * **->enabled** and *buf*\ **->running**, respectively) are * copied. In general, **bpf_perf_event_read_value**\ () is * recommended over **bpf_perf_event_read**\ (), which has some * ABI issues and provides fewer functionalities. * * These values are interesting, because hardware PMU (Performance * Monitoring Unit) counters are limited resources. When there are * more PMU based perf events opened than available counters, * kernel will multiplex these events so each event gets certain * percentage (but not all) of the PMU time. In case that * multiplexing happens, the number of samples or counter value * will not reflect the case compared to when no multiplexing * occurs. This makes comparison between different runs difficult. * Typically, the counter value should be normalized before * comparing to other experiments. The usual normalization is done * as follows. 
* * :: * * normalized_counter = counter * t_enabled / t_running * * Where t_enabled is the time enabled for the event and t_running * is the time running for the event since the last normalization. * The enabled and running times are accumulated since the perf * event open. To achieve scaling factor between two invocations of an * eBPF program, users can use CPU id as the key (which is * typical for perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * Return * 0 on success, or a negative error in case of failure. * * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see * description of helper **bpf_perf_event_read_value**\ () for * more details). * Return * 0 on success, or a negative error in case of failure. * * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **getsockopt(2)** for more information. * The retrieved value is stored in the structure pointed by * *optval* and of length *optlen*. * * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **getsockopt()**. * It supports the same set of *optname*\ s that is supported by * the **bpf_setsockopt**\ () helper. The exceptions are that * **TCP_BPF_*** options are **bpf_setsockopt**\ () only and * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only. * Return * 0 on success, or a negative error in case of failure. * * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required * value. * * This helper has security implications, and thus is subject to * restrictions. It is only available if the kernel was compiled * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. * Return * 0 * * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to * *argval*. * * The primary use of this field is to determine if there should * be calls to eBPF programs of type * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP * code. A program of the same type can change its value, per * connection and as necessary, when the connection is * established.
This field is directly accessible for reading, but * this helper must be used for updates in order to return an * error if an eBPF program tries to set a callback that is not * supported in the current kernel. * * *argval* is a flag array which can combine these flags: * * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) * * Therefore, this function can be used to clear a callback flag by * setting the appropriate bit to zero. e.g. to disable the RTO * callback: * * **bpf_sock_ops_cb_flags_set(bpf_sock,** * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** * * Here are some examples of where one could call such eBPF * program: * * * When RTO fires. * * When a packet is retransmitted. * * When the connection terminates. * * When a packet is sent. * * When a packet is received. * Return * Code **-EINVAL** if the socket is not a full TCP socket; * otherwise, a positive number containing the bits that could not * be set is returned (which comes down to 0 if all bits were set * as required). * * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. * * For example, this helper can be used in the following cases: * * * A single **sendmsg**\ () or **sendfile**\ () system call * contains multiple logical messages that the eBPF program is * supposed to read and for which it should apply a verdict. * * An eBPF program only cares to read the first *bytes* of a * *msg*. If the message has a large payload, then setting up * and calling the eBPF program repeatedly for all bytes, even * though the verdict is already known, would create unnecessary * overhead. * * When called from within an eBPF program, the helper sets a * counter internal to the BPF infrastructure, that is used to * apply the last verdict to the next *bytes*. If *bytes* is * smaller than the current data being processed from a * **sendmsg**\ () or **sendfile**\ () system call, the first * *bytes* will be sent and the eBPF program will be re-run with * the pointer for start of data pointing to byte number *bytes* * **+ 1**. If *bytes* is larger than the current data being * processed, then the eBPF verdict will be applied to multiple * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are * consumed. * * Note that if a socket closes with the internal counter holding * a non-zero value, this is not a problem because data is not * being buffered for *bytes* and is sent as it is received. 
* Return * 0 * * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been * accumulated. * * This can be used when one needs a specific number of bytes * before a verdict can be assigned, even if the data spans * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme * case would be a user calling **sendmsg**\ () repeatedly with * 1-byte long message segments. Obviously, this is bad for * performance, but it is still valid. If the eBPF program needs * *bytes* bytes to validate a header, this helper can be used to * prevent the eBPF program to be called again until *bytes* have * been accumulated. * Return * 0 * * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ * **->data_end** to *start* and *end* bytes offsets into *msg*, * respectively. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it can only parse data that the (**data**, **data_end**) * pointers have already consumed. For **sendmsg**\ () hooks this * is likely the first scatterlist element. But for calls relying * on the **sendpage** handler (e.g. **sendfile**\ ()) this will * be the range (**0**, **0**) because the data is shared with * user space and by default the objective is to avoid allowing * user space to modify data while (or after) eBPF verdict is * being decided. This helper can be used to pull in data and to * set the start and end pointer to given values. Data will be * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * All values for *flags* are reserved for future usage, and must * be left at zero. * Return * 0 on success, or a negative error in case of failure. * * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing * connection from the desired IP address, which can be useful for * example when all processes inside a cgroup should use one * single IP address on a host that has multiple IP configured. * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or * **AF_INET6**). It's advised to pass zero port (**sin_port** * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like * behavior and lets the kernel efficiently pick up an unused * port as long as 4-tuple is unique. Passing non-zero port might * lead to degraded performance. * Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. * Shrink done via *delta* being a negative integer. * * A call to this helper is susceptible to change the underlying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. * * The retrieved value is stored in the **struct bpf_xfrm_state** * pointed by *xfrm_state* and of length *size*. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_XFRM** configuration option. * Return * 0 on success, or a negative error in case of failure. * * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer * to the context on which the tracing program is executed. * To store the stacktrace, the bpf program provides *buf* with * a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_USER_BUILD_ID** * Collect (build_id, file_offset) instead of ips for user * stack, only valid if **BPF_F_USER_STACK** is also * specified. * * *file_offset* is an offset relative to the beginning * of the executable or shared object file backing the vma * which the *ip* falls in. It is *not* an offset relative * to that object's base address. Accordingly, it must be * adjusted by adding (sh_addr - sh_offset), where * sh_{addr,offset} correspond to the executable section * containing *file_offset* in the object, for comparisons * to symbols' st_value to be valid. * * **bpf_get_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The non-negative copied *buf* length equal to or less than * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* * from the packet associated to *skb*, into the buffer pointed * by *to*. The difference to **bpf_skb_load_bytes**\ () is that * a fifth argument *start_header* exists in order to select a * base offset to start from. *start_header* can be one of: * * **BPF_HDR_START_MAC** * Base offset to load data from is *skb*'s mac header. * **BPF_HDR_START_NET** * Base offset to load data from is *skb*'s network header.
* * In general, "direct packet access" is the preferred method to * access packet data, however, this helper is particularly useful * in socket filters where *skb*\ **->data** does not always point * to the start of the mac header and where "direct packet access" * is not available. * Return * 0 on success, or a negative error in case of failure. * * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be * forwarded, the neighbor tables are searched for the nexthop. * If successful (i.e., FIB lookup shows forwarding and nexthop * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric * is set to metric from route (IPv4/IPv6 only), and ifindex * is set to the device index of the nexthop from the FIB lookup. * * The *plen* argument is the size of the passed-in struct. * The *flags* argument can be a combination of one or more of the * following values: * * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. * **BPF_FIB_LOOKUP_TBID** * Used with BPF_FIB_LOOKUP_DIRECT. * Use the routing table ID present in *params*->tbid * for the fib lookup. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). * **BPF_FIB_LOOKUP_SKIP_NEIGH** * Skip the neighbour table lookup. *params*->dmac * and *params*->smac will not be set as output. A common * use case is to call **bpf_redirect_neigh**\ () after * doing **bpf_fib_lookup**\ (). * **BPF_FIB_LOOKUP_SRC** * Derive and set source IP addr in *params*->ipv{4,6}_src * for the nexthop. If the src addr cannot be derived, * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this * case, *params*->dmac and *params*->smac are not set either. * **BPF_FIB_LOOKUP_MARK** * Use the mark present in *params*->mark for the fib lookup. * This option should not be used with BPF_FIB_LOOKUP_DIRECT, * as it only has meaning for full lookups. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. * Return * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU * was exceeded and output params->mtu_result contains the MTU. * * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level.
If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at * address *hdr*, with *len* its size in bytes. *type* indicates * the protocol of the header and can be one of: * * **BPF_LWT_ENCAP_SEG6** * IPv6 encapsulation with Segment Routing Header * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, * the IPv6 header is computed by the kernel. * **BPF_LWT_ENCAP_SEG6_INLINE** * Only works if *skb* contains an IPv6 packet. Insert a * Segment Routing Header (**struct ipv6_sr_hdr**) inside * the IPv6 header. * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more * additional headers, up to **LWT_BPF_MAX_HEADROOM** * total bytes in all prepended headers. Please note that * if **skb_is_gso**\ (*skb*) is true, no more than two * headers can be prepended, and the inner header, if * present, should be either GRE or UDP/GUE. * * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and * **BPF_PROG_TYPE_LWT_XMIT**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. 
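 *
 *	As an illustration of **bpf_lwt_seg6_store_bytes**\ (), a minimal
 *	sketch of a seg6local program rewriting the SRH **tag** field. It
 *	assumes the SRH immediately follows the fixed IPv6 header (which
 *	holds for the outermost SRH of a plainly encapsulated packet); the
 *	tag value is an arbitrary choice:
 *
 *	::
 *
 *		#include <stddef.h>
 *		#include <linux/bpf.h>
 *		#include <linux/ipv6.h>
 *		#include <linux/seg6.h>
 *		#include <bpf/bpf_helpers.h>
 *		#include <bpf/bpf_endian.h>
 *
 *		SEC("lwt_seg6local")
 *		int set_srh_tag(struct __sk_buff *skb)
 *		{
 *			__u16 tag = bpf_htons(0x1234);
 *			/* Offset of the tag field, counted from the start
 *			 * of the network header.
 *			 */
 *			__u32 off = sizeof(struct ipv6hdr) +
 *				    offsetof(struct ipv6_sr_hdr, tag);
 *
 *			if (bpf_lwt_seg6_store_bytes(skb, off, &tag,
 *						     sizeof(tag)) < 0)
 *				return BPF_DROP;
 *			return BPF_OK;
 *		}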
* * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to * *skb*, at position *offset* by *delta* bytes. Only offsets * after the segments are accepted. *delta* can be * positive (growing) as well as negative (shrinking). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter * contained at address *param*, and of length *param_len* bytes. * *action* can be one of: * * **SEG6_LOCAL_ACTION_END_X** * End.X action: Endpoint with Layer-3 cross-connect. * Type of *param*: **struct in6_addr**. * **SEG6_LOCAL_ACTION_END_T** * End.T action: Endpoint with specific IPv6 table lookup. * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. * Type of *param*: **struct ipv6_sr_hdr**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays * the generation of a key up event for a previously generated * key down event. * * Some IR protocols like NEC have a special IR message for * repeating the last button, for when a button is held down. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode* and * *toggle* value in the given *protocol*. The scancode will be * translated to a keycode using the rc keymap, and reported as * an input key down event. After a period a key up event is * generated. This period can be extended by calling either * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. * * The *ctx* should point to the lirc sample as passed into * the program. * * The *protocol* is the decoded protocol number (see * **enum rc_proto** for some predefined values). * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**".
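 *
 *		As an illustration, a toy decoder loosely modeled on the
 *		kernel's lirc_mode2 selftest; the pulse-length check and the
 *		protocol/scancode derivation are arbitrary assumptions:
 *
 *		::
 *
 *			#include <linux/bpf.h>
 *			#include <linux/lirc.h>
 *			#include <bpf/bpf_helpers.h>
 *
 *			SEC("lirc_mode2")
 *			int decoder(unsigned int *sample)
 *			{
 *				if (LIRC_IS_PULSE(*sample)) {
 *					unsigned int duration = LIRC_VALUE(*sample);
 *
 *					/* Report a key press; calling this again
 *					 * with the same values extends the period
 *					 * before the key up event is generated.
 *					 */
 *					if (duration & 0x10000)
 *						bpf_rc_keydown(sample, 0x40,
 *							       duration & 0xffff, 0);
 *				}
 *				return 0;
 *			}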
* Return * 0 * * u64 bpf_skb_cgroup_id(struct sk_buff *skb) * Description * Return the cgroup v2 id of the socket associated with the *skb*. * This is roughly similar to the **bpf_get_cgroup_classid**\ () * helper for cgroup v1 by providing a tag or identifier that * can be matched on or used for map lookups e.g. to implement * policy. The cgroup v2 id of a given path in the hierarchy is * exposed in user space through the f_handle API in order to get * to the same 64-bit id. * * This helper can be used on TC egress path, but not on ingress, * and is available only if the kernel was compiled with the * **CONFIG_SOCK_CGROUP_DATA** configuration option. * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) * Description * Get the current cgroup id based on the cgroup within which * the current task is running. * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. * * void *bpf_get_local_storage(void *map, u64 flags) * Description * Get the pointer to the local storage area. * The type and the size of the local storage is defined * by the *map* argument. * The *flags* meaning is specific for each map type, * and has to be 0 for cgroup local storage. * * Depending on the BPF program type, a local storage area * can be shared between multiple instances of the BPF program, * running simultaneously. * * Users are responsible for the synchronization themselves, * for example by using the **BPF_ATOMIC** instructions to alter * the shared data. * Return * A pointer to the local storage area. * * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. * It checks that the selected socket matches the incoming * request in the socket buffer. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) * Description * Return id of cgroup v2 that is ancestor of cgroup associated * with the *skb* at the *ancestor_level*. The root cgroup is at * *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with *skb*, then the return value will be the same as that * of **bpf_skb_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with *skb*. * * The format of the returned id and helper limitations are the same as in * **bpf_skb_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket.
* * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * * long bpf_sk_release(void *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from * **bpf_sk_lookup_xxx**\ (). * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * * **BPF_EXIST** * If the queue/stack is full, the oldest element is * removed to make room for this. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. 
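 *
 *	Taken together, **bpf_map_push_elem**\ (), **bpf_map_pop_elem**\ ()
 *	and **bpf_map_peek_elem**\ () provide FIFO/LIFO semantics for
 *	**BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** maps. A minimal
 *	sketch; the map name, its size and the use of the packet length as
 *	the queued value are arbitrary assumptions:
 *
 *	::
 *
 *		#include <linux/bpf.h>
 *		#include <bpf/bpf_helpers.h>
 *
 *		struct {
 *			__uint(type, BPF_MAP_TYPE_QUEUE);
 *			__uint(max_entries, 1024);
 *			__uint(key_size, 0);
 *			__uint(value_size, sizeof(__u32));
 *		} pkt_lens SEC(".maps");
 *
 *		SEC("xdp")
 *		int record_len(struct xdp_md *ctx)
 *		{
 *			__u32 len = ctx->data_end - ctx->data;
 *
 *			/* BPF_EXIST drops the oldest element when the
 *			 * queue is full.
 *			 */
 *			bpf_map_push_elem(&pkt_lens, &len, BPF_EXIST);
 *			return XDP_PASS;
 *		}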
* * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it may want to insert metadata or options into the *msg*. * This can later be read and used by any of the lower layer BPF * hooks. * * This helper may fail under memory pressure (if an allocation * fails); in that case, the BPF program will get an appropriate * error and will need to handle it. * Return * 0 on success, or a negative error in case of failure. * * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if * an allocation and copy are required due to a full ring buffer. * However, the helper will try to avoid doing the allocation * if possible. Other errors can occur if input parameters are * invalid, either due to the *start* byte not being a valid part of the *msg* * payload and/or the *pop* value being too large. * Return * 0 on success, or a negative error in case of failure. * * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock makes it * possible to safely update the rest of the fields in that value. The * spinlock can (and must) later be released with a call to * **bpf_spin_unlock**\ (\ *lock*\ ). * * Spinlocks in BPF programs come with a number of restrictions * and constraints: * * * **bpf_spin_lock** objects are only allowed inside maps of * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this * list could be extended in the future). * * BTF description of the map is mandatory. * * The BPF program can take ONE lock at a time, since taking two * or more could cause deadlocks. * * Only one **struct bpf_spin_lock** is allowed per map element. * * When the lock is taken, calls (either BPF to BPF or helpers) * are not allowed. * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not * allowed inside a spinlock-ed region. * * The BPF program MUST call **bpf_spin_unlock**\ () to release * the lock, on all execution paths, before it returns. * * The BPF program can access **struct bpf_spin_lock** only via * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () * helpers. Loading or storing data into the **struct * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. * * To use the **bpf_spin_lock**\ () helper, the BTF description * of the map value must be a struct and have **struct * bpf_spin_lock** *anyname*\ **;** field at the top level. * Nested lock inside another struct is not allowed. * * The **struct bpf_spin_lock** *lock* field in a map value must * be aligned on a multiple of 4 bytes in that value. * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy * the **bpf_spin_lock** field to user space. * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from * a BPF program, does not update the **bpf_spin_lock** field.
* * **bpf_spin_lock** cannot be on the stack or inside a * networking packet (it can only be inside of map values). * * **bpf_spin_lock** is available to root only. * * Tracing programs and socket filter programs cannot use * **bpf_spin_lock**\ () due to insufficient preemption checks * (but this may change in the future). * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. * Return * 0 * * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). * Return * 0 * * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) * Description * This helper gets a **struct bpf_sock** pointer such * that all the fields in this **bpf_sock** can be accessed. * Return * A **struct bpf_sock** pointer on success, or **NULL** in * case of failure. * * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) * Description * This helper gets a **struct bpf_tcp_sock** pointer from a * **struct bpf_sock** pointer. * Return * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 * and IPv4. * Return * 1 if the **CE** flag is set (either by the current helper call * or because it was already present), 0 if it is not set. * * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) * Description * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. * **bpf_sk_release**\ () is unnecessary and not allowed. * Return * A **struct bpf_sock** pointer on success, or **NULL** in * case of failure. * * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * This function is identical to **bpf_sk_lookup_tcp**\ (), except * that it also returns timewait or request sockets. Use * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the * full structure. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header (at least * **sizeof**\ (**struct tcphdr**)). * Return * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get the name of the sysctl in /proc/sys/ and copy it into the * buffer *buf* of size *buf_len* provided by the program. * * The buffer is always NUL terminated, unless it's zero-sized. * * If *flags* is zero, the full name (e.g.
"net/ipv4/tcp_mem") is * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name * only (e.g. "tcp_mem"). * Return * Number of character copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided * by program buffer *buf* of size *buf_len*. * * The whole value is copied, no matter what file position user * space issued e.g. sys_read at. * * The buffer is always NUL terminated, unless it's zero-sized. * Return * Number of character copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into * provided by program buffer *buf* of size *buf_len*. * * User space may write new value at file position > 0. * * The buffer is always NUL terminated, unless it's zero-sized. * Return * Number of character copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * * **-EINVAL** if sysctl is being read. * * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. * * *buf* should contain a string in same form as provided by user * space on sysctl write. * * User space may write new value at file position > 0. To override * the whole sysctl value file position should be set to zero. * Return * 0 on success. * * **-E2BIG** if the *buf_len* is too big. * * **-EINVAL** if sysctl is being read. * * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base * and save the result in *res*. * * The string may begin with an arbitrary amount of white space * (as determined by **isspace**\ (3)) followed by a single * optional '**-**' sign. * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically * similar to user space **strtol**\ (3). * Return * Number of characters consumed on success. Must be positive but * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. * * **-ERANGE** if resulting value was out of range. * * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the * given base and save the result in *res*. * * The string may begin with an arbitrary amount of white space * (as determined by **isspace**\ (3)). * * Five least significant bits of *flags* encode base, other bits * are currently unused. 
* * The base must be either 8, 10, 16 or 0 to detect it automatically, * similar to user space **strtoul**\ (3). * Return * Number of characters consumed on success. Must be positive but * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or an unsupported base * was provided. * * **-ERANGE** if the resulting value was out of range. * * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) * Description * Get a bpf-local-storage from a *sk*. * * Logically, it could be thought of as getting the value from * a *map* with *sk* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except that this * helper enforces that the key must be a full socket and that the map * must also be a **BPF_MAP_TYPE_SK_STORAGE**. * * Underneath, the value is stored locally at *sk* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf-local-storages residing at *sk*. * * *sk* is a kernel **struct sock** pointer for LSM programs. * *sk* is a **struct bpf_sock** pointer for other program types. * * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf-local-storage. If *value* is * **NULL**, the new bpf-local-storage will be zero initialized. * Return * A bpf-local-storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). * * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. * Return * 0 on success or successfully queued. * * **-EBUSY** if the work queue under NMI is full. * * **-EINVAL** if *sig* is invalid. * * **-EPERM** if no permission to send the *sig*. * * **-EAGAIN** if the bpf program can try again. * * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Try to issue a SYN cookie for the packet with corresponding * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header with options (at least * **sizeof**\ (**struct tcphdr**)). * Return * On success, the lower 32 bits hold the generated SYN cookie, * followed by 16 bits which hold the MSS value for that cookie, * and the top 16 bits are unused.
* * On failure, the returned value is one of the following: * * **-EINVAL** SYN cookie cannot be issued due to error * * **-ENOENT** SYN cookie should not be issued (no SYN flood) * * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*. * * *ctx* is a pointer to in-kernel struct sk_buff. * * This helper is similar to **bpf_perf_event_output**\ () but * restricted to raw_tracepoint bpf programs. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the * terminating NUL byte. In case the string length is smaller than * *size*, the target is not padded with further NUL bytes. If the * string length is larger than *size*, just *size*-1 bytes are * copied and the last byte is set to NUL. * * On success, returns the number of bytes that were written, * including the terminal NUL. This makes this helper useful in * tracing programs for reading strings, and more importantly to * get its length at runtime. See the following snippet: * * :: * * SEC("kprobe/sys_open") * void bpf_sys_open(struct pt_regs *ctx) * { * char buf[PATHLEN]; // PATHLEN is defined to 256 * int res = bpf_probe_read_user_str(buf, sizeof(buf), * ctx->di); * * // Consume buf, for example push it to * // userspace via bpf_perf_event_output(); we * // can use res (the string length) as event * // size, after checking its boundaries. * } * * In comparison, using **bpf_probe_read_user**\ () helper here * instead to read the string would require to estimate the length * at compile time, and would often result in copying more memory * than necessary. * * Another useful use case is when parsing individual process * arguments or individual environment variables navigating * *current*\ **->mm->arg_start** and *current*\ * **->mm->env_start**: using this helper and the return value, * one can quickly iterate at the right offset of the memory area. * Return * On success, the strictly positive length of the output string, * including the trailing NUL character. On error, a negative * value. 
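 *
 *		As an illustration of the argument-parsing use case described
 *		above, a sketch that walks a task's argv using the returned
 *		length. It assumes a **vmlinux.h** with kernel type
 *		definitions, CO-RE support, and a kernel that accepts bounded
 *		loops; the tracepoint and the bound of 16 arguments are
 *		arbitrary choices:
 *
 *		::
 *
 *			#include "vmlinux.h"
 *			#include <bpf/bpf_helpers.h>
 *			#include <bpf/bpf_core_read.h>
 *
 *			SEC("tp/sched/sched_process_exec")
 *			int show_args(void *ctx)
 *			{
 *				struct task_struct *task;
 *				unsigned long arg_start, arg_end;
 *				char buf[128];
 *				long n;
 *				int i;
 *
 *				task = (struct task_struct *)bpf_get_current_task();
 *				arg_start = BPF_CORE_READ(task, mm, arg_start);
 *				arg_end = BPF_CORE_READ(task, mm, arg_end);
 *
 *				for (i = 0; i < 16 && arg_start < arg_end; i++) {
 *					n = bpf_probe_read_user_str(buf, sizeof(buf),
 *							(const void *)arg_start);
 *					if (n <= 0)
 *						break;
 *					/* n includes the trailing NUL, so this
 *					 * jumps to the start of the next argument.
 *					 */
 *					arg_start += n;
 *				}
 *				return 0;
 *			}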
* * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. * Return * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a TCP ACK. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. * Return * 0 on success or successfully queued. * * **-EBUSY** if the work queue under NMI is full. * * **-EINVAL** if *sig* is invalid. * * **-EPERM** if no permission to send the *sig*. * * **-EAGAIN** if the bpf program can try again. * * u64 bpf_jiffies64(void) * Description * Obtain the 64-bit jiffies value. * Return * The 64-bit jiffies value. * * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* * and store them in the buffer pointed by *buf* up to size * *size* bytes. * Return * On success, number of bytes written to *buf*. On error, a * negative value. * * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to * instead return the number of bytes required to store all the * branch entries. If this flag is set, *buf* may be NULL. * * **-EINVAL** if arguments are invalid or **size** is not a multiple * of **sizeof**\ (**struct perf_branch_entry**\ ). * * **-ENOENT** if the architecture does not support branch records. * * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * On success, the values for *pid* and *tgid* as seen from the current * *namespace* are returned in *nsdata*. * Return * 0 on success, or one of the following in case of failure: * * **-EINVAL** if dev and inum supplied don't match dev_t and inode number * with nsfs of current task, or if dev conversion to dev_t lost high bits. * * **-ENOENT** if pidns does not exist for the current task. * * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*. * * *ctx* is a pointer to in-kernel struct xdp_buff. * * This helper is similar to **bpf_perf_event_output**\ () but * restricted to raw_tracepoint bpf programs. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_get_netns_cookie(void *ctx) * Description * Retrieve the cookie (generated by the kernel) of the network * namespace the input *ctx* is associated with.
The network * namespace cookie remains stable for its lifetime and provides * a global identifier that can be assumed unique. If *ctx* is * NULL, then the helper returns the cookie for the initial * network namespace. The cookie itself is very similar to that * of **bpf_get_socket_cookie**\ () helper, but for network * namespaces instead of sockets. * Return * An 8-byte long opaque number. * * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) * Description * Return id of cgroup v2 that is ancestor of the cgroup associated * with the current task at the *ancestor_level*. The root cgroup * is at *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with the current task, then return value will be the * same as that of **bpf_get_current_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with the current task. * * The format of the returned id and helper limitations are the same as in * **bpf_get_current_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags) * Description * Helper is overloaded depending on BPF program type. This * description applies to **BPF_PROG_TYPE_SCHED_CLS** and * **BPF_PROG_TYPE_SCHED_ACT** programs. * * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, * this will cause *skb* to be delivered to the specified socket. * Subsequent redirection of *skb* via **bpf_redirect**\ (), * **bpf_clone_redirect**\ () or other methods outside of BPF may * interfere with successful delivery to the socket. * * This operation is only valid from TC ingress path. * * The *flags* argument must be zero. * Return * 0 on success, or a negative error in case of failure: * * **-EINVAL** if specified *flags* are not supported. * * **-ENOENT** if the socket is unavailable for assignment. * * **-ENETUNREACH** if the socket is unreachable (wrong netns). * * **-EOPNOTSUPP** if the operation is not supported, for example * a call from outside of TC ingress. * * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) * Description * Helper is overloaded depending on BPF program type. This * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. * * Select the *sk* as a result of a socket lookup. * * For the operation to succeed, the passed socket must be compatible * with the packet description provided by the *ctx* object. * * The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must * be an exact match, while the IP family (**AF_INET** or * **AF_INET6**) must be compatible; that is, IPv6 sockets * that are not v6-only can be selected for IPv4 packets. * * Only TCP listeners and UDP unconnected sockets can be * selected. *sk* can also be NULL to reset any previous * selection. * * The *flags* argument can be a combination of the following values: * * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous * socket selection, potentially done by a BPF program * that ran before us. * * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip * load-balancing within the reuseport group for the socket * being selected. * * On success *ctx->sk* will point to the selected socket. * * Return * 0 on success, or a negative errno in case of failure. * * * **-EAFNOSUPPORT** if socket family (*sk->family*) is * not compatible with packet family (*ctx->family*).
* * * **-EEXIST** if socket has been already selected, * potentially by another program, and * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. * * * **-EINVAL** if unsupported flags were specified. * * * **-EPROTOTYPE** if socket L4 protocol * (*sk->protocol*) doesn't match packet protocol * (*ctx->protocol*). * * * **-ESOCKTNOSUPPORT** if socket is not in allowed * state (TCP listening or UDP unconnected). * * u64 bpf_ktime_get_boot_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. * Does include the time the system was suspended. * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) * Return * Current *ktime*. * * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. * The *m* represents the seq_file. The *fmt* and *fmt_size* are for * the format string itself. The *data* and *data_len* are format string * arguments. The *data* is a **u64** array and the corresponding format string * values are stored in the array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* array. * The *data_len* is the size of *data* in bytes; it must be a multiple of 8. * * Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory. * Reading kernel memory may fail due to either an invalid address or * a valid address that requires a major memory fault. If reading kernel memory * fails, the string for **%s** will be an empty string, and the IP * address for **%p{i,I}{4,6}** will be 0. Not returning an error to * the bpf program is consistent with what **bpf_trace_printk**\ () does for now. * Return * 0 on success, or a negative error in case of failure: * * **-EBUSY** if the per-CPU memory copy buffer is busy; the program can try * again by returning 1. * * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. * * **-E2BIG** if *fmt* contains too many format specifiers. * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the * data to write in bytes. * Return * 0 on success, or a negative error in case of failure: * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * * u64 bpf_sk_cgroup_id(void *sk) * Description * Return the cgroup v2 id of the socket *sk*. * * *sk* must be a non-**NULL** pointer to a socket, e.g. one * returned from **bpf_sk_lookup_xxx**\ (), * **bpf_sk_fullsock**\ (), etc. The format of the returned id is the * same as in **bpf_skb_cgroup_id**\ (). * * This helper is available only if the kernel was compiled with * the **CONFIG_SOCK_CGROUP_DATA** configuration option. * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) * Description * Return id of cgroup v2 that is ancestor of cgroup associated * with the *sk* at the *ancestor_level*. The root cgroup is at * *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with *sk*, then the return value will be the same as that * of **bpf_sk_cgroup_id**\ ().
* * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with *sk*. * * The format of the returned id and helper limitations are the same as in * **bpf_sk_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * An adaptive notification is a notification sent whenever the user-space * process has caught up and consumed all available payloads. If the user-space * process is still processing a previous payload, no notification is needed, * as it will process the newly added payload automatically. * Return * 0 on success, or a negative error in case of failure. * * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) * Description * Reserve *size* bytes of payload in a ring buffer *ringbuf*. * *flags* must be 0. * Return * A valid pointer with *size* bytes of memory available; **NULL** * otherwise. * * void bpf_ringbuf_submit(void *data, u64 flags) * Description * Submit reserved ring buffer sample, pointed to by *data*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * See **bpf_ringbuf_output**\ () for the definition of an adaptive notification. * Return * Nothing. Always succeeds. * * void bpf_ringbuf_discard(void *data, u64 flags) * Description * Discard reserved ring buffer sample, pointed to by *data*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * See **bpf_ringbuf_output**\ () for the definition of an adaptive notification. * Return * Nothing. Always succeeds. * * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) * Description * Query various characteristics of provided ring buffer. What * exactly is queried is determined by *flags*: * * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. * * **BPF_RB_RING_SIZE**: The size of ring buffer. * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). * * Data returned is just a momentary snapshot of actual values * and could be inaccurate, so this facility should be used to * power heuristics and for reporting, not to make 100% correct * calculations. * Return * Requested value, or 0, if *flags* are not recognized. * * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skb's checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform * checksum validation. The level is applicable to the following * protocols: TCP, UDP, GRE, SCTP, FCOE.
For example, a decap of * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | * through **bpf_skb_adjust_room**\ () helper with passing in * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since * the UDP header is removed. Similarly, an encap of the latter * into the former could be accompanied by a helper call to * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the * skb is still intended to be processed in higher layers of the * stack instead of just egressing at tc. * * There are four supported operations at this time: * * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs * with CHECKSUM_UNNECESSARY. * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs * with CHECKSUM_UNNECESSARY. * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and * sets CHECKSUM_NONE to force checksum validation by the stack. * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current * skb->csum_level. * Return * 0 on success, or a negative error in case of failure. In the * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. * * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) * Description * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. * * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) * Description * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. * * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) * Description * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. * * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) * Description * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. * * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) * Description * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * Return * *sk* if casting is valid, or **NULL** otherwise. * * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * Note: the user stack will only be populated if the *task* is * the current task; all other tasks will return **-EOPNOTSUPP**. * To achieve this, the helper needs *task*, which is a valid * pointer to **struct task_struct**. To store the stacktrace, the * bpf program provides *buf* with a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * The *task* must be the current task. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. * * **bpf_get_task_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs).
To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The non-negative copied *buf* length equal to or less than * *size* on success, or a negative error in case of failure. * * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description * Load header option. Supports reading a particular TCP header * option for a bpf program (**BPF_PROG_TYPE_SOCK_OPS**). * * If *flags* is 0, it will search the option from the * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** * has details on what skb_data contains under different * *skops*\ **->op**. * * The first byte of the *searchby_res* specifies the * kind to search for. * * If the searched kind is an experimental kind * (i.e. 253 or 254 according to RFC6994), it also * needs to specify the "magic", which is either * 2 bytes or 4 bytes. It then also needs to * specify the size of the magic by using * the 2nd byte, which is the "kind-length" of a TCP * header option; the "kind-length" also * includes the first 2 bytes, "kind" and "kind-length" * itself, as a normal TCP header option does. * * For example, to search experimental kind 254 with * 2 byte magic 0xeB9F, the searchby_res should be * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. * * To search for the standard window scale option (3), * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. * Note that kind-length must be 0 for a regular option. * * Searching for No-Op (0) and End-of-Option-List (1) is * not supported. * * *len* must be at least 2 bytes, which is the minimal size * of a header option. * * Supported flags: * * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the * saved_syn packet or the just-received syn packet. * * Return * > 0 when found, the header option is copied to *searchby_res*. * The return value is the total length copied. On failure, a * negative error code is returned: * * **-EINVAL** if a parameter is invalid. * * **-ENOMSG** if the option is not found. * * **-ENOENT** if no syn packet is available when * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. * * **-ENOSPC** if there is not enough space. Only *len* number of * bytes are copied. * * **-EFAULT** on failure to parse the header options in the * packet. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. * * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) * Description * Store header option. The data will be copied * from buffer *from* with length *len* to the TCP header. * * The buffer *from* should contain the whole option, * including the kind, kind-length, and the actual * option data. The *len* must be at least kind-length * long. The kind-length does not have to be 4-byte * aligned. The kernel will take care of the padding * and of setting the 4-byte aligned value in th->doff. * * This helper will check for a duplicated option * by searching for the same option in the outgoing skb. * * This helper can only be called during * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. * * Return * 0 on success, or negative error in case of failure: * * **-EINVAL** if a parameter is invalid. * * **-ENOSPC** if there is not enough space in the header. * Nothing has been written. * * **-EEXIST** if the option already exists. * * **-EFAULT** on failure to parse the existing header options. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. * * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) * Description * Reserve *len* bytes for the bpf header option. The
The * space will be used by **bpf_store_hdr_opt**\ () later in * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. * * If **bpf_reserve_hdr_opt**\ () is called multiple times, * the total number of bytes will be reserved. * * This helper can only be called during * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. * * Return * 0 on success, or negative error in case of failure: * * **-EINVAL** if a parameter is invalid. * * **-ENOSPC** if there is not enough space in the header. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. * * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) * Description * Get a bpf_local_storage from an *inode*. * * Logically, it could be thought of as getting the value from * a *map* with *inode* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this * helper enforces the key must be an inode and the map must also * be a **BPF_MAP_TYPE_INODE_STORAGE**. * * Underneath, the value is stored locally at *inode* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf_local_storage residing at *inode*. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero initialized. * Return * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. * * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) * Description * Delete a bpf_local_storage from an *inode*. * Return * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. * * long bpf_d_path(struct path *path, char *buf, u32 sz) * Description * Return full path for given **struct path** object, which * needs to be the kernel BTF *path* object. The path is * returned in the provided buffer *buf* of size *sz* and * is zero terminated. * * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. * * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) * Description * Read *size* bytes from user space address *user_ptr* and store * the data in *dst*. This is a wrapper of **copy_from_user**\ (). * Return * 0 on success, or a negative error in case of failure. * * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) * Description * Use BTF to store a string representation of *ptr*->ptr in *str*, * using *ptr*->type_id. This value should specify the type * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) * can be used to look up vmlinux BTF type ids. Traversing the * data structure using BTF, the type information and values are * stored in the first *str_size* - 1 bytes of *str*. Safe copy of * the pointer data is carried out to avoid kernel crashes during * operation. Smaller types can use string space on the stack; * larger programs can use map data to store the string * representation. * * The string can be subsequently shared with userspace via * bpf_perf_event_output() or ring buffer interfaces. 
* bpf_trace_printk() is to be avoided as it places too small * a limit on string size to be useful. * * *flags* is a combination of * * **BTF_F_COMPACT** * no formatting around type information * **BTF_F_NONAME** * no struct/union member names/types * **BTF_F_PTR_RAW** * show raw (unobfuscated) pointer values; * equivalent to printk specifier %px. * **BTF_F_ZERO** * show zero-valued struct/union members; they * are not displayed by default * * Return * The number of bytes that were written (or would have been * written if output had to be truncated due to string size), * or a negative error in cases of failure. * * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) * Description * Use BTF to write to seq_write a string representation of * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). * *flags* are identical to those used for bpf_snprintf_btf. * Return * 0 on success or a negative error in case of failure. * * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) * Description * See **bpf_get_cgroup_classid**\ () for the main description. * This helper differs from **bpf_get_cgroup_classid**\ () in that * the cgroup v1 net_cls class is retrieved only from the *skb*'s * associated socket instead of the current process. * Return * The id is returned or 0 in case the id could not be retrieved. * * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) * Description * Redirect the packet to another net device of index *ifindex* * and fill in L2 addresses from neighboring subsystem. This helper * is somewhat similar to **bpf_redirect**\ (), except that it * populates L2 addresses as well, meaning, internally, the helper * relies on the neighbor lookup for the L2 address of the nexthop. * * The helper will perform a FIB lookup based on the skb's * networking header to get the address of the next hop, unless * this is supplied by the caller in the *params* argument. The * *plen* argument indicates the len of *params* and should be set * to 0 if *params* is NULL. * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types, and enabled * for IPv4 and IPv6 protocols. * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. * * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu) * Description * Take a pointer to a percpu ksym, *percpu_ptr*, and return a * pointer to the percpu kernel variable on *cpu*. A ksym is an * extern variable decorated with '__ksym'. For ksym, there is a * global var (either static or global) defined of the same name * in the kernel. The ksym is percpu if the global var is percpu. * The returned pointer points to the global percpu var on *cpu*. * * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the * kernel, except that bpf_per_cpu_ptr() may return NULL. This * happens if *cpu* is larger than nr_cpu_ids. The caller of * bpf_per_cpu_ptr() must check the returned value. * Return * A pointer pointing to the kernel percpu variable on *cpu*, or * NULL, if *cpu* is invalid. * * void *bpf_this_cpu_ptr(const void *percpu_ptr) * Description * Take a pointer to a percpu ksym, *percpu_ptr*, and return a * pointer to the percpu kernel variable on this cpu. See the * description of 'ksym' in **bpf_per_cpu_ptr**\ (). * * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would * never return NULL. 
* Return * A pointer pointing to the kernel percpu variable on this cpu. * * long bpf_redirect_peer(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_redirect**\ (), except * that the redirection happens to the *ifindex*' peer device and * the netns switch takes place from ingress to ingress without * going through the CPU's backlog queue. * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types at the * ingress hook and for veth and netkit target device types. The * peer device must reside in a different network namespace. * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. * * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags) * Description * Get a bpf_local_storage from the *task*. * * Logically, it could be thought of as getting the value from * a *map* with *task* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this * helper enforces the key must be a task_struct and the map must also * be a **BPF_MAP_TYPE_TASK_STORAGE**. * * Underneath, the value is stored locally at *task* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf_local_storage residing at *task*. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero initialized. * Return * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. * * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task) * Description * Delete a bpf_local_storage from a *task*. * Return * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. * * struct task_struct *bpf_get_current_task_btf(void) * Description * Return a BTF pointer to the "current" task. * This pointer can also be used in helpers that accept an * *ARG_PTR_TO_BTF_ID* of type *task_struct*. * Return * Pointer to the current task. * * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags) * Description * Set or clear certain options on *bprm*: * * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit * which sets the **AT_SECURE** auxv for glibc. The bit * is cleared if the flag is not specified. * Return * **-EINVAL** if invalid *flags* are passed, zero otherwise. * * u64 bpf_ktime_get_coarse_ns(void) * Description * Return a coarse-grained version of the time elapsed since * system boot, in nanoseconds. Does not include time the system * was suspended. * * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) * Return * Current *ktime*. * * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size) * Description * Returns the stored IMA hash of the *inode* (if it's available). * If the hash is larger than *size*, then only *size* * bytes will be copied to *dst* * Return * The **hash_algo** is returned on success, * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if * invalid arguments are passed. 
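*
 *		As an editorial illustration (not part of the original kernel
 *		documentation), **bpf_get_current_task_btf**\ () and
 *		**bpf_task_storage_get**\ () above are typically combined as
 *		follows, assuming a hypothetical map *task_map* of type
 *		**BPF_MAP_TYPE_TASK_STORAGE** with a __u64 value:
 *
 *		::
 *
 *			struct task_struct *task = bpf_get_current_task_btf();
 *			__u64 *val;
 *
 *			/* NULL value: new storage is zero-initialized */
 *			val = bpf_task_storage_get(&task_map, task, NULL,
 *						   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *			if (val)
 *				__sync_fetch_and_add(val, 1);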
 *
 * struct socket *bpf_sock_from_file(struct file *file)
 *	Description
 *		If the given file represents a socket, returns the associated
 *		socket.
 *	Return
 *		A pointer to a struct socket on success or NULL if the file is
 *		not a socket.
 *
 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
 *	Description
 *		Check packet size against the MTU of a net device (based
 *		on *ifindex*). This helper will likely be used in combination
 *		with helpers that adjust/change the packet size.
 *
 *		The argument *len_diff* can be used for querying with a planned
 *		size change. This allows checking the MTU prior to changing
 *		the packet ctx. Providing a *len_diff* adjustment that is
 *		larger than the actual packet size (resulting in a negative
 *		packet size) will in principle not exceed the MTU, which is
 *		why it is not considered a failure. Other BPF helpers are
 *		needed for performing the planned size change; therefore the
 *		responsibility for catching a negative packet size belongs in
 *		those helpers.
 *
 *		Specifying *ifindex* zero means the MTU check is performed
 *		against the current net device. This is practical if the
 *		helper is not used prior to a redirect.
 *
 *		On input, *mtu_len* must be a valid pointer, else the verifier
 *		will reject the BPF program. If the value *mtu_len* is
 *		initialized to zero then the ctx packet size is used. When the
 *		value *mtu_len* is provided as input, it specifies the L3
 *		length that the MTU check is done against. Remember that XDP
 *		and TC length operate at L2, but this value is L3, as it
 *		correlates to the MTU and IP-header tot_len values, which are
 *		L3 (similar behavior to bpf_fib_lookup).
 *
 *		The Linux kernel route table can configure MTUs on a more
 *		specific per-route level, which is not provided by this helper.
 *		For route-level MTU checks use the **bpf_fib_lookup**\ ()
 *		helper.
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
 *
 *		The *flags* argument can be a combination of one or more of the
 *		following values:
 *
 *		**BPF_MTU_CHK_SEGS**
 *			This flag only works for *ctx* **struct sk_buff**.
 *			If the packet context contains extra packet segment
 *			buffers (often known as a GSO skb), then the MTU is
 *			harder to check at this point, because in the transmit
 *			path it is possible for the skb packet to get
 *			re-segmented (depending on net device features). This
 *			could still be an MTU violation, so this flag enables
 *			performing the MTU check against segments, with a
 *			different violation return code to tell it apart. This
 *			check cannot use len_diff.
 *
 *		On return, the *mtu_len* pointer contains the MTU value of the
 *		net device. Remember that the net device's configured MTU is
 *		the L3 size, which is what is returned here, while XDP and TC
 *		length operate at L2. The helper takes this into account for
 *		you, but remember it when using the MTU value in your BPF code.
 *
 *	Return
 *		* 0 on success, and populate MTU value in *mtu_len* pointer.
 *
 *		* < 0 if any input argument is invalid (*mtu_len* not updated)
 *
 *		MTU violations return positive values, but also populate the
 *		MTU value in the *mtu_len* pointer, as this can be needed for
 *		implementing PMTU handling:
 *
 *		* **BPF_MTU_CHK_RET_FRAG_NEEDED**
 *		* **BPF_MTU_CHK_RET_SEGS_TOOBIG**
 *
 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		For each element in **map**, call the **callback_fn** function
 *		with **map**, **callback_ctx** and other map-specific
 *		parameters. The **callback_fn** should be a static function
 *		and the **callback_ctx** should be a pointer to the stack.
 *		The **flags** argument is used to control certain aspects of
 *		the helper. Currently, **flags** must be 0.
 *
 *		The following is a list of supported map types and their
 *		respective expected callback signatures:
 *
 *		BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
 *		BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
 *		BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
 *
 *		long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
 *
 *		For per-cpu maps, the map_value is the value on the cpu where
 *		the bpf_prog is running.
 *
 *		If **callback_fn** returns 0, the helper will continue to the
 *		next element. If the return value is 1, the helper will skip
 *		the rest of the elements and return. Other return values are
 *		not used now.
 *
 *	Return
 *		The number of traversed map elements on success, **-EINVAL**
 *		for invalid **flags**.
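*
 *		An editorial sketch (not part of the original kernel
 *		documentation) of the callback contract described above,
 *		counting the elements of a hypothetical hash map *my_hash*:
 *
 *		::
 *
 *			static long count_elem(struct bpf_map *map,
 *					       const void *key, void *value,
 *					       void *ctx)
 *			{
 *				(*(long *)ctx)++;
 *				return 0;	/* 0 == continue iteration */
 *			}
 *
 *			long cnt = 0;
 *
 *			bpf_for_each_map_elem(&my_hash, count_elem, &cnt, 0);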
 *
 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
 *	Description
 *		Outputs a string into the **str** buffer of size **str_size**
 *		based on a format string stored in a read-only map pointed to
 *		by **fmt**.
 *
 *		Each format specifier in **fmt** corresponds to one u64 element
 *		in the **data** array. For strings and pointers where pointees
 *		are accessed, only the pointer values are stored in the *data*
 *		array. The *data_len* is the size of *data* in bytes - it must
 *		be a multiple of 8.
 *
 *		Formats **%s** and **%p{i,I}{4,6}** require reading kernel
 *		memory. Reading kernel memory may fail due to either an
 *		invalid address or a valid address that requires a major
 *		memory fault. If reading kernel memory fails, the string for
 *		**%s** will be an empty string, and the IP address for
 *		**%p{i,I}{4,6}** will be 0. Not returning an error to the bpf
 *		program is consistent with what **bpf_trace_printk**\ () does
 *		for now.
 *
 *	Return
 *		The strictly positive length of the formatted string, including
 *		the trailing zero character. If the return value is greater than
 *		**str_size**, **str** contains a truncated string, guaranteed to
 *		be zero-terminated except when **str_size** is 0.
 *
 *		Or **-EBUSY** if the per-CPU memory copy buffer is busy.
 *
 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
 *	Description
 *		Execute the bpf syscall with the given arguments.
 *	Return
 *		A syscall result.
 *
 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
 *	Description
 *		Find a BTF type with the given name and kind in vmlinux BTF or
 *		in a module's BTF.
 *	Return
 *		Returns btf_id and btf_obj_fd in the lower and upper 32 bits.
 *
 * long bpf_sys_close(u32 fd)
 *	Description
 *		Execute the close syscall for the given FD.
 *	Return
 *		A syscall result.
 *
 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
 *	Description
 *		Initialize the timer.
 *		The first 4 bits of *flags* specify the clockid.
 *		Only CLOCK_MONOTONIC, CLOCK_REALTIME and CLOCK_BOOTTIME are
 *		allowed. All other bits of *flags* are reserved.
 *		The verifier will reject the program if *timer* is not from
 *		the same *map*.
 *	Return
 *		0 on success.
 *		**-EBUSY** if *timer* is already initialized.
 *		**-EINVAL** if invalid *flags* are passed.
 *		**-EPERM** if *timer* is in a map that doesn't have any user
 *		references. User space should either hold a file descriptor
 *		to a map with timers or pin such a map in bpffs. When the map
 *		is unpinned or the file descriptor is closed, all timers in
 *		the map will be cancelled and freed.
 *
 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
 *	Description
 *		Configure the timer to call the static function *callback_fn*.
 *	Return
 *		0 on success.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier.
 *		**-EPERM** if *timer* is in a map that doesn't have any user
 *		references. User space should either hold a file descriptor
 *		to a map with timers or pin such a map in bpffs. When the map
 *		is unpinned or the file descriptor is closed, all timers in
 *		the map will be cancelled and freed.
 *
 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
 *	Description
 *		Set the timer expiration to *nsecs* nanoseconds from the
 *		current time. The configured callback will be invoked in soft
 *		irq context on some cpu and will not repeat unless another
 *		bpf_timer_start() is made. In that case, the next invocation
 *		can migrate to a different cpu.
 *		Since struct bpf_timer is a field inside a map element, the
 *		map owns the timer. bpf_timer_set_callback() will increment
 *		the refcnt of the BPF program to make sure that the
 *		callback_fn code stays valid. When the user space reference
 *		to a map reaches zero, all timers in the map are cancelled
 *		and the corresponding programs' refcnts are decremented. This
 *		is done to make sure that Ctrl-C of a user process doesn't
 *		leave any timers running. If the map is pinned in bpffs, the
 *		callback_fn can re-arm itself indefinitely.
 *		The bpf_map_update/delete_elem() helpers and user space
 *		sys_bpf commands cancel and free the timer in the given map
 *		element.
 *		The map can contain timers that invoke callback_fn-s from
 *		different programs. The same callback_fn can serve different
 *		timers from different maps if the key/value layout matches
 *		across maps. Every bpf_timer_set_callback() can have a
 *		different callback_fn.
 *
 *		*flags* can be one of:
 *
 *		**BPF_F_TIMER_ABS**
 *			Start the timer with an absolute expiry value instead
 *			of the default relative one.
 *		**BPF_F_TIMER_CPU_PIN**
 *			The timer will be pinned to the CPU of the caller.
 *
 *	Return
 *		0 on success.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier or invalid *flags* are passed.
 *
 * long bpf_timer_cancel(struct bpf_timer *timer)
 *	Description
 *		Cancel the timer and wait for callback_fn to finish if it was
 *		running.
 *	Return
 *		0 if the timer was not active.
 *		1 if the timer was active.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier.
 *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel()
 *		on its own timer, which would have led to a deadlock
 *		otherwise.
 *
 * u64 bpf_get_func_ip(void *ctx)
 *	Description
 *		Get the address of the traced function (for tracing and
 *		kprobe programs).
 *
 *		When called for a kprobe program attached as a uprobe, it
 *		returns the probe address for both entry and return uprobes.
 *
 *	Return
 *		Address of the traced function for kprobe.
 *		0 for kprobes placed within the function (not at the entry).
 *		Address of the probe for uprobe and return uprobe.
 *
 * u64 bpf_get_attach_cookie(void *ctx)
 *	Description
 *		Get the bpf_cookie value provided (optionally) during program
 *		attachment. It might be different for each individual
 *		attachment, even if the BPF program itself is the same.
 *		Expects the BPF program context *ctx* as its first argument.
 *
 *		Supported for the following program types:
 *
 *		- kprobe/uprobe;
 *		- tracepoint;
 *		- perf_event.
 *	Return
 *		Value specified by the user at BPF link creation/attachment
 *		time, or 0 if it was not specified.
 *
 * long bpf_task_pt_regs(struct task_struct *task)
 *	Description
 *		Get the struct pt_regs associated with **task**.
 *	Return
 *		A pointer to struct pt_regs.
 *
 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
 *	Description
 *		Get branch trace from hardware engines like Intel LBR.
 *		The hardware engine is stopped shortly after the helper is
 *		called. Therefore, the user needs to filter branch entries
 *		based on the actual use case. To capture the branch trace
 *		before the trigger point of the BPF program, the helper
 *		should be called at the beginning of the BPF program.
 *
 *		The data is stored as struct perf_branch_entry into the
 *		output buffer *entries*. *size* is the size of *entries* in
 *		bytes. *flags* is reserved for now and must be zero.
 *
 *	Return
 *		On success, the number of bytes written to *entries*. On
 *		error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-ENOENT** if the architecture does not support branch
 *		records.
 *
 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
 *	Description
 *		Behaves like the **bpf_trace_printk**\ () helper, but takes an
 *		array of u64 to format, and can therefore handle more format
 *		arguments.
 *
 *		Arguments are to be used as in the **bpf_seq_printf**\ ()
 *		helper.
 *	Return
 *		The number of bytes written to the buffer, or a negative error
 *		in case of failure.
 *
 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
 *	Description
 *		Get the address of a kernel symbol, returned in *res*. *res*
 *		is set to 0 if the symbol is not found.
 *	Return
 *		On success, zero. On error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-EINVAL** if the string *name* is not the same size as
 *		*name_sz*.
 *
 *		**-ENOENT** if the symbol is not found.
 *
 *		**-EPERM** if the caller does not have permission to obtain
 *		kernel addresses.
 *
 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		Find the vma of *task* that contains *addr*, and call the
 *		*callback_fn* function with *task*, *vma*, and *callback_ctx*.
 *		The *callback_fn* should be a static function and
 *		the *callback_ctx* should be a pointer to the stack.
 *		The *flags* argument is used to control certain aspects of
 *		the helper. Currently, *flags* must be 0.
 *
 *		The expected callback signature is
 *
 *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
 *
 *	Return
 *		0 on success.
 *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
 *		**-EBUSY** if it failed to try-lock the mmap_lock.
 *		**-EINVAL** for invalid **flags**.
 *
 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		For **nr_loops**, call the **callback_fn** function
 *		with **callback_ctx** as the context parameter.
 *		The **callback_fn** should be a static function and
 *		the **callback_ctx** should be a pointer to the stack.
 *		The **flags** argument is used to control certain aspects of
 *		the helper. Currently, **flags** must be 0. Currently,
 *		nr_loops is limited to 1 << 23 (~8 million) loops.
 *
 *		long (\*callback_fn)(u64 index, void \*ctx);
 *
 *		where **index** is the current index in the loop. The index
 *		is zero-indexed.
 *
 *		If **callback_fn** returns 0, the helper will continue to the
 *		next loop. If the return value is 1, the helper will skip the
 *		rest of the loops and return. Other return values are not
 *		used now, and will be rejected by the verifier.
 *
 *	Return
 *		The number of loops performed, **-EINVAL** for invalid **flags**,
 *		**-E2BIG** if **nr_loops** exceeds the maximum number of loops.
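*
 *		An editorial sketch (not part of the original kernel
 *		documentation) of the callback contract described above,
 *		summing the first 100 loop indices (callback name is
 *		illustrative only):
 *
 *		::
 *
 *			static long add_index(u64 index, void *ctx)
 *			{
 *				*(__u64 *)ctx += index;
 *				return 0;	/* 0 == keep looping */
 *			}
 *
 *			__u64 sum = 0;
 *
 *			bpf_loop(100, add_index, &sum, 0);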
 *
 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
 *	Description
 *		Do strncmp() between **s1** and **s2**. **s1** doesn't need
 *		to be null-terminated and **s1_sz** is the maximum storage
 *		size of **s1**. **s2** must be a read-only string.
 *	Return
 *		An integer less than, equal to, or greater than zero
 *		if the first **s1_sz** bytes of **s1** are found to be
 *		less than, to match, or to be greater than **s2**.
 *
 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
 *	Description
 *		Get the **n**-th argument register (zero-based) of the traced
 *		function (for tracing programs), returned in **value**.
 *
 *	Return
 *		0 on success.
 *		**-EINVAL** if n >= the argument register count of the traced
 *		function.
 *
 * long bpf_get_func_ret(void *ctx, u64 *value)
 *	Description
 *		Get the return value of the traced function (for tracing
 *		programs) in **value**.
 *
 *	Return
 *		0 on success.
 *		**-EOPNOTSUPP** for tracing programs other than
 *		BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
 *
 * long bpf_get_func_arg_cnt(void *ctx)
 *	Description
 *		Get the number of registers of the traced function (for
 *		tracing programs) in which the function's arguments are
 *		stored.
 *
 *	Return
 *		The number of argument registers of the traced function.
 *
 * int bpf_get_retval(void)
 *	Description
 *		Get the BPF program's return value that will be returned to
 *		the upper layers.
 *
 *		This helper is currently supported by cgroup programs, and
 *		only by the hooks where the BPF program's return value is
 *		returned to userspace via errno.
 *	Return
 *		The BPF program's return value.
 *
 * int bpf_set_retval(int retval)
 *	Description
 *		Set the BPF program's return value that will be returned to
 *		the upper layers.
 *
 *		This helper is currently supported by cgroup programs, and
 *		only by the hooks where the BPF program's return value is
 *		returned to userspace via errno.
 *
 *		Note that there is the following corner case where the
 *		program exports an error via bpf_set_retval but signals
 *		success via 'return 1':
 *
 *			bpf_set_retval(-EPERM);
 *			return 1;
 *
 *		In this case, the BPF program's return value will use the
 *		helper's -EPERM. This still holds true for cgroup/bind{4,6},
 *		which support the extra 'return 3' success case.
 *
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
 *	Description
 *		Get the total size of a given xdp buff (linear and paged
 *		area).
 *	Return
 *		The total size of a given xdp buffer.
 *
 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
 *	Description
 *		This helper is provided as an easy way to load data from an
 *		xdp buffer. It can be used to load *len* bytes from *offset*
 *		from the frame associated with *xdp_md*, into the buffer
 *		pointed to by *buf*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
 *	Description
 *		Store *len* bytes from buffer *buf* into the frame
 *		associated with *xdp_md*, at *offset*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
 *	Description
 *		Read *size* bytes from user space address *user_ptr* in
 *		*tsk*'s address space, and store the data in *dst*. *flags*
 *		is not used yet and is provided for future extensibility.
 *		This helper can only be used by sleepable programs.
 *	Return
 *		0 on success, or a negative error in case of failure. On
 *		error, the *dst* buffer is zeroed out.
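*
 *		An editorial sketch (not part of the original kernel
 *		documentation) of **bpf_xdp_load_bytes**\ () above, reading
 *		the Ethernet header even when it spans fragments, assuming
 *		*xdp* is the program's **struct xdp_md** context:
 *
 *		::
 *
 *			struct ethhdr eth;
 *
 *			if (bpf_xdp_load_bytes(xdp, 0, &eth, sizeof(eth)) < 0)
 *				return XDP_DROP;
 *			if (eth.h_proto == bpf_htons(ETH_P_IP)) {
 *				/* IPv4 packet: continue processing... */
 *			}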
 *
 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
 *	Description
 *		Change __sk_buff->tstamp_type to *tstamp_type*
 *		and set __sk_buff->tstamp to *tstamp* at the same time.
 *
 *		If there is no need to change __sk_buff->tstamp_type,
 *		the tstamp value can be directly written to __sk_buff->tstamp
 *		instead.
 *
 *		BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
 *		will be kept during bpf_redirect_*(). A non-zero
 *		*tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
 *		*tstamp_type*.
 *
 *		A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
 *		with a zero *tstamp*.
 *
 *		Only IPv4 and IPv6 skb->protocol are supported.
 *
 *		This function is most useful when a mono delivery time needs
 *		to be set on __sk_buff->tstamp before a bpf_redirect_*() to
 *		the egress of an iface. For example, changing the (rcv)
 *		timestamp in __sk_buff->tstamp at ingress to a mono delivery
 *		time and then bpf_redirect_*() to sch_fq@phy-dev.
 *	Return
 *		0 on success.
 *		**-EINVAL** for invalid input.
 *		**-EOPNOTSUPP** for an unsupported protocol.
 *
 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
 *	Description
 *		Returns a calculated IMA hash of the *file*.
 *		If the hash is larger than *size*, then only *size*
 *		bytes will be copied to *dst*.
 *	Return
 *		The **hash_algo** is returned on success,
 *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL**
 *		if invalid arguments are passed.
 *
 * void *bpf_kptr_xchg(void *dst, void *ptr)
 *	Description
 *		Exchange the kptr at pointer *dst* with *ptr*, and return the
 *		old value. *dst* can be a map value or a local kptr. *ptr*
 *		can be NULL, otherwise it must be a referenced pointer which
 *		will be released when this helper is called.
 *	Return
 *		The old value of the kptr (which can be NULL). The returned
 *		pointer, if not NULL, is a reference which must be released
 *		using its corresponding release function, or moved into a
 *		BPF map before program exit.
 *
 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
 *	Description
 *		Perform a lookup in the *percpu map* for an entry associated
 *		with *key* on *cpu*.
 *	Return
 *		Map value associated with *key* on *cpu*, or **NULL** if no
 *		entry was found or *cpu* is invalid.
 *
 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
 *	Description
 *		Get a dynptr to local memory *data*.
 *
 *		*data* must be a pointer to a map value.
 *		The maximum *size* supported is DYNPTR_MAX_SIZE.
 *		*flags* is currently unused.
 *	Return
 *		0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
 *		-EINVAL if flags is not 0.
 *
 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
 *	Description
 *		Reserve *size* bytes of payload in a ring buffer *ringbuf*
 *		through the dynptr interface. *flags* must be 0.
 *
 *		Please note that a corresponding bpf_ringbuf_submit_dynptr or
 *		bpf_ringbuf_discard_dynptr must be called on *ptr*, even if
 *		the reservation fails. This is enforced by the verifier.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
 *	Description
 *		Submit the reserved ring buffer sample, pointed to by *ptr*,
 *		through the dynptr interface. This is a no-op if the dynptr
 *		is invalid/null.
 *
 *		For more information on *flags*, please see
 *		'bpf_ringbuf_submit'.
 *	Return
 *		Nothing. Always succeeds.
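*
 *		An editorial sketch (not part of the original kernel
 *		documentation) of the reserve/submit pattern described above,
 *		assuming a hypothetical **BPF_MAP_TYPE_RINGBUF** map *rb* and
 *		a hypothetical event struct with a *ts* field:
 *
 *		::
 *
 *			struct bpf_dynptr ptr;
 *			struct event *e;
 *
 *			if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(*e), 0, &ptr)) {
 *				/* discard is required even on failure */
 *				bpf_ringbuf_discard_dynptr(&ptr, 0);
 *				return 0;
 *			}
 *			e = bpf_dynptr_data(&ptr, 0, sizeof(*e));
 *			if (e)
 *				e->ts = bpf_ktime_get_ns();
 *			bpf_ringbuf_submit_dynptr(&ptr, 0);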
 *
 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
 *	Description
 *		Discard the reserved ring buffer sample through the dynptr
 *		interface. This is a no-op if the dynptr is invalid/null.
 *
 *		For more information on *flags*, please see
 *		'bpf_ringbuf_discard'.
 *	Return
 *		Nothing. Always succeeds.
 *
 * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
 *	Description
 *		Read *len* bytes from *src* into *dst*, starting from *offset*
 *		into *src*.
 *		*flags* is currently unused.
 *	Return
 *		0 on success, -E2BIG if *offset* + *len* exceeds the length
 *		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
 *		*flags* is not 0.
 *
 * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
 *	Description
 *		Write *len* bytes from *src* into *dst*, starting from *offset*
 *		into *dst*.
 *
 *		*flags* must be 0 except for skb-type dynptrs.
 *
 *		For skb-type dynptrs:
 *		    *  All data slices of the dynptr are automatically
 *		       invalidated after **bpf_dynptr_write**\ (). This is
 *		       because writing may pull the skb and change the
 *		       underlying packet buffer.
 *
 *		    *  For *flags*, please see the flags accepted by
 *		       **bpf_skb_store_bytes**\ ().
 *	Return
 *		0 on success, -E2BIG if *offset* + *len* exceeds the length
 *		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if
 *		*dst* is a read-only dynptr or if *flags* is not correct. For
 *		skb-type dynptrs, other errors correspond to errors returned
 *		by **bpf_skb_store_bytes**\ ().
 *
 * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
 *	Description
 *		Get a pointer to the underlying dynptr data.
 *
 *		*len* must be a statically known value. The returned data
 *		slice is invalidated whenever the dynptr is invalidated.
 *
 *		skb- and xdp-type dynptrs may not use bpf_dynptr_data. They
 *		should instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
 *	Return
 *		Pointer to the underlying dynptr data, NULL if the dynptr is
 *		read-only, if the dynptr is invalid, or if the offset and
 *		length are out of bounds.
 *
 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
 *	Description
 *		Try to issue a SYN cookie for the packet with corresponding
 *		IPv4/TCP headers, *iph* and *th*, without depending on a
 *		listening socket.
 *
 *		*iph* points to the IPv4 header.
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		On success, the lower 32 bits hold the generated SYN cookie,
 *		followed by 16 bits which hold the MSS value for that cookie;
 *		the top 16 bits are unused.
 *
 *		On failure, the returned value is one of the following:
 *
 *		**-EINVAL** if *th_len* is invalid.
 *
 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
 *	Description
 *		Try to issue a SYN cookie for the packet with corresponding
 *		IPv6/TCP headers, *iph* and *th*, without depending on a
 *		listening socket.
 *
 *		*iph* points to the IPv6 header.
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		On success, the lower 32 bits hold the generated SYN cookie,
 *		followed by 16 bits which hold the MSS value for that cookie;
 *		the top 16 bits are unused.
 *
 *		On failure, the returned value is one of the following:
 *
 *		**-EINVAL** if *th_len* is invalid.
 *
 *		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in.
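*
 *		An editorial sketch (not part of the original kernel
 *		documentation) of unpacking the return value, assuming *iph*
 *		and *th* have already been parsed and bounds-checked in an
 *		XDP program:
 *
 *		::
 *
 *			__s64 ret;
 *			__u32 seq;
 *			__u16 mss;
 *
 *			ret = bpf_tcp_raw_gen_syncookie_ipv4(iph, th,
 *							     sizeof(*th));
 *			if (ret < 0)
 *				return XDP_ABORTED;
 *			seq = (__u32)ret;		/* SYN cookie */
 *			mss = (ret >> 32) & 0xffff;	/* MSS for that cookie */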
* * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv4 header. * * *th* points to the TCP header. * Return * 0 if *iph* and *th* are a valid SYN cookie ACK. * * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. * * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv6 header. * * *th* points to the TCP header. * Return * 0 if *iph* and *th* are a valid SYN cookie ACK. * * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. * * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. * * u64 bpf_ktime_get_tai_ns(void) * Description * A nonsettable system-wide clock derived from wall-clock time but * ignoring leap seconds. This clock does not experience * discontinuities and backwards jumps caused by NTP inserting leap * seconds as CLOCK_REALTIME does. * * See: **clock_gettime**\ (**CLOCK_TAI**) * Return * Current *ktime*. * * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags) * Description * Drain samples from the specified user ring buffer, and invoke * the provided callback for each such sample: * * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx); * * If **callback_fn** returns 0, the helper will continue to try * and drain the next sample, up to a maximum of * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1, * the helper will skip the rest of the samples and return. Other * return values are not used now, and will be rejected by the * verifier. * Return * The number of drained samples if no error was encountered while * draining samples, or 0 if no samples were present in the ring * buffer. If a user-space producer was epoll-waiting on this map, * and at least one sample was drained, they will receive an event * notification notifying them of available space in the ring * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this * function, no wakeup notification will be sent. If the * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will * be sent even if no sample was drained. * * On failure, the returned value is one of the following: * * **-EBUSY** if the ring buffer is contended, and another calling * context was concurrently draining the ring buffer. * * **-EINVAL** if user-space is not properly tracking the ring * buffer due to the producer position not being aligned to 8 * bytes, a sample not being aligned to 8 bytes, or the producer * position not matching the advertised length of a sample. * * **-E2BIG** if user-space has tried to publish a sample which is * larger than the size of the ring buffer, or which cannot fit * within a struct bpf_dynptr. * * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags) * Description * Get a bpf_local_storage from the *cgroup*. * * Logically, it could be thought of as getting the value from * a *map* with *cgroup* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this * helper enforces the key must be a cgroup struct and the map must also * be a **BPF_MAP_TYPE_CGRP_STORAGE**. 
* * In reality, the local-storage value is embedded directly inside of the * *cgroup* object itself, rather than being located in the * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is * queried for some *map* on a *cgroup* object, the kernel will perform an * O(n) iteration over all of the live local-storage values for that * *cgroup* object until the local-storage value for the *map* is found. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero initialized. * Return * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. * * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup) * Description * Delete a bpf_local_storage from a *cgroup*. * Return * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. */ #define ___BPF_FUNC_MAPPER(FN, ctx...) \ FN(unspec, 0, ##ctx) \ FN(map_lookup_elem, 1, ##ctx) \ FN(map_update_elem, 2, ##ctx) \ FN(map_delete_elem, 3, ##ctx) \ FN(probe_read, 4, ##ctx) \ FN(ktime_get_ns, 5, ##ctx) \ FN(trace_printk, 6, ##ctx) \ FN(get_prandom_u32, 7, ##ctx) \ FN(get_smp_processor_id, 8, ##ctx) \ FN(skb_store_bytes, 9, ##ctx) \ FN(l3_csum_replace, 10, ##ctx) \ FN(l4_csum_replace, 11, ##ctx) \ FN(tail_call, 12, ##ctx) \ FN(clone_redirect, 13, ##ctx) \ FN(get_current_pid_tgid, 14, ##ctx) \ FN(get_current_uid_gid, 15, ##ctx) \ FN(get_current_comm, 16, ##ctx) \ FN(get_cgroup_classid, 17, ##ctx) \ FN(skb_vlan_push, 18, ##ctx) \ FN(skb_vlan_pop, 19, ##ctx) \ FN(skb_get_tunnel_key, 20, ##ctx) \ FN(skb_set_tunnel_key, 21, ##ctx) \ FN(perf_event_read, 22, ##ctx) \ FN(redirect, 23, ##ctx) \ FN(get_route_realm, 24, ##ctx) \ FN(perf_event_output, 25, ##ctx) \ FN(skb_load_bytes, 26, ##ctx) \ FN(get_stackid, 27, ##ctx) \ FN(csum_diff, 28, ##ctx) \ FN(skb_get_tunnel_opt, 29, ##ctx) \ FN(skb_set_tunnel_opt, 30, ##ctx) \ FN(skb_change_proto, 31, ##ctx) \ FN(skb_change_type, 32, ##ctx) \ FN(skb_under_cgroup, 33, ##ctx) \ FN(get_hash_recalc, 34, ##ctx) \ FN(get_current_task, 35, ##ctx) \ FN(probe_write_user, 36, ##ctx) \ FN(current_task_under_cgroup, 37, ##ctx) \ FN(skb_change_tail, 38, ##ctx) \ FN(skb_pull_data, 39, ##ctx) \ FN(csum_update, 40, ##ctx) \ FN(set_hash_invalid, 41, ##ctx) \ FN(get_numa_node_id, 42, ##ctx) \ FN(skb_change_head, 43, ##ctx) \ FN(xdp_adjust_head, 44, ##ctx) \ FN(probe_read_str, 45, ##ctx) \ FN(get_socket_cookie, 46, ##ctx) \ FN(get_socket_uid, 47, ##ctx) \ FN(set_hash, 48, ##ctx) \ FN(setsockopt, 49, ##ctx) \ FN(skb_adjust_room, 50, ##ctx) \ FN(redirect_map, 51, ##ctx) \ FN(sk_redirect_map, 52, ##ctx) \ FN(sock_map_update, 53, ##ctx) \ FN(xdp_adjust_meta, 54, ##ctx) \ FN(perf_event_read_value, 55, ##ctx) \ FN(perf_prog_read_value, 56, ##ctx) \ FN(getsockopt, 57, ##ctx) \ FN(override_return, 58, ##ctx) \ FN(sock_ops_cb_flags_set, 59, ##ctx) \ FN(msg_redirect_map, 60, ##ctx) \ FN(msg_apply_bytes, 61, ##ctx) \ FN(msg_cork_bytes, 62, ##ctx) \ FN(msg_pull_data, 63, ##ctx) \ FN(bind, 64, ##ctx) \ FN(xdp_adjust_tail, 65, ##ctx) \ FN(skb_get_xfrm_state, 66, ##ctx) \ FN(get_stack, 67, ##ctx) \ FN(skb_load_bytes_relative, 68, ##ctx) \ FN(fib_lookup, 69, ##ctx) \ FN(sock_hash_update, 70, ##ctx) \ FN(msg_redirect_hash, 71, ##ctx) \ FN(sk_redirect_hash, 72, 
##ctx) \ FN(lwt_push_encap, 73, ##ctx) \ FN(lwt_seg6_store_bytes, 74, ##ctx) \ FN(lwt_seg6_adjust_srh, 75, ##ctx) \ FN(lwt_seg6_action, 76, ##ctx) \ FN(rc_repeat, 77, ##ctx) \ FN(rc_keydown, 78, ##ctx) \ FN(skb_cgroup_id, 79, ##ctx) \ FN(get_current_cgroup_id, 80, ##ctx) \ FN(get_local_storage, 81, ##ctx) \ FN(sk_select_reuseport, 82, ##ctx) \ FN(skb_ancestor_cgroup_id, 83, ##ctx) \ FN(sk_lookup_tcp, 84, ##ctx) \ FN(sk_lookup_udp, 85, ##ctx) \ FN(sk_release, 86, ##ctx) \ FN(map_push_elem, 87, ##ctx) \ FN(map_pop_elem, 88, ##ctx) \ FN(map_peek_elem, 89, ##ctx) \ FN(msg_push_data, 90, ##ctx) \ FN(msg_pop_data, 91, ##ctx) \ FN(rc_pointer_rel, 92, ##ctx) \ FN(spin_lock, 93, ##ctx) \ FN(spin_unlock, 94, ##ctx) \ FN(sk_fullsock, 95, ##ctx) \ FN(tcp_sock, 96, ##ctx) \ FN(skb_ecn_set_ce, 97, ##ctx) \ FN(get_listener_sock, 98, ##ctx) \ FN(skc_lookup_tcp, 99, ##ctx) \ FN(tcp_check_syncookie, 100, ##ctx) \ FN(sysctl_get_name, 101, ##ctx) \ FN(sysctl_get_current_value, 102, ##ctx) \ FN(sysctl_get_new_value, 103, ##ctx) \ FN(sysctl_set_new_value, 104, ##ctx) \ FN(strtol, 105, ##ctx) \ FN(strtoul, 106, ##ctx) \ FN(sk_storage_get, 107, ##ctx) \ FN(sk_storage_delete, 108, ##ctx) \ FN(send_signal, 109, ##ctx) \ FN(tcp_gen_syncookie, 110, ##ctx) \ FN(skb_output, 111, ##ctx) \ FN(probe_read_user, 112, ##ctx) \ FN(probe_read_kernel, 113, ##ctx) \ FN(probe_read_user_str, 114, ##ctx) \ FN(probe_read_kernel_str, 115, ##ctx) \ FN(tcp_send_ack, 116, ##ctx) \ FN(send_signal_thread, 117, ##ctx) \ FN(jiffies64, 118, ##ctx) \ FN(read_branch_records, 119, ##ctx) \ FN(get_ns_current_pid_tgid, 120, ##ctx) \ FN(xdp_output, 121, ##ctx) \ FN(get_netns_cookie, 122, ##ctx) \ FN(get_current_ancestor_cgroup_id, 123, ##ctx) \ FN(sk_assign, 124, ##ctx) \ FN(ktime_get_boot_ns, 125, ##ctx) \ FN(seq_printf, 126, ##ctx) \ FN(seq_write, 127, ##ctx) \ FN(sk_cgroup_id, 128, ##ctx) \ FN(sk_ancestor_cgroup_id, 129, ##ctx) \ FN(ringbuf_output, 130, ##ctx) \ FN(ringbuf_reserve, 131, ##ctx) \ FN(ringbuf_submit, 132, ##ctx) \ FN(ringbuf_discard, 133, ##ctx) \ FN(ringbuf_query, 134, ##ctx) \ FN(csum_level, 135, ##ctx) \ FN(skc_to_tcp6_sock, 136, ##ctx) \ FN(skc_to_tcp_sock, 137, ##ctx) \ FN(skc_to_tcp_timewait_sock, 138, ##ctx) \ FN(skc_to_tcp_request_sock, 139, ##ctx) \ FN(skc_to_udp6_sock, 140, ##ctx) \ FN(get_task_stack, 141, ##ctx) \ FN(load_hdr_opt, 142, ##ctx) \ FN(store_hdr_opt, 143, ##ctx) \ FN(reserve_hdr_opt, 144, ##ctx) \ FN(inode_storage_get, 145, ##ctx) \ FN(inode_storage_delete, 146, ##ctx) \ FN(d_path, 147, ##ctx) \ FN(copy_from_user, 148, ##ctx) \ FN(snprintf_btf, 149, ##ctx) \ FN(seq_printf_btf, 150, ##ctx) \ FN(skb_cgroup_classid, 151, ##ctx) \ FN(redirect_neigh, 152, ##ctx) \ FN(per_cpu_ptr, 153, ##ctx) \ FN(this_cpu_ptr, 154, ##ctx) \ FN(redirect_peer, 155, ##ctx) \ FN(task_storage_get, 156, ##ctx) \ FN(task_storage_delete, 157, ##ctx) \ FN(get_current_task_btf, 158, ##ctx) \ FN(bprm_opts_set, 159, ##ctx) \ FN(ktime_get_coarse_ns, 160, ##ctx) \ FN(ima_inode_hash, 161, ##ctx) \ FN(sock_from_file, 162, ##ctx) \ FN(check_mtu, 163, ##ctx) \ FN(for_each_map_elem, 164, ##ctx) \ FN(snprintf, 165, ##ctx) \ FN(sys_bpf, 166, ##ctx) \ FN(btf_find_by_name_kind, 167, ##ctx) \ FN(sys_close, 168, ##ctx) \ FN(timer_init, 169, ##ctx) \ FN(timer_set_callback, 170, ##ctx) \ FN(timer_start, 171, ##ctx) \ FN(timer_cancel, 172, ##ctx) \ FN(get_func_ip, 173, ##ctx) \ FN(get_attach_cookie, 174, ##ctx) \ FN(task_pt_regs, 175, ##ctx) \ FN(get_branch_snapshot, 176, ##ctx) \ FN(trace_vprintk, 177, ##ctx) \ FN(skc_to_unix_sock, 178, ##ctx) \ 
FN(kallsyms_lookup_name, 179, ##ctx) \ FN(find_vma, 180, ##ctx) \ FN(loop, 181, ##ctx) \ FN(strncmp, 182, ##ctx) \ FN(get_func_arg, 183, ##ctx) \ FN(get_func_ret, 184, ##ctx) \ FN(get_func_arg_cnt, 185, ##ctx) \ FN(get_retval, 186, ##ctx) \ FN(set_retval, 187, ##ctx) \ FN(xdp_get_buff_len, 188, ##ctx) \ FN(xdp_load_bytes, 189, ##ctx) \ FN(xdp_store_bytes, 190, ##ctx) \ FN(copy_from_user_task, 191, ##ctx) \ FN(skb_set_tstamp, 192, ##ctx) \ FN(ima_file_hash, 193, ##ctx) \ FN(kptr_xchg, 194, ##ctx) \ FN(map_lookup_percpu_elem, 195, ##ctx) \ FN(skc_to_mptcp_sock, 196, ##ctx) \ FN(dynptr_from_mem, 197, ##ctx) \ FN(ringbuf_reserve_dynptr, 198, ##ctx) \ FN(ringbuf_submit_dynptr, 199, ##ctx) \ FN(ringbuf_discard_dynptr, 200, ##ctx) \ FN(dynptr_read, 201, ##ctx) \ FN(dynptr_write, 202, ##ctx) \ FN(dynptr_data, 203, ##ctx) \ FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \ FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \ FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \ FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \ FN(ktime_get_tai_ns, 208, ##ctx) \ FN(user_ringbuf_drain, 209, ##ctx) \ FN(cgrp_storage_get, 210, ##ctx) \ FN(cgrp_storage_delete, 211, ##ctx) \ /* */ /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't * know or care about integer value that is now passed as second argument */ #define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name), #define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN) /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ #define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y, enum bpf_func_id { ___BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ enum { BPF_F_RECOMPUTE_CSUM = (1ULL << 0), BPF_F_INVALIDATE_HASH = (1ULL << 1), }; /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. */ enum { BPF_F_HDR_FIELD_MASK = 0xfULL, }; /* BPF_FUNC_l4_csum_replace flags. */ enum { BPF_F_PSEUDO_HDR = (1ULL << 4), BPF_F_MARK_MANGLED_0 = (1ULL << 5), BPF_F_MARK_ENFORCE = (1ULL << 6), }; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ enum { BPF_F_TUNINFO_IPV6 = (1ULL << 0), }; /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ enum { BPF_F_SKIP_FIELD_MASK = 0xffULL, BPF_F_USER_STACK = (1ULL << 8), /* flags used by BPF_FUNC_get_stackid only. */ BPF_F_FAST_STACK_CMP = (1ULL << 9), BPF_F_REUSE_STACKID = (1ULL << 10), /* flags used by BPF_FUNC_get_stack only. */ BPF_F_USER_BUILD_ID = (1ULL << 11), }; /* BPF_FUNC_skb_set_tunnel_key flags. */ enum { BPF_F_ZERO_CSUM_TX = (1ULL << 1), BPF_F_DONT_FRAGMENT = (1ULL << 2), BPF_F_SEQ_NUMBER = (1ULL << 3), BPF_F_NO_TUNNEL_KEY = (1ULL << 4), }; /* BPF_FUNC_skb_get_tunnel_key flags. */ enum { BPF_F_TUNINFO_FLAGS = (1ULL << 4), }; /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ enum { BPF_F_INDEX_MASK = 0xffffffffULL, BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, /* BPF_FUNC_perf_event_output for sk_buff input context. */ BPF_F_CTXLEN_MASK = (0xfffffULL << 32), }; /* Current network namespace */ enum { BPF_F_CURRENT_NETNS = (-1L), }; /* BPF_FUNC_csum_level level values. */ enum { BPF_CSUM_LEVEL_QUERY, BPF_CSUM_LEVEL_INC, BPF_CSUM_LEVEL_DEC, BPF_CSUM_LEVEL_RESET, }; /* BPF_FUNC_skb_adjust_room flags. 
 */
enum {
	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
	BPF_F_ADJ_ROOM_ENCAP_L2_ETH	= (1ULL << 6),
	BPF_F_ADJ_ROOM_DECAP_L3_IPV4	= (1ULL << 7),
	BPF_F_ADJ_ROOM_DECAP_L3_IPV6	= (1ULL << 8),
};

enum {
	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
};

#define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)

/* BPF_FUNC_sysctl_get_name flags. */
enum {
	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
};

/* BPF_FUNC_<kernel_obj>_storage_get flags */
enum {
	BPF_LOCAL_STORAGE_GET_F_CREATE	= (1ULL << 0),
	/* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
	 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
	 */
	BPF_SK_STORAGE_GET_F_CREATE	= BPF_LOCAL_STORAGE_GET_F_CREATE,
};

/* BPF_FUNC_read_branch_records flags. */
enum {
	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
};

/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
 * BPF_FUNC_bpf_ringbuf_output flags.
 */
enum {
	BPF_RB_NO_WAKEUP		= (1ULL << 0),
	BPF_RB_FORCE_WAKEUP		= (1ULL << 1),
};

/* BPF_FUNC_bpf_ringbuf_query flags */
enum {
	BPF_RB_AVAIL_DATA = 0,
	BPF_RB_RING_SIZE = 1,
	BPF_RB_CONS_POS = 2,
	BPF_RB_PROD_POS = 3,
};

/* BPF ring buffer constants */
enum {
	BPF_RINGBUF_BUSY_BIT		= (1U << 31),
	BPF_RINGBUF_DISCARD_BIT		= (1U << 30),
	BPF_RINGBUF_HDR_SZ		= 8,
};

/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
enum {
	BPF_SK_LOOKUP_F_REPLACE		= (1ULL << 0),
	BPF_SK_LOOKUP_F_NO_REUSEPORT	= (1ULL << 1),
};

/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
	BPF_ADJ_ROOM_MAC,
};

/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
enum bpf_hdr_start_off {
	BPF_HDR_START_MAC,
	BPF_HDR_START_NET,
};

/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
	BPF_LWT_ENCAP_SEG6,
	BPF_LWT_ENCAP_SEG6_INLINE,
	BPF_LWT_ENCAP_IP,
};

/* Flags for bpf_bprm_opts_set helper */
enum {
	BPF_F_BPRM_SECUREEXEC	= (1ULL << 0),
};

/* Flags for bpf_redirect and bpf_redirect_map helpers */
enum {
	BPF_F_INGRESS		= (1ULL << 0), /* used for skb path */
	BPF_F_BROADCAST		= (1ULL << 3), /* used for XDP path */
	BPF_F_EXCLUDE_INGRESS	= (1ULL << 4), /* used for XDP path */
#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
};

#define __bpf_md_ptr(type, name)	\
union {					\
	type name;			\
	__u64 :64;			\
} __attribute__((aligned(8)))

/* The enum used in skb->tstamp_type. It specifies the clock type
 * of the time stored in the skb->tstamp.
 */
enum {
	BPF_SKB_TSTAMP_UNSPEC		= 0,	/* DEPRECATED */
	BPF_SKB_TSTAMP_DELIVERY_MONO	= 1,	/* DEPRECATED */
	BPF_SKB_CLOCK_REALTIME		= 0,
	BPF_SKB_CLOCK_MONOTONIC		= 1,
	BPF_SKB_CLOCK_TAI		= 2,
	/* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle,
	 * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid.
	 */
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ...
	 */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	/* ... here. */

	__u32 data_meta;
	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
	__u64 tstamp;
	__u32 wire_len;
	__u32 gso_segs;
	__bpf_md_ptr(struct bpf_sock *, sk);
	__u32 gso_size;
	__u8  tstamp_type;
	__u32 :24;		/* Padding, future use. */
	__u64 hwtstamp;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	union {
		__u16 tunnel_ext;	/* compat */
		__be16 tunnel_flags;
	};
	__u32 tunnel_label;
	union {
		__u32 local_ipv4;
		__u32 local_ipv6[4];
	};
};

/* user accessible mirror of in-kernel xfrm_state.
 * new fields can only be added to the end of this structure
 */
struct bpf_xfrm_state {
	__u32 reqid;
	__u32 spi;	/* Stored in network byte order */
	__u16 family;
	__u16 ext;	/* Padding, future use. */
	union {
		__u32 remote_ipv4;	/* Stored in network byte order */
		__u32 remote_ipv6[4];	/* Stored in network byte order */
	};
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counterparts to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes.
	 *
	 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
	 *    BPF_PROG_TYPE_LWT_XMIT to indicate that the skb had been
	 *    changed and should be routed based on its new L3 header.
	 *    (This is an L3 redirect, as opposed to the L2 redirect
	 *    represented by BPF_REDIRECT above).
	 */
	BPF_LWT_REROUTE = 128,
	/* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
	 *    to indicate that no custom dissection was performed, and a
	 *    fallback to the standard dissector is requested.
	 */
	BPF_FLOW_DISSECTOR_CONTINUE = 129,
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
	__u32 mark;
	__u32 priority;
	/* IP address also allows 1 and 2 bytes access */
	__u32 src_ip4;
	__u32 src_ip6[4];
	__u32 src_port;		/* host byte order */
	__be16 dst_port;	/* network byte order */
	__u16 :16;		/* zero padding */
	__u32 dst_ip4;
	__u32 dst_ip6[4];
	__u32 state;
	__s32 rx_queue_mapping;
};

struct bpf_tcp_sock {
	__u32 snd_cwnd;		/* Sending congestion window */
	__u32 srtt_us;		/* smoothed round trip time << 3 in usecs */
	__u32 rtt_min;
	__u32 snd_ssthresh;	/* Slow start size threshold */
	__u32 rcv_nxt;		/* What we want to receive next */
	__u32 snd_nxt;		/* Next sequence we send */
	__u32 snd_una;		/* First byte we want an ack for */
	__u32 mss_cache;	/* Cached effective mss, not including SACKS */
	__u32 ecn_flags;	/* ECN status bits. */
	__u32 rate_delivered;	/* saved rate sample: packets delivered */
	__u32 rate_interval_us;	/* saved rate sample: time elapsed */
	__u32 packets_out;	/* Packets which are "in flight" */
	__u32 retrans_out;	/* Retransmitted packets out */
	__u32 total_retrans;	/* Total retransmits for entire connection */
	__u32 segs_in;		/* RFC4898 tcpEStatsPerfSegsIn
				 * total number of segments in.
				 */
	__u32 data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
				 * total number of data segments in.
				 */
	__u32 segs_out;		/* RFC4898 tcpEStatsPerfSegsOut
				 * The total number of segments sent.
*/ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut * total number of data segments sent. */ __u32 lost_out; /* Lost packets */ __u32 sacked_out; /* SACK'd packets */ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived * sum(delta(rcv_nxt)), or how many bytes * were acked. */ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. */ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups * total number of DSACK blocks received */ __u32 delivered; /* Total data packets delivered incl. rexmits */ __u32 delivered_ce; /* Like the above but only ECE marked packets */ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ }; struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; /* (Simplified) user return codes for tcx prog type. * A valid tcx program must return one of these defined values. All other * return codes are reserved for future use. Must remain compatible with * their TC_ACT_* counter-parts. For compatibility in behavior, unknown * return codes are mapped to TCX_NEXT. */ enum tcx_action_base { TCX_NEXT = -1, TCX_PASS = 0, TCX_DROP = 2, TCX_REDIRECT = 7, }; struct bpf_xdp_sock { __u32 queue_id; }; #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. * A valid XDP program must return one of these defined values. All other * return codes are reserved for future use. Unknown return codes will * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). */ enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook * new fields must be added to the end of this structure */ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; /* Below access go through struct xdp_rxq_info */ __u32 ingress_ifindex; /* rxq->dev->ifindex */ __u32 rx_queue_index; /* rxq->queue_index */ __u32 egress_ifindex; /* txq->dev->ifindex */ }; /* DEVMAP map-value layout * * The struct data-layout of map-value is a configuration interface. * New members can only be added to the end of this structure. */ struct bpf_devmap_val { __u32 ifindex; /* device index */ union { int fd; /* prog fd on map write */ __u32 id; /* prog id on map read */ } bpf_prog; }; /* CPUMAP map-value layout * * The struct data-layout of map-value is a configuration interface. * New members can only be added to the end of this structure. */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ union { int fd; /* prog fd on map write */ __u32 id; /* prog id on map read */ } bpf_prog; }; enum sk_action { SK_DROP = 0, SK_PASS, }; /* user accessible metadata for SK_MSG packet hook, new fields must * be added to the end of this structure */ struct sk_msg_md { __bpf_md_ptr(void *, data); __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 size; /* Total size of sk_msg */ __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ }; struct sk_reuseport_md { /* * Start of directly accessible data. It begins from * the tcp/udp header. 
*/ __bpf_md_ptr(void *, data); /* End of directly accessible data */ __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) * could be less than this "len". Those bytes could be * indirectly read by a helper "bpf_skb_load_bytes()". */ __u32 len; /* * Eth protocol in the mac header (network byte order). e.g. * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) */ __u32 eth_protocol; __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ /* When reuse->migrating_sk is NULL, it is selecting a sk for the * new incoming connection request (e.g. selecting a listen sk for * the received SYN in the TCP case). reuse->sk is one of the sk * in the reuseport group. The bpf prog can use reuse->sk to learn * the local listening ip/port without looking into the skb. * * When reuse->migrating_sk is not NULL, reuse->sk is closed and * reuse->migrating_sk is the socket that needs to be migrated * to another listening socket. migrating_sk could be a fullsock * sk that is fully established or a reqsk that is in-the-middle * of 3-way handshake. */ __bpf_md_ptr(struct bpf_sock *, sk); __bpf_md_ptr(struct bpf_sock *, migrating_sk); }; #define BPF_TAG_SIZE 8 struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; __aligned_u64 jited_prog_insns; __aligned_u64 xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; __u32 :31; /* alignment pad */ __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __aligned_u64 jited_ksyms; __aligned_u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __aligned_u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __aligned_u64 line_info; __aligned_u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __aligned_u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; __u32 verified_insns; __u32 attach_btf_obj_id; __u32 attach_btf_id; } __attribute__((aligned(8))); struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 btf_vmlinux_value_type_id; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); struct bpf_btf_info { __aligned_u64 btf; __u32 btf_size; __u32 id; __aligned_u64 name; __u32 name_len; __u32 kernel_btf; } __attribute__((aligned(8))); struct bpf_link_info { __u32 type; __u32 id; __u32 prog_id; union { struct { __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ __u32 tp_name_len; /* in/out: tp_name buffer len */ } raw_tracepoint; struct { __u32 attach_type; __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ __u32 target_btf_id; /* BTF type id inside the object */ } tracing; struct { __u64 cgroup_id; __u32 attach_type; } cgroup; struct { __aligned_u64 target_name; /* in/out: target_name buffer ptr */ __u32 target_name_len; /* in/out: target_name buffer len */ /* If the iter specific field is 32 bits, it can be put * in the first or second union. Otherwise it should be * put in the second union. 
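 *
 * For example, the 32-bit map.map_id below sits in the first
 * union, while the cgroup target, whose cgroup_id is 64 bits,
 * is placed in the second union.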
*/ union { struct { __u32 map_id; } map; }; union { struct { __u64 cgroup_id; __u32 order; } cgroup; struct { __u32 tid; __u32 pid; } task; }; } iter; struct { __u32 netns_ino; __u32 attach_type; } netns; struct { __u32 ifindex; } xdp; struct { __u32 map_id; } struct_ops; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { __aligned_u64 addrs; __u32 count; /* in/out: kprobe_multi function count */ __u32 flags; __u64 missed; __aligned_u64 cookies; } kprobe_multi; struct { __aligned_u64 path; __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; __aligned_u64 cookies; __u32 path_size; /* in/out: real path size on success, including zero byte */ __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */ __u32 flags; __u32 pid; } uprobe_multi; struct { __u32 type; /* enum bpf_perf_event_type */ __u32 :32; union { struct { __aligned_u64 file_name; /* in/out */ __u32 name_len; __u32 offset; /* offset from file_name */ __u64 cookie; } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ struct { __aligned_u64 func_name; /* in/out */ __u32 name_len; __u32 offset; /* offset from func_name */ __u64 addr; __u64 missed; __u64 cookie; } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ struct { __aligned_u64 tp_name; /* in/out */ __u32 name_len; __u32 :32; __u64 cookie; } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */ struct { __u64 config; __u32 type; __u32 :32; __u64 cookie; } event; /* BPF_PERF_EVENT_EVENT */ }; } perf_event; struct { __u32 ifindex; __u32 attach_type; } tcx; struct { __u32 ifindex; __u32 attach_type; } netkit; struct { __u32 map_id; __u32 attach_type; } sockmap; }; } __attribute__((aligned(8))); /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order */ __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ __bpf_md_ptr(struct bpf_sock *, sk); }; /* User bpf_sock_ops struct to access socket values and specify request ops * and their replies. * Some of this fields are in network (bigendian) byte order and may need * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 
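 *
 * Illustrative sketch (not part of the UAPI header): remote_port
 * below is stored in network byte order while local_port is in host
 * byte order, so only the former needs a conversion, e.g.:
 *
 *	__u32 dport = bpf_ntohl(skops->remote_port);
 *	__u32 sport = skops->local_port;
 *
 * with skops being the program's struct bpf_sock_ops context.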
* New fields can only be added at the end of this structure */ struct bpf_sock_ops { __u32 op; union { __u32 args[4]; /* Optionally passed to bpf program */ __u32 reply; /* Returned by bpf program */ __u32 replylong[4]; /* Optionally returned by bpf prog */ }; __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 is_fullsock; /* Some TCP fields are only valid if * there is a full socket. If not, the * fields read as zero. */ __u32 snd_cwnd; __u32 srtt_us; /* Averaged RTT << 3 in usecs */ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; __bpf_md_ptr(struct bpf_sock *, sk); /* [skb_data, skb_data_end) covers the whole TCP header. * * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the * header has not been written. * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have * been written so far. * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes * the 3WHS. * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes * the 3WHS. * * bpf_load_hdr_opt() can also be used to read a particular option. */ __bpf_md_ptr(void *, skb_data); __bpf_md_ptr(void *, skb_data_end); __u32 skb_len; /* The total length of a packet. * It includes the header, options, * and payload. */ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides * an easy way to check for tcp_flags * without parsing skb_data. * * In particular, the skb_tcp_flags * will still be available in * BPF_SOCK_OPS_HDR_OPT_LEN even though * the outgoing header has not * been written yet. */ __u64 skb_hwtstamp; }; /* Definitions for bpf_sock_ops_cb_flags */ enum { BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), /* Call bpf for all received TCP headers. The bpf prog will be * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB * * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB * for the header option related helpers that will be useful * to the bpf programs. * * It could be used at the client/active side (i.e. connect() side) * when the server told it that the server was in syncookie * mode and required the active side to resend the bpf-written * options. The active side can keep writing the bpf-options until * it received a valid packet from the server side to confirm * the earlier packet (and options) has been received. The later * example patch is using it like this at the active side when the * server is in syncookie mode. * * The bpf prog will usually turn this off in the common cases. */ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), /* Call bpf when kernel has received a header option that * the kernel cannot handle. The bpf prog will be called under * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. 
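 *
 * (Illustrative, not part of the UAPI header: a program enables any
 * of these callbacks at run time with the bpf_sock_ops_cb_flags_set()
 * helper, e.g.
 *
 *	bpf_sock_ops_cb_flags_set(skops,
 *				  skops->bpf_sock_ops_cb_flags |
 *				  BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
 *
 * typically from one of the ESTABLISHED callbacks.)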
* * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB * for the header option related helpers that will be useful * to the bpf programs. */ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), /* Call bpf when the kernel is writing header options for the * outgoing packet. The bpf prog will first be called * to reserve space in a skb under * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then * the bpf prog will be called to write the header option(s) * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. * * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option * related helpers that will be useful to the bpf programs. * * The kernel gets its chance to reserve space and write * options first before the BPF program does. */ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), /* Mask of all currently supported cb flags */ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, }; /* List of known BPF sock_ops operators. * New entries can only be added at the end */ enum { BPF_SOCK_OPS_VOID, BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or * -1 if default value should be used */ BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized * window (in packets) or -1 if default * value should be used */ BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an * active connection is initialized */ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an * active connection is * established */ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a * passive connection is * established */ BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control * needs ECN */ BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is * based on the path and may be * dependent on the congestion control * algorithm. In general it indicates * a congestion threshold. RTTs above * this indicate congestion */ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. * Arg1: value of icsk_retransmits * Arg2: value of icsk_rto * Arg3: whether RTO has expired */ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. * Arg1: sequence number of 1st byte * Arg2: # segments * Arg3: return value of * tcp_transmit_skb (0 => success) */ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. * Arg1: old_state * Arg2: new_state */ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after * socket transition to LISTEN state. */ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. * Arg1: measured RTT input (mrtt) * Arg2: updated srtt */ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. * It will be called to handle * the packets received at * an already established * connection. * * sock_ops->skb_data: * Referring to the received skb. * It covers the TCP header only. * * bpf_load_hdr_opt() can also * be used to search for a * particular option. */ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the * header option later in * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. * Arg1: bool want_cookie. (in * writing SYNACK only) * * sock_ops->skb_data: * Not available because no header has * been written yet. * * sock_ops->skb_tcp_flags: * The tcp_flags of the * outgoing skb. (e.g. SYN, ACK, FIN). * * bpf_reserve_hdr_opt() should * be used to reserve space. */ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options * Arg1: bool want_cookie. (in * writing SYNACK only) * * sock_ops->skb_data: * Referring to the outgoing skb. * It covers the TCP header * that has already been written * by the kernel and the * earlier bpf-progs. 
* * sock_ops->skb_tcp_flags: * The tcp_flags of the outgoing * skb. (e.g. SYN, ACK, FIN). * * bpf_store_hdr_opt() should * be used to write the * option. * * bpf_load_hdr_opt() can also * be used to search for a * particular option that * has already been written * by the kernel or the * earlier bpf-progs. */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect * changes between the TCP and BPF versions. Ideally this should never happen. * If it does, we need to add code to convert them before calling * the BPF sock_ops function. */ enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT, BPF_TCP_SYN_RECV, BPF_TCP_FIN_WAIT1, BPF_TCP_FIN_WAIT2, BPF_TCP_TIME_WAIT, BPF_TCP_CLOSE, BPF_TCP_CLOSE_WAIT, BPF_TCP_LAST_ACK, BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, BPF_TCP_BOUND_INACTIVE, BPF_TCP_MAX_STATES /* Leave at the end! */ }; enum { TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */ /* Copy the SYN pkt to optval * * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit * to only getting from the saved_syn. It can either get the * syn packet from: * * 1. the just-received SYN packet (only available when writing the * SYNACK). It will be useful when it is not necessary to * save the SYN packet for latter use. It is also the only way * to get the SYN during syncookie mode because the syn * packet cannot be saved during syncookie. * * OR * * 2. the earlier saved syn which was done by * bpf_setsockopt(TCP_SAVE_SYN). * * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the * SYN packet is obtained. * * If the bpf-prog does not need the IP[46] header, the * bpf-prog can avoid parsing the IP header by using * TCP_BPF_SYN. Otherwise, the bpf-prog can get both * IP[46] and TCP header by using TCP_BPF_SYN_IP. * * >0: Total number of bytes copied * -ENOSPC: Not enough space in optval. Only optlen number of * bytes is copied. * -ENOENT: The SYN skb is not available now and the earlier SYN pkt * is not saved by setsockopt(TCP_SAVE_SYN). */ TCP_BPF_SYN = 1005, /* Copy the TCP header */ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */ }; enum { BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), }; /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. */ enum { BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the * total option spaces * required for an established * sk in order to calculate the * MSS. No skb is actually * sent. */ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode * when sending a SYN. 
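 *
 * Illustrative sketch (not part of the UAPI header): the two-step
 * flow pairs bpf_reserve_hdr_opt() with bpf_store_hdr_opt(), shown
 * here for a made-up 4-byte experimental option:
 *
 *	struct { __u8 kind, len; __u16 val; } opt = {
 *		.kind = 254, .len = 4, .val = 1,
 *	};
 *
 *	switch (skops->op) {
 *	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *		bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
 *		break;
 *	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *		bpf_store_hdr_opt(skops, &opt, sizeof(opt), 0);
 *		break;
 *	}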
*/ }; struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; enum { BPF_DEVCG_ACC_MKNOD = (1ULL << 0), BPF_DEVCG_ACC_READ = (1ULL << 1), BPF_DEVCG_ACC_WRITE = (1ULL << 2), }; enum { BPF_DEVCG_DEV_BLOCK = (1ULL << 0), BPF_DEVCG_DEV_CHAR = (1ULL << 1), }; struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ __u32 access_type; __u32 major; __u32 minor; }; struct bpf_raw_tracepoint_args { __u64 args[0]; }; /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), BPF_FIB_LOOKUP_TBID = (1U << 3), BPF_FIB_LOOKUP_SRC = (1U << 4), BPF_FIB_LOOKUP_MARK = (1U << 5), }; enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop */ __u8 family; /* set if lookup is to consider L4 data - e.g., FIB rules */ __u8 l4_protocol; __be16 sport; __be16 dport; union { /* used for MTU check */ /* input to lookup */ __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ /* output: MTU value */ __u16 mtu_result; } __attribute__((packed, aligned(2))); /* input: L3 device index for lookup * output: device index from FIB lookup */ __u32 ifindex; union { /* inputs to lookup */ __u8 tos; /* AF_INET */ __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; }; /* input: source address to consider for lookup * output: source address result from lookup */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ }; /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in * network header. output: bpf_fib_lookup sets to gateway address * if FIB lookup returns gateway route */ union { __be32 ipv4_dst; __u32 ipv6_dst[4]; /* in6_addr; network order */ }; union { struct { /* output */ __be16 h_vlan_proto; __be16 h_vlan_TCI; }; /* input: when accompanied with the * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a * specific routing table to use for the fib lookup. 
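 *
 * Illustrative sketch (not part of the UAPI header): an XDP or tc
 * program fills in the input members and calls the bpf_fib_lookup()
 * helper, here with iph being an already bounds-checked IPv4 header:
 *
 *	struct bpf_fib_lookup fib = {
 *		.family    = AF_INET,
 *		.ipv4_src  = iph->saddr,
 *		.ipv4_dst  = iph->daddr,
 *		.ifindex   = ctx->ingress_ifindex,
 *	};
 *
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS)
 *		(forward using fib.ifindex, fib.smac and fib.dmac)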
*/ __u32 tbid; }; union { /* input */ struct { __u32 mark; /* policy routing */ /* 2 4-byte holes for input */ }; /* output: source and dest mac */ struct { __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; }; }; struct bpf_redir_neigh { /* network family for lookup (AF_INET, AF_INET6) */ __u32 nh_family; /* network address of nexthop; skips fib lookup to find gateway */ union { __be32 ipv4_nh; __u32 ipv6_nh[4]; /* in6_addr; network order */ }; }; /* bpf_check_mtu flags*/ enum bpf_check_mtu_flags { BPF_MTU_CHK_SEGS = (1U << 0), }; enum bpf_check_mtu_ret { BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_UPROBE, /* filename + offset */ BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; enum { BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; /* ETH_P_* of valid addrs */ __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; /* in6_addr; network order */ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; __u32 flags; __be32 flow_label; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; struct bpf_spin_lock { __u32 val; }; struct bpf_timer { __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_wq { __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_dynptr { __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_head { __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_node { __u64 __opaque[3]; } __attribute__((aligned(8))); struct bpf_rb_root { __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_rb_node { __u64 __opaque[4]; } __attribute__((aligned(8))); struct bpf_refcount { __u32 __opaque[1]; } __attribute__((aligned(4))); struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. */ __u32 file_pos; /* Sysctl file position to read from, write to. * Allows 1,2,4-byte read an 4-byte write. */ }; struct bpf_sockopt { __bpf_md_ptr(struct bpf_sock *, sk); __bpf_md_ptr(void *, optval); __bpf_md_ptr(void *, optval_end); __s32 level; __s32 optname; __s32 optlen; __s32 retval; }; struct bpf_pidns_info { __u32 pid; __u32 tgid; }; /* User accessible data for SK_LOOKUP programs. Add new fields at the end. 
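 *
 * Illustrative sketch (not part of the UAPI header): a sk_lookup
 * program typically picks a listening socket from a map and assigns
 * it to the context, with socket_map being a made-up
 * BPF_MAP_TYPE_SOCKMAP:
 *
 *	struct bpf_sock *sk = bpf_map_lookup_elem(&socket_map, &key);
 *
 *	if (!sk)
 *		return SK_DROP;
 *	err = bpf_sk_assign(ctx, sk, 0);
 *	bpf_sk_release(sk);
 *	return err ? SK_DROP : SK_PASS;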
*/ struct bpf_sk_lookup { union { __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ }; __u32 family; /* Protocol family (AF_INET, AF_INET6) */ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ __u32 remote_ip4; /* Network byte order */ __u32 remote_ip6[4]; /* Network byte order */ __be16 remote_port; /* Network byte order */ __u16 :16; /* Zero padding */ __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */ }; /* * struct btf_ptr is used for typed pointer representation; the * type id is used to render the pointer data as the appropriate type * via the bpf_snprintf_btf() helper described above. A flags field - * potentially to specify additional details about the BTF pointer * (rather than its mode of display) - is included for future use. * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. */ struct btf_ptr { void *ptr; __u32 type_id; __u32 flags; /* BTF ptr flags; unused at present. */ }; /* * Flags to control bpf_snprintf_btf() behaviour. * - BTF_F_COMPACT: no formatting around type information * - BTF_F_NONAME: no struct/union member names/types * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; * equivalent to %px. * - BTF_F_ZERO: show zero-valued struct/union members; they * are not displayed by default */ enum { BTF_F_COMPACT = (1ULL << 0), BTF_F_NONAME = (1ULL << 1), BTF_F_PTR_RAW = (1ULL << 2), BTF_F_ZERO = (1ULL << 3), }; /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value * has to be adjusted by relocations. It is emitted by llvm and passed to * libbpf and later to the kernel. */ enum bpf_core_relo_kind { BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */ }; /* * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf * and from libbpf to the kernel. * * CO-RE relocation captures the following data: * - insn_off - instruction offset (in bytes) within a BPF program that needs * its insn->imm field to be relocated with actual field info; * - type_id - BTF type ID of the "root" (containing) entity of a relocatable * type or field; * - access_str_off - offset into corresponding .BTF string section. String * interpretation depends on specific relocation kind: * - for field-based relocations, string encodes an accessed field using * a sequence of field and array indices, separated by colon (:). It's * conceptually very close to LLVM's getelementptr ([0]) instruction's * arguments for identifying offset to a field. 
* - for type-based relocations, string is expected to be just "0"; * - for enum value-based relocations, string contains an index of enum * value within its enum type; * - kind - one of enum bpf_core_relo_kind; * * Example: * struct sample { * int a; * struct { * int b[10]; * }; * }; * * struct sample *s = ...; * int *x = &s->a; // encoded as "0:0" (a is field #0) * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, * // b is field #0 inside anon struct, accessing elem #5) * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) * * type_id for all relocs in this example will capture BTF type id of * `struct sample`. * * Such relocation is emitted when using __builtin_preserve_access_index() * Clang built-in, passing expression that captures field address, e.g.: * * bpf_probe_read(&dst, sizeof(dst), * __builtin_preserve_access_index(&src->a.b.c)); * * In this case Clang will emit field relocation recording necessary data to * be able to find offset of embedded `a.b.c` field within `src` struct. * * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction */ struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; enum bpf_core_relo_kind kind; }; /* * Flags to control bpf_timer_start() behaviour. * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is * relative to current time. * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller. */ enum { BPF_F_TIMER_ABS = (1ULL << 0), BPF_F_TIMER_CPU_PIN = (1ULL << 1), }; /* BPF numbers iterator state */ struct bpf_iter_num { /* opaque iterator state; having __u64 here allows to preserve correct * alignment requirements in vmlinux.h, generated from BTF */ __u64 __opaque[1]; } __attribute__((aligned(8))); /* * Flags to control BPF kfunc behaviour. * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective * helper documentation for details.) */ enum bpf_kfunc_flags { BPF_F_PAD_ZEROS = (1ULL << 0), }; #endif /* _UAPI__LINUX_BPF_H__ */ xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/pkt_sched.h0000644000175100001660000006042214706536574022677 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __LINUX_PKT_SCHED_H #define __LINUX_PKT_SCHED_H #include <linux/types.h> /* Logical priority bands not depending on specific packet scheduler. Every scheduler will map them to real traffic classes, if it has no more precise mechanism to classify packets. These numbers have no special meaning, though their coincidence with obsolete IPv6 values is not occasional :-). New IPv6 drafts preferred full anarchy inspired by diffserv group. Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy class, actually, as rule it will be handled with more care than filler or even bulk. */ #define TC_PRIO_BESTEFFORT 0 #define TC_PRIO_FILLER 1 #define TC_PRIO_BULK 2 #define TC_PRIO_INTERACTIVE_BULK 4 #define TC_PRIO_INTERACTIVE 6 #define TC_PRIO_CONTROL 7 #define TC_PRIO_MAX 15 /* Generic queue statistics, available for all the elements. Particular schedulers may have also their private records.
*/ struct tc_stats { __u64 bytes; /* Number of enqueued bytes */ __u32 packets; /* Number of enqueued packets */ __u32 drops; /* Packets dropped because of lack of resources */ __u32 overlimits; /* Number of throttle events when this * flow goes out of allocated bandwidth */ __u32 bps; /* Current flow byte rate */ __u32 pps; /* Current flow packet rate */ __u32 qlen; __u32 backlog; }; struct tc_estimator { signed char interval; unsigned char ewma_log; }; /* "Handles" --------- All the traffic control objects have 32bit identifiers, or "handles". They can be considered as opaque numbers from user API viewpoint, but actually they always consist of two fields: major and minor numbers, which are interpreted by kernel specially, that may be used by applications, though not recommended. F.e. qdisc handles always have minor number equal to zero, classes (or flows) have major equal to parent qdisc major, and minor uniquely identifying class inside qdisc. Macros to manipulate handles: */ #define TC_H_MAJ_MASK (0xFFFF0000U) #define TC_H_MIN_MASK (0x0000FFFFU) #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) #define TC_H_UNSPEC (0U) #define TC_H_ROOT (0xFFFFFFFFU) #define TC_H_INGRESS (0xFFFFFFF1U) #define TC_H_CLSACT TC_H_INGRESS #define TC_H_MIN_PRIORITY 0xFFE0U #define TC_H_MIN_INGRESS 0xFFF2U #define TC_H_MIN_EGRESS 0xFFF3U /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ enum tc_link_layer { TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ TC_LINKLAYER_ETHERNET, TC_LINKLAYER_ATM, }; #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ struct tc_ratespec { unsigned char cell_log; __u8 linklayer; /* lower 4 bits */ unsigned short overhead; short cell_align; unsigned short mpu; __u32 rate; }; #define TC_RTAB_SIZE 1024 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; }; enum { TCA_STAB_UNSPEC, TCA_STAB_BASE, TCA_STAB_DATA, __TCA_STAB_MAX }; #define TCA_STAB_MAX (__TCA_STAB_MAX - 1) /* FIFO section */ struct tc_fifo_qopt { __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ }; /* SKBPRIO section */ /* * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able * to map one to one the DS field of IPV4 and IPV6 headers. * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. */ #define SKBPRIO_MAX_PRIORITY 64 struct tc_skbprio_qopt { __u32 limit; /* Queue length in packets. */ }; /* PRIO section */ #define TCQ_PRIO_BANDS 16 #define TCQ_MIN_PRIO_BANDS 2 struct tc_prio_qopt { int bands; /* Number of bands */ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ }; /* MULTIQ section */ struct tc_multiq_qopt { __u16 bands; /* Number of bands */ __u16 max_bands; /* Maximum number of queues */ }; /* PLUG section */ #define TCQ_PLUG_BUFFER 0 #define TCQ_PLUG_RELEASE_ONE 1 #define TCQ_PLUG_RELEASE_INDEFINITE 2 #define TCQ_PLUG_LIMIT 3 struct tc_plug_qopt { /* TCQ_PLUG_BUFFER: Inset a plug into the queue and * buffer any incoming packets * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head * to beginning of the next plug. * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. * Stop buffering packets until the next TCQ_PLUG_BUFFER * command is received (just act as a pass-thru queue). 
* TCQ_PLUG_LIMIT: Increase/decrease queue size */ int action; __u32 limit; }; /* TBF section */ struct tc_tbf_qopt { struct tc_ratespec rate; struct tc_ratespec peakrate; __u32 limit; __u32 buffer; __u32 mtu; }; enum { TCA_TBF_UNSPEC, TCA_TBF_PARMS, TCA_TBF_RTAB, TCA_TBF_PTAB, TCA_TBF_RATE64, TCA_TBF_PRATE64, TCA_TBF_BURST, TCA_TBF_PBURST, TCA_TBF_PAD, __TCA_TBF_MAX, }; #define TCA_TBF_MAX (__TCA_TBF_MAX - 1) /* TEQL section */ /* TEQL does not require any parameters */ /* SFQ section */ struct tc_sfq_qopt { unsigned quantum; /* Bytes per round allocated to flow */ int perturb_period; /* Period of hash perturbation */ __u32 limit; /* Maximal packets in queue */ unsigned divisor; /* Hash divisor */ unsigned flows; /* Maximal number of flows */ }; struct tc_sfqred_stats { __u32 prob_drop; /* Early drops, below max threshold */ __u32 forced_drop; /* Early drops, after max threshold */ __u32 prob_mark; /* Marked packets, below max threshold */ __u32 forced_mark; /* Marked packets, after max threshold */ __u32 prob_mark_head; /* Marked packets, below max threshold */ __u32 forced_mark_head;/* Marked packets, after max threshold */ }; struct tc_sfq_qopt_v1 { struct tc_sfq_qopt v0; unsigned int depth; /* max number of packets per flow */ unsigned int headdrop; /* SFQRED parameters */ __u32 limit; /* HARD maximal flow queue length (bytes) */ __u32 qth_min; /* Min average length threshold (bytes) */ __u32 qth_max; /* Max average length threshold (bytes) */ unsigned char Wlog; /* log(W) */ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ unsigned char flags; __u32 max_P; /* probability, high resolution */ /* SFQRED stats */ struct tc_sfqred_stats stats; }; struct tc_sfq_xstats { __s32 allot; }; /* RED section */ enum { TCA_RED_UNSPEC, TCA_RED_PARMS, TCA_RED_STAB, TCA_RED_MAX_P, __TCA_RED_MAX, }; #define TCA_RED_MAX (__TCA_RED_MAX - 1) struct tc_red_qopt { __u32 limit; /* HARD maximal queue length (bytes) */ __u32 qth_min; /* Min average length threshold (bytes) */ __u32 qth_max; /* Max average length threshold (bytes) */ unsigned char Wlog; /* log(W) */ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ unsigned char flags; #define TC_RED_ECN 1 #define TC_RED_HARDDROP 2 #define TC_RED_ADAPTATIVE 4 }; struct tc_red_xstats { __u32 early; /* Early drops */ __u32 pdrop; /* Drops due to queue limits */ __u32 other; /* Drops due to drop() calls */ __u32 marked; /* Marked packets */ }; /* GRED section */ #define MAX_DPs 16 enum { TCA_GRED_UNSPEC, TCA_GRED_PARMS, TCA_GRED_STAB, TCA_GRED_DPS, TCA_GRED_MAX_P, TCA_GRED_LIMIT, TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ __TCA_GRED_MAX, }; #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) enum { TCA_GRED_VQ_ENTRY_UNSPEC, TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ __TCA_GRED_VQ_ENTRY_MAX, }; #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) enum { TCA_GRED_VQ_UNSPEC, TCA_GRED_VQ_PAD, TCA_GRED_VQ_DP, /* u32 */ TCA_GRED_VQ_STAT_BYTES, /* u64 */ TCA_GRED_VQ_STAT_PACKETS, /* u32 */ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ TCA_GRED_VQ_STAT_PDROP, /* u32 */ TCA_GRED_VQ_STAT_OTHER, /* u32 */ TCA_GRED_VQ_FLAGS, /* u32 */ __TCA_GRED_VQ_MAX }; #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) struct tc_gred_qopt { __u32 limit; /* HARD maximal queue length (bytes) */ __u32 qth_min; /* Min 
average length threshold (bytes) */ __u32 qth_max; /* Max average length threshold (bytes) */ __u32 DP; /* up to 2^32 DPs */ __u32 backlog; __u32 qave; __u32 forced; __u32 early; __u32 other; __u32 pdrop; __u8 Wlog; /* log(W) */ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ __u8 Scell_log; /* cell size for idle damping */ __u8 prio; /* prio of this VQ */ __u32 packets; __u32 bytesin; }; /* gred setup */ struct tc_gred_sopt { __u32 DPs; __u32 def_DP; __u8 grio; __u8 flags; __u16 pad1; }; /* CHOKe section */ enum { TCA_CHOKE_UNSPEC, TCA_CHOKE_PARMS, TCA_CHOKE_STAB, TCA_CHOKE_MAX_P, __TCA_CHOKE_MAX, }; #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) struct tc_choke_qopt { __u32 limit; /* Hard queue length (packets) */ __u32 qth_min; /* Min average threshold (packets) */ __u32 qth_max; /* Max average threshold (packets) */ unsigned char Wlog; /* log(W) */ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ unsigned char flags; /* see RED flags */ }; struct tc_choke_xstats { __u32 early; /* Early drops */ __u32 pdrop; /* Drops due to queue limits */ __u32 other; /* Drops due to drop() calls */ __u32 marked; /* Marked packets */ __u32 matched; /* Drops due to flow match */ }; /* HTB section */ #define TC_HTB_NUMPRIO 8 #define TC_HTB_MAXDEPTH 8 #define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ struct tc_htb_opt { struct tc_ratespec rate; struct tc_ratespec ceil; __u32 buffer; __u32 cbuffer; __u32 quantum; __u32 level; /* out only */ __u32 prio; }; struct tc_htb_glob { __u32 version; /* to match HTB/TC */ __u32 rate2quantum; /* bps->quantum divisor */ __u32 defcls; /* default class number */ __u32 debug; /* debug flags */ /* stats */ __u32 direct_pkts; /* count of non shaped packets */ }; enum { TCA_HTB_UNSPEC, TCA_HTB_PARMS, TCA_HTB_INIT, TCA_HTB_CTAB, TCA_HTB_RTAB, TCA_HTB_DIRECT_QLEN, TCA_HTB_RATE64, TCA_HTB_CEIL64, TCA_HTB_PAD, TCA_HTB_OFFLOAD, __TCA_HTB_MAX, }; #define TCA_HTB_MAX (__TCA_HTB_MAX - 1) struct tc_htb_xstats { __u32 lends; __u32 borrows; __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' 
*/ __s32 tokens; __s32 ctokens; }; /* HFSC section */ struct tc_hfsc_qopt { __u16 defcls; /* default class */ }; struct tc_service_curve { __u32 m1; /* slope of the first segment in bps */ __u32 d; /* x-projection of the first segment in us */ __u32 m2; /* slope of the second segment in bps */ }; struct tc_hfsc_stats { __u64 work; /* total work done */ __u64 rtwork; /* work done by real-time criteria */ __u32 period; /* current period */ __u32 level; /* class level in hierarchy */ }; enum { TCA_HFSC_UNSPEC, TCA_HFSC_RSC, TCA_HFSC_FSC, TCA_HFSC_USC, __TCA_HFSC_MAX, }; #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) /* Network emulator */ enum { TCA_NETEM_UNSPEC, TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST, TCA_NETEM_REORDER, TCA_NETEM_CORRUPT, TCA_NETEM_LOSS, TCA_NETEM_RATE, TCA_NETEM_ECN, TCA_NETEM_RATE64, TCA_NETEM_PAD, TCA_NETEM_LATENCY64, TCA_NETEM_JITTER64, TCA_NETEM_SLOT, TCA_NETEM_SLOT_DIST, __TCA_NETEM_MAX, }; #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) struct tc_netem_qopt { __u32 latency; /* added delay (us) */ __u32 limit; /* fifo limit (packets) */ __u32 loss; /* random packet loss (0=none ~0=100%) */ __u32 gap; /* re-ordering gap (0 for none) */ __u32 duplicate; /* random packet dup (0=none ~0=100%) */ __u32 jitter; /* random jitter in latency (us) */ }; struct tc_netem_corr { __u32 delay_corr; /* delay correlation */ __u32 loss_corr; /* packet loss correlation */ __u32 dup_corr; /* duplicate correlation */ }; struct tc_netem_reorder { __u32 probability; __u32 correlation; }; struct tc_netem_corrupt { __u32 probability; __u32 correlation; }; struct tc_netem_rate { __u32 rate; /* byte/s */ __s32 packet_overhead; __u32 cell_size; __s32 cell_overhead; }; struct tc_netem_slot { __s64 min_delay; /* nsec */ __s64 max_delay; __s32 max_packets; __s32 max_bytes; __s64 dist_delay; /* nsec */ __s64 dist_jitter; /* nsec */ }; enum { NETEM_LOSS_UNSPEC, NETEM_LOSS_GI, /* General Intuitive - 4 state model */ NETEM_LOSS_GE, /* Gilbert Elliot models */ __NETEM_LOSS_MAX }; #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) /* State transition probabilities for 4 state model */ struct tc_netem_gimodel { __u32 p13; __u32 p31; __u32 p32; __u32 p14; __u32 p23; }; /* Gilbert-Elliot models */ struct tc_netem_gemodel { __u32 p; __u32 r; __u32 h; __u32 k1; }; #define NETEM_DIST_SCALE 8192 #define NETEM_DIST_MAX 16384 /* DRR */ enum { TCA_DRR_UNSPEC, TCA_DRR_QUANTUM, __TCA_DRR_MAX }; #define TCA_DRR_MAX (__TCA_DRR_MAX - 1) struct tc_drr_stats { __u32 deficit; }; /* MQPRIO */ #define TC_QOPT_BITMASK 15 #define TC_QOPT_MAX_QUEUE 16 enum { TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ __TC_MQPRIO_HW_OFFLOAD_MAX }; #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) enum { TC_MQPRIO_MODE_DCB, TC_MQPRIO_MODE_CHANNEL, __TC_MQPRIO_MODE_MAX }; #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) enum { TC_MQPRIO_SHAPER_DCB, TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ __TC_MQPRIO_SHAPER_MAX }; #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) struct tc_mqprio_qopt { __u8 num_tc; __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; __u8 hw; __u16 count[TC_QOPT_MAX_QUEUE]; __u16 offset[TC_QOPT_MAX_QUEUE]; }; #define TC_MQPRIO_F_MODE 0x1 #define TC_MQPRIO_F_SHAPER 0x2 #define TC_MQPRIO_F_MIN_RATE 0x4 #define TC_MQPRIO_F_MAX_RATE 0x8 enum { TCA_MQPRIO_UNSPEC, TCA_MQPRIO_MODE, TCA_MQPRIO_SHAPER, TCA_MQPRIO_MIN_RATE64, TCA_MQPRIO_MAX_RATE64, __TCA_MQPRIO_MAX, }; #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) /* SFB */ enum { TCA_SFB_UNSPEC, 
TCA_SFB_PARMS, __TCA_SFB_MAX, }; #define TCA_SFB_MAX (__TCA_SFB_MAX - 1) /* * Note: increment, decrement are Q0.16 fixed-point values. */ struct tc_sfb_qopt { __u32 rehash_interval; /* delay between hash move, in ms */ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ __u32 max; /* max len of qlen_min */ __u32 bin_size; /* maximum queue length per bin */ __u32 increment; /* probability increment, (d1 in Blue) */ __u32 decrement; /* probability decrement, (d2 in Blue) */ __u32 limit; /* max SFB queue length */ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ __u32 penalty_burst; }; struct tc_sfb_xstats { __u32 earlydrop; __u32 penaltydrop; __u32 bucketdrop; __u32 queuedrop; __u32 childdrop; /* drops in child qdisc */ __u32 marked; __u32 maxqlen; __u32 maxprob; __u32 avgprob; }; #define SFB_MAX_PROB 0xFFFF /* QFQ */ enum { TCA_QFQ_UNSPEC, TCA_QFQ_WEIGHT, TCA_QFQ_LMAX, __TCA_QFQ_MAX }; #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) struct tc_qfq_stats { __u32 weight; __u32 lmax; }; /* CODEL */ enum { TCA_CODEL_UNSPEC, TCA_CODEL_TARGET, TCA_CODEL_LIMIT, TCA_CODEL_INTERVAL, TCA_CODEL_ECN, TCA_CODEL_CE_THRESHOLD, __TCA_CODEL_MAX }; #define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) struct tc_codel_xstats { __u32 maxpacket; /* largest packet we've seen so far */ __u32 count; /* how many drops we've done since the last time we * entered dropping state */ __u32 lastcount; /* count at entry to dropping state */ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ __s32 drop_next; /* time to drop next packet */ __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ __u32 dropping; /* are we in dropping state ? 
*/ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ }; /* FQ_CODEL */ enum { TCA_FQ_CODEL_UNSPEC, TCA_FQ_CODEL_TARGET, TCA_FQ_CODEL_LIMIT, TCA_FQ_CODEL_INTERVAL, TCA_FQ_CODEL_ECN, TCA_FQ_CODEL_FLOWS, TCA_FQ_CODEL_QUANTUM, TCA_FQ_CODEL_CE_THRESHOLD, TCA_FQ_CODEL_DROP_BATCH_SIZE, TCA_FQ_CODEL_MEMORY_LIMIT, __TCA_FQ_CODEL_MAX }; #define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) enum { TCA_FQ_CODEL_XSTATS_QDISC, TCA_FQ_CODEL_XSTATS_CLASS, }; struct tc_fq_codel_qd_stats { __u32 maxpacket; /* largest packet we've seen so far */ __u32 drop_overlimit; /* number of time max qdisc * packet limit was hit */ __u32 ecn_mark; /* number of packets we ECN marked * instead of being dropped */ __u32 new_flow_count; /* number of time packets * created a 'new flow' */ __u32 new_flows_len; /* count of flows in new list */ __u32 old_flows_len; /* count of flows in old list */ __u32 ce_mark; /* packets above ce_threshold */ __u32 memory_usage; /* in bytes */ __u32 drop_overmemory; }; struct tc_fq_codel_cl_stats { __s32 deficit; __u32 ldelay; /* in-queue delay seen by most recently * dequeued packet */ __u32 count; __u32 lastcount; __u32 dropping; __s32 drop_next; }; struct tc_fq_codel_xstats { __u32 type; union { struct tc_fq_codel_qd_stats qdisc_stats; struct tc_fq_codel_cl_stats class_stats; }; }; /* FQ */ enum { TCA_FQ_UNSPEC, TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ TCA_FQ_QUANTUM, /* RR quantum */ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ __TCA_FQ_MAX }; #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) struct tc_fq_qd_stats { __u64 gc_flows; __u64 highprio_packets; __u64 tcp_retrans; __u64 throttled; __u64 flows_plimit; __u64 pkts_too_long; __u64 allocation_errors; __s64 time_next_delayed_flow; __u32 flows; __u32 inactive_flows; __u32 throttled_flows; __u32 unthrottle_latency_ns; __u64 ce_mark; /* packets above ce_threshold */ }; /* Heavy-Hitter Filter */ enum { TCA_HHF_UNSPEC, TCA_HHF_BACKLOG_LIMIT, TCA_HHF_QUANTUM, TCA_HHF_HH_FLOWS_LIMIT, TCA_HHF_RESET_TIMEOUT, TCA_HHF_ADMIT_BYTES, TCA_HHF_EVICT_TIMEOUT, TCA_HHF_NON_HH_WEIGHT, __TCA_HHF_MAX }; #define TCA_HHF_MAX (__TCA_HHF_MAX - 1) struct tc_hhf_xstats { __u32 drop_overlimit; /* number of times max qdisc packet limit * was hit */ __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ __u32 hh_tot_count; /* number of captured heavy-hitters so far */ __u32 hh_cur_count; /* number of current heavy-hitters */ }; /* PIE */ enum { TCA_PIE_UNSPEC, TCA_PIE_TARGET, TCA_PIE_LIMIT, TCA_PIE_TUPDATE, TCA_PIE_ALPHA, TCA_PIE_BETA, TCA_PIE_ECN, TCA_PIE_BYTEMODE, __TCA_PIE_MAX }; #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) struct tc_pie_xstats { __u32 prob; /* current probability */ __u32 delay; /* current delay in ms */ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ __u32 packets_in; /* total number of packets enqueued */ __u32 dropped; /* packets dropped due to pie_action */ __u32 overlimit; /* dropped due to lack of space in queue */ __u32 maxq; /* maximum queue size */ __u32 ecn_mark; /* packets 
marked with ecn*/ }; /* CBS */ struct tc_cbs_qopt { __u8 offload; __u8 _pad[3]; __s32 hicredit; __s32 locredit; __s32 idleslope; __s32 sendslope; }; enum { TCA_CBS_UNSPEC, TCA_CBS_PARMS, __TCA_CBS_MAX, }; #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) /* ETF */ struct tc_etf_qopt { __s32 delta; __s32 clockid; __u32 flags; #define TC_ETF_DEADLINE_MODE_ON BIT(0) #define TC_ETF_OFFLOAD_ON BIT(1) }; enum { TCA_ETF_UNSPEC, TCA_ETF_PARMS, __TCA_ETF_MAX, }; #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) /* CAKE */ enum { TCA_CAKE_UNSPEC, TCA_CAKE_PAD, TCA_CAKE_BASE_RATE64, TCA_CAKE_DIFFSERV_MODE, TCA_CAKE_ATM, TCA_CAKE_FLOW_MODE, TCA_CAKE_OVERHEAD, TCA_CAKE_RTT, TCA_CAKE_TARGET, TCA_CAKE_AUTORATE, TCA_CAKE_MEMORY, TCA_CAKE_NAT, TCA_CAKE_RAW, TCA_CAKE_WASH, TCA_CAKE_MPU, TCA_CAKE_INGRESS, TCA_CAKE_ACK_FILTER, TCA_CAKE_SPLIT_GSO, __TCA_CAKE_MAX }; #define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) enum { __TCA_CAKE_STATS_INVALID, TCA_CAKE_STATS_PAD, TCA_CAKE_STATS_CAPACITY_ESTIMATE64, TCA_CAKE_STATS_MEMORY_LIMIT, TCA_CAKE_STATS_MEMORY_USED, TCA_CAKE_STATS_AVG_NETOFF, TCA_CAKE_STATS_MIN_NETLEN, TCA_CAKE_STATS_MAX_NETLEN, TCA_CAKE_STATS_MIN_ADJLEN, TCA_CAKE_STATS_MAX_ADJLEN, TCA_CAKE_STATS_TIN_STATS, TCA_CAKE_STATS_DEFICIT, TCA_CAKE_STATS_COBALT_COUNT, TCA_CAKE_STATS_DROPPING, TCA_CAKE_STATS_DROP_NEXT_US, TCA_CAKE_STATS_P_DROP, TCA_CAKE_STATS_BLUE_TIMER_US, __TCA_CAKE_STATS_MAX }; #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) enum { __TCA_CAKE_TIN_STATS_INVALID, TCA_CAKE_TIN_STATS_PAD, TCA_CAKE_TIN_STATS_SENT_PACKETS, TCA_CAKE_TIN_STATS_SENT_BYTES64, TCA_CAKE_TIN_STATS_DROPPED_PACKETS, TCA_CAKE_TIN_STATS_DROPPED_BYTES64, TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, TCA_CAKE_TIN_STATS_BACKLOG_BYTES, TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, TCA_CAKE_TIN_STATS_TARGET_US, TCA_CAKE_TIN_STATS_INTERVAL_US, TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, TCA_CAKE_TIN_STATS_WAY_MISSES, TCA_CAKE_TIN_STATS_WAY_COLLISIONS, TCA_CAKE_TIN_STATS_PEAK_DELAY_US, TCA_CAKE_TIN_STATS_AVG_DELAY_US, TCA_CAKE_TIN_STATS_BASE_DELAY_US, TCA_CAKE_TIN_STATS_SPARSE_FLOWS, TCA_CAKE_TIN_STATS_BULK_FLOWS, TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, TCA_CAKE_TIN_STATS_MAX_SKBLEN, TCA_CAKE_TIN_STATS_FLOW_QUANTUM, __TCA_CAKE_TIN_STATS_MAX }; #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) #define TC_CAKE_MAX_TINS (8) enum { CAKE_FLOW_NONE = 0, CAKE_FLOW_SRC_IP, CAKE_FLOW_DST_IP, CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ CAKE_FLOW_FLOWS, CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ CAKE_FLOW_MAX, }; enum { CAKE_DIFFSERV_DIFFSERV3 = 0, CAKE_DIFFSERV_DIFFSERV4, CAKE_DIFFSERV_DIFFSERV8, CAKE_DIFFSERV_BESTEFFORT, CAKE_DIFFSERV_PRECEDENCE, CAKE_DIFFSERV_MAX }; enum { CAKE_ACK_NONE = 0, CAKE_ACK_FILTER, CAKE_ACK_AGGRESSIVE, CAKE_ACK_MAX }; enum { CAKE_ATM_NONE = 0, CAKE_ATM_ATM, CAKE_ATM_PTM, CAKE_ATM_MAX }; /* TAPRIO */ enum { TC_TAPRIO_CMD_SET_GATES = 0x00, TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, }; enum { TCA_TAPRIO_SCHED_ENTRY_UNSPEC, TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ __TCA_TAPRIO_SCHED_ENTRY_MAX, }; #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) /* The format for 
schedule entry list is: * [TCA_TAPRIO_SCHED_ENTRY_LIST] * [TCA_TAPRIO_SCHED_ENTRY] * [TCA_TAPRIO_SCHED_ENTRY_CMD] * [TCA_TAPRIO_SCHED_ENTRY_GATES] * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] */ enum { TCA_TAPRIO_SCHED_UNSPEC, TCA_TAPRIO_SCHED_ENTRY, __TCA_TAPRIO_SCHED_MAX, }; #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) enum { TCA_TAPRIO_ATTR_UNSPEC, TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ TCA_TAPRIO_PAD, __TCA_TAPRIO_ATTR_MAX, }; #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) #endif xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/netdev.h0000644000175100001660000001324214706536574022222 0ustar runnerdocker/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN uapi header */ #ifndef _UAPI_LINUX_NETDEV_H #define _UAPI_LINUX_NETDEV_H #define NETDEV_FAMILY_NAME "netdev" #define NETDEV_FAMILY_VERSION 1 /** * enum netdev_xdp_act * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX) * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements * ndo_xdp_xmit callback. * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP * in zero copy mode. * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw * offloading. * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear * XDP buffer support in the driver napi callback. * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements * non-linear XDP buffer support in ndo_xdp_xmit callback. */ enum netdev_xdp_act { NETDEV_XDP_ACT_BASIC = 1, NETDEV_XDP_ACT_REDIRECT = 2, NETDEV_XDP_ACT_NDO_XMIT = 4, NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, /* private: */ NETDEV_XDP_ACT_MASK = 127, }; /** * enum netdev_xdp_rx_metadata * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW * timestamp via bpf_xdp_metadata_rx_timestamp(). * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet * hash via bpf_xdp_metadata_rx_hash(). * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive * packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag(). */ enum netdev_xdp_rx_metadata { NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, NETDEV_XDP_RX_METADATA_HASH = 2, NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, }; /** * enum netdev_xsk_flags * @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported * by the driver. * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the * driver. 
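 *
 * Like the other capability enums in this file, these values are
 * bit masks, so user space tests one capability at a time, e.g.
 * (flags & NETDEV_XSK_FLAGS_TX_TIMESTAMP).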
*/ enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, }; enum netdev_queue_type { NETDEV_QUEUE_TYPE_RX, NETDEV_QUEUE_TYPE_TX, }; enum netdev_qstats_scope { NETDEV_QSTATS_SCOPE_QUEUE = 1, }; enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, NETDEV_A_DEV_XDP_FEATURES, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, NETDEV_A_DEV_XSK_FEATURES, __NETDEV_A_DEV_MAX, NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) }; enum { NETDEV_A_PAGE_POOL_ID = 1, NETDEV_A_PAGE_POOL_IFINDEX, NETDEV_A_PAGE_POOL_NAPI_ID, NETDEV_A_PAGE_POOL_INFLIGHT, NETDEV_A_PAGE_POOL_INFLIGHT_MEM, NETDEV_A_PAGE_POOL_DETACH_TIME, NETDEV_A_PAGE_POOL_DMABUF, __NETDEV_A_PAGE_POOL_MAX, NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1) }; enum { NETDEV_A_PAGE_POOL_STATS_INFO = 1, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT, __NETDEV_A_PAGE_POOL_STATS_MAX, NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1) }; enum { NETDEV_A_NAPI_IFINDEX = 1, NETDEV_A_NAPI_ID, NETDEV_A_NAPI_IRQ, NETDEV_A_NAPI_PID, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) }; enum { NETDEV_A_QUEUE_ID = 1, NETDEV_A_QUEUE_IFINDEX, NETDEV_A_QUEUE_TYPE, NETDEV_A_QUEUE_NAPI_ID, NETDEV_A_QUEUE_DMABUF, __NETDEV_A_QUEUE_MAX, NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1) }; enum { NETDEV_A_QSTATS_IFINDEX = 1, NETDEV_A_QSTATS_QUEUE_TYPE, NETDEV_A_QSTATS_QUEUE_ID, NETDEV_A_QSTATS_SCOPE, NETDEV_A_QSTATS_RX_PACKETS = 8, NETDEV_A_QSTATS_RX_BYTES, NETDEV_A_QSTATS_TX_PACKETS, NETDEV_A_QSTATS_TX_BYTES, NETDEV_A_QSTATS_RX_ALLOC_FAIL, NETDEV_A_QSTATS_RX_HW_DROPS, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, NETDEV_A_QSTATS_RX_CSUM_NONE, NETDEV_A_QSTATS_RX_CSUM_BAD, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, NETDEV_A_QSTATS_TX_HW_DROPS, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, NETDEV_A_QSTATS_TX_CSUM_NONE, NETDEV_A_QSTATS_TX_NEEDS_CSUM, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, NETDEV_A_QSTATS_TX_STOP, NETDEV_A_QSTATS_TX_WAKE, __NETDEV_A_QSTATS_MAX, NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1) }; enum { NETDEV_A_DMABUF_IFINDEX = 1, NETDEV_A_DMABUF_QUEUES, NETDEV_A_DMABUF_FD, NETDEV_A_DMABUF_ID, __NETDEV_A_DMABUF_MAX, NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1) }; enum { NETDEV_CMD_DEV_GET = 1, NETDEV_CMD_DEV_ADD_NTF, NETDEV_CMD_DEV_DEL_NTF, NETDEV_CMD_DEV_CHANGE_NTF, NETDEV_CMD_PAGE_POOL_GET, NETDEV_CMD_PAGE_POOL_ADD_NTF, NETDEV_CMD_PAGE_POOL_DEL_NTF, NETDEV_CMD_PAGE_POOL_CHANGE_NTF, NETDEV_CMD_PAGE_POOL_STATS_GET, NETDEV_CMD_QUEUE_GET, NETDEV_CMD_NAPI_GET, NETDEV_CMD_QSTATS_GET, NETDEV_CMD_BIND_RX, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) }; #define NETDEV_MCGRP_MGMT "mgmt" #define NETDEV_MCGRP_PAGE_POOL "page-pool" #endif /* _UAPI_LINUX_NETDEV_H */ 
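/*
 * Illustrative user-space sketch (not part of the header above):
 * recent libbpf versions report the NETDEV_A_DEV_XDP_FEATURES bits
 * through bpf_xdp_query(), so a loader can probe e.g. XDP_REDIRECT
 * support before attaching a program:
 *
 *	#include <bpf/libbpf.h>
 *	#include <linux/netdev.h>
 *
 *	static int supports_redirect(int ifindex)
 *	{
 *		LIBBPF_OPTS(bpf_xdp_query_opts, opts);
 *
 *		if (bpf_xdp_query(ifindex, 0, &opts))
 *			return 0;
 *		return !!(opts.feature_flags & NETDEV_XDP_ACT_REDIRECT);
 *	}
 *
 * where a failed query is conservatively treated as "unsupported".
 */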
xdp-tools-1.5.4/lib/libbpf/include/uapi/linux/netlink.h0000644000175100001660000001732414706536574022376 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H #include <linux/const.h> #include <linux/socket.h> /* for __kernel_sa_family_t */ #include <linux/types.h> #define NETLINK_ROUTE 0 /* Routing/device hook */ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */ #define NETLINK_SOCK_DIAG 4 /* socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ #define NETLINK_SELINUX 7 /* SELinux event notifications */ #define NETLINK_ISCSI 8 /* Open-iSCSI */ #define NETLINK_AUDIT 9 /* auditing */ #define NETLINK_FIB_LOOKUP 10 #define NETLINK_CONNECTOR 11 #define NETLINK_NETFILTER 12 /* netfilter subsystem */ #define NETLINK_IP6_FW 13 #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ #define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ #define NETLINK_GENERIC 16 /* leave room for NETLINK_DM (DM Events) */ #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define NETLINK_ECRYPTFS 19 #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ #define NETLINK_SMC 22 /* SMC monitoring */ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG #define MAX_LINKS 32 struct sockaddr_nl { __kernel_sa_family_t nl_family; /* AF_NETLINK */ unsigned short nl_pad; /* zero */ __u32 nl_pid; /* port ID */ __u32 nl_groups; /* multicast groups mask */ }; struct nlmsghdr { __u32 nlmsg_len; /* Length of message including header */ __u16 nlmsg_type; /* Message content */ __u16 nlmsg_flags; /* Additional flags */ __u32 nlmsg_seq; /* Sequence number */ __u32 nlmsg_pid; /* Sending process port ID */ }; /* Flags values */ #define NLM_F_REQUEST 0x01 /* It is request message.
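*/
/*
 * Illustrative sketch (editorial addition): a typical dump request
 * combines this flag with the GET modifiers defined below, e.g.
 *
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;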
*/ #define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */ #define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */ #define NLM_F_ECHO 0x08 /* Echo this request */ #define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */ #define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */ /* Modifiers to GET request */ #define NLM_F_ROOT 0x100 /* specify tree root */ #define NLM_F_MATCH 0x200 /* return all matching */ #define NLM_F_ATOMIC 0x400 /* atomic GET */ #define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH) /* Modifiers to NEW request */ #define NLM_F_REPLACE 0x100 /* Override existing */ #define NLM_F_EXCL 0x200 /* Do not touch, if it exists */ #define NLM_F_CREATE 0x400 /* Create, if it does not exist */ #define NLM_F_APPEND 0x800 /* Add to end of list */ /* Modifiers to DELETE request */ #define NLM_F_NONREC 0x100 /* Do not delete recursively */ /* Flags for ACK message */ #define NLM_F_CAPPED 0x100 /* request was capped */ #define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */ /* 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL 4.4BSD CHANGE NLM_F_REPLACE True CHANGE NLM_F_CREATE|NLM_F_REPLACE Append NLM_F_CREATE Check NLM_F_EXCL */ #define NLMSG_ALIGNTO 4U #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) #define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) #define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN) #define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) #define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) #define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ (nlh)->nlmsg_len <= (len)) #define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len))) #define NLMSG_NOOP 0x1 /* Nothing.
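*/
/*
 * Illustrative sketch (editorial addition): the canonical receive loop
 * over a buffer of netlink messages, built from the macros above; "buf"
 * and "len" are assumed to hold the result of a recv() call, and
 * handle() is a hypothetical callback:
 *
 *	struct nlmsghdr *nlh;
 *
 *	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
 *	     nlh = NLMSG_NEXT(nlh, len)) {
 *		if (nlh->nlmsg_type == NLMSG_DONE)
 *			break;
 *		handle(NLMSG_DATA(nlh));
 *	}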
*/ #define NLMSG_ERROR 0x2 /* Error */ #define NLMSG_DONE 0x3 /* End of a dump */ #define NLMSG_OVERRUN 0x4 /* Data lost */ #define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */ struct nlmsgerr { int error; struct nlmsghdr msg; /* * followed by the message contents unless NETLINK_CAP_ACK was set * or the ACK indicates success (error == 0) * message length is aligned with NLMSG_ALIGN() */ /* * followed by TLVs defined in enum nlmsgerr_attrs * if NETLINK_EXT_ACK was set */ }; /** * enum nlmsgerr_attrs - nlmsgerr attributes * @NLMSGERR_ATTR_UNUSED: unused * @NLMSGERR_ATTR_MSG: error message string (string) * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original * message, counting from the beginning of the header (u32) * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to * be used - in the success case - to identify a created * object or operation or similar (binary) * @__NLMSGERR_ATTR_MAX: number of attributes * @NLMSGERR_ATTR_MAX: highest attribute number */ enum nlmsgerr_attrs { NLMSGERR_ATTR_UNUSED, NLMSGERR_ATTR_MSG, NLMSGERR_ATTR_OFFS, NLMSGERR_ATTR_COOKIE, __NLMSGERR_ATTR_MAX, NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1 }; #define NETLINK_ADD_MEMBERSHIP 1 #define NETLINK_DROP_MEMBERSHIP 2 #define NETLINK_PKTINFO 3 #define NETLINK_BROADCAST_ERROR 4 #define NETLINK_NO_ENOBUFS 5 #ifndef __KERNEL__ #define NETLINK_RX_RING 6 #define NETLINK_TX_RING 7 #endif #define NETLINK_LISTEN_ALL_NSID 8 #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 #define NETLINK_GET_STRICT_CHK 12 struct nl_pktinfo { __u32 group; }; struct nl_mmap_req { unsigned int nm_block_size; unsigned int nm_block_nr; unsigned int nm_frame_size; unsigned int nm_frame_nr; }; struct nl_mmap_hdr { unsigned int nm_status; unsigned int nm_len; __u32 nm_group; /* credentials */ __u32 nm_pid; __u32 nm_uid; __u32 nm_gid; }; #ifndef __KERNEL__ enum nl_mmap_status { NL_MMAP_STATUS_UNUSED, NL_MMAP_STATUS_RESERVED, NL_MMAP_STATUS_VALID, NL_MMAP_STATUS_COPY, NL_MMAP_STATUS_SKIP, }; #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) #endif #define NET_MAJOR 36 /* Major 36 is reserved for networking */ enum { NETLINK_UNCONNECTED = 0, NETLINK_CONNECTED, }; /* * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)--> * +---------------------+- - -+- - - - - - - - - -+- - -+ * | Header | Pad | Payload | Pad | * | (struct nlattr) | ing | | ing | * +---------------------+- - -+- - - - - - - - - -+- - -+ * <-------------- nlattr->nla_len --------------> */ struct nlattr { __u16 nla_len; __u16 nla_type; }; /* * nla_type (16 bits) * +---+---+-------------------------------+ * | N | O | Attribute Type | * +---+---+-------------------------------+ * N := Carries nested attributes * O := Payload stored in network byte order * * Note: The N and O flag are mutually exclusive. */ #define NLA_F_NESTED (1 << 15) #define NLA_F_NET_BYTEORDER (1 << 14) #define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) #define NLA_ALIGNTO 4 #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) /* Generic 32 bitflags attribute content sent to the kernel. 
* * The value is a bitmap that defines the values being set * The selector is a bitmask that defines which value is legit * * Examples: * value = 0x0, and selector = 0x1 * implies we are selecting bit 1 and we want to set its value to 0. * * value = 0x2, and selector = 0x2 * implies we are selecting bit 2 and we want to set its value to 1. * */ struct nla_bitfield32 { __u32 value; __u32 selector; }; #endif /* _UAPI__LINUX_NETLINK_H */ xdp-tools-1.5.4/lib/libbpf/include/asm/0000755000175100001660000000000014706536574017245 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/include/asm/barrier.h0000644000175100001660000000021714706536574021044 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H #include <linux/compiler.h> #endif xdp-tools-1.5.4/lib/libbpf/include/linux/0000755000175100001660000000000014706536574017624 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/include/linux/list.h0000644000175100001660000000464214706536574020756 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_LIST_H #define __LINUX_LIST_H #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) #define POISON_POINTER_DELTA 0 #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; prev->next = next; } /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state.
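*/
/*
 * Illustrative sketch (editorial addition): typical use of this minimal
 * list API; "struct item", "items" and use() are hypothetical:
 *
 *	struct item { int v; struct list_head node; };
 *	LIST_HEAD(items);
 *	struct item a = { .v = 1 }, *it;
 *
 *	list_add(&a.node, &items);
 *	list_for_each_entry(it, &items, node)
 *		use(it->v);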
*/ static inline void __list_del_entry(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } static inline int list_empty(const struct list_head *head) { return head->next == head; } #define list_entry(ptr, type, member) \ container_of(ptr, type, member) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ &pos->member != (head); \ pos = list_next_entry(pos, member)) #endif xdp-tools-1.5.4/lib/libbpf/include/linux/err.h0000644000175100001660000000125614706536574020567 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_ERR_H #define __LINUX_ERR_H #include <linux/types.h> #include <asm/errno.h> #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO) static inline void * ERR_PTR(long error_) { return (void *) error_; } static inline long PTR_ERR(const void *ptr) { return (long) ptr; } static inline bool IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline bool IS_ERR_OR_NULL(const void *ptr) { return (!ptr) || IS_ERR_VALUE((unsigned long)ptr); } static inline long PTR_ERR_OR_ZERO(const void *ptr) { return IS_ERR(ptr) ? PTR_ERR(ptr) : 0; } #endif xdp-tools-1.5.4/lib/libbpf/include/linux/types.h0000644000175100001660000000120314706536574021135 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_TYPES_H #define __LINUX_TYPES_H #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <asm/types.h> #include <asm/posix_types.h> #include <linux/types.h> #define __bitwise__ #define __bitwise __bitwise__ typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; typedef __u32 __bitwise __le32; typedef __u32 __bitwise __be32; typedef __u64 __bitwise __le64; typedef __u64 __bitwise __be64; #ifndef __aligned_u64 # define __aligned_u64 __u64 __attribute__((aligned(8))) #endif struct list_head { struct list_head *next, *prev; }; #endif xdp-tools-1.5.4/lib/libbpf/include/linux/kernel.h0000644000175100001660000000176414706536574021255 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_KERNEL_H #define __LINUX_KERNEL_H #include <linux/compiler.h> #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof(((type *)0)->member) * __mptr = (ptr); \ (type *)((char *)__mptr - offsetof(type, member)); }) #endif #ifndef max #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) #endif #ifndef min #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ?
_min1 : _min2; }) #endif #ifndef roundup #define roundup(x, y) ( \ { \ const typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) #endif #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif xdp-tools-1.5.4/lib/libbpf/include/linux/compiler.h0000644000175100001660000000264014706536574021611 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define READ_ONCE(x) (*(volatile typeof(x) *)&x) #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v) #define barrier() asm volatile("" ::: "memory") #if defined(__x86_64__) # define smp_rmb() barrier() # define smp_wmb() barrier() # define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc") # define smp_store_release(p, v) \ do { \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) # define smp_load_acquire(p) \ ({ \ typeof(*p) ___p = READ_ONCE(*p); \ barrier(); \ ___p; \ }) #elif defined(__aarch64__) # define smp_rmb() asm volatile("dmb ishld" ::: "memory") # define smp_wmb() asm volatile("dmb ishst" ::: "memory") # define smp_mb() asm volatile("dmb ish" ::: "memory") #endif #ifndef smp_mb # define smp_mb() __sync_synchronize() #endif #ifndef smp_rmb # define smp_rmb() smp_mb() #endif #ifndef smp_wmb # define smp_wmb() smp_mb() #endif #ifndef smp_store_release # define smp_store_release(p, v) \ do { \ smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) #endif #ifndef smp_load_acquire # define smp_load_acquire(p) \ ({ \ typeof(*p) ___p = READ_ONCE(*p); \ smp_mb(); \ ___p; \ }) #endif #endif /* __LINUX_COMPILER_H */ xdp-tools-1.5.4/lib/libbpf/include/linux/overflow.h0000644000175100001660000000526114706536574021644 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_OVERFLOW_H #define __LINUX_OVERFLOW_H #define is_signed_type(type) (((type)(-1)) < (type)1) #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #ifdef __GNUC__ #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif #endif #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW #define check_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_mul_overflow(__a, __b, __d); \ }) #else /* * If one of a or b is a compile-time constant, this avoids a division. */ #define __unsigned_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a * __b; \ __builtin_constant_p(__b) ? \ __b > 0 && __a > type_max(typeof(__a)) / __b : \ __a > 0 && __b > type_max(typeof(__b)) / __a; \ }) /* * Signed multiplication is rather hard. gcc always follows C99, so * division is truncated towards 0. 
This means that we can write the * overflow check like this: * * (a > 0 && (b > MAX/a || b < MIN/a)) || * (a < -1 && (b > MIN/a || b < MAX/a) || * (a == -1 && b == MIN) * * The redundant casts of -1 are to silence an annoying -Wtype-limits * (included in -Wextra) warning: When the type is u8 or u16, the * __b_c_e in check_mul_overflow obviously selects * __unsigned_mul_overflow, but unfortunately gcc still parses this * code and warns about the limited range of __b. */ #define __signed_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ typeof(a) __tmax = type_max(typeof(a)); \ typeof(a) __tmin = type_min(typeof(a)); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (__u64)__a * (__u64)__b; \ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ (__b == (typeof(__b))-1 && __a == __tmin); \ }) #define check_mul_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_mul_overflow(a, b, d), \ __unsigned_mul_overflow(a, b, d)) #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ #endif xdp-tools-1.5.4/lib/libbpf/include/linux/ring_buffer.h0000644000175100001660000000073014706536574022265 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef _TOOLS_LINUX_RING_BUFFER_H_ #define _TOOLS_LINUX_RING_BUFFER_H_ #include <asm/barrier.h> static inline __u64 ring_buffer_read_head(struct perf_event_mmap_page *base) { return smp_load_acquire(&base->data_head); } static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base, __u64 tail) { smp_store_release(&base->data_tail, tail); } #endif /* _TOOLS_LINUX_RING_BUFFER_H_ */ xdp-tools-1.5.4/lib/libbpf/include/linux/filter.h0000644000175100001660000000667214706536574021265 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_FILTER_H #define __LINUX_FILTER_H #include <linux/bpf.h> #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct bpf_insn) { \ .code = CODE, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) #define BPF_ALU32_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_MOV64_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_CALL_REL(DST) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = BPF_PSEUDO_CALL, \ .off = 0, \ .imm = DST }) #define BPF_EXIT_INSN() \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - BPF_FUNC_unspec) }) #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define
BPF_MOV64_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_MOV32_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_DW | BPF_IMM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF1, \ .imm = IMM1 }), \ ((struct bpf_insn) { \ .code = 0, \ .dst_reg = 0, \ .src_reg = 0, \ .off = OFF2, \ .imm = IMM2 }) #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0, \ MAP_FD, 0) #define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF) \ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0, \ MAP_FD, VALUE_OFF) #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) #endif xdp-tools-1.5.4/lib/libbpf/scripts/0000755000175100001660000000000014706536574016531 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/scripts/coverity.sh0000755000175100001660000001062614706536574020741 0ustar runnerdocker#!/bin/bash # Taken from: https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh # Local changes are annotated with "#[local]" set -e # Environment check echo -e "\033[33;1mNote: COVERITY_SCAN_PROJECT_NAME and COVERITY_SCAN_TOKEN are available on Project Settings page on scan.coverity.com\033[0m" [ -z "$COVERITY_SCAN_PROJECT_NAME" ] && echo "ERROR: COVERITY_SCAN_PROJECT_NAME must be set" && exit 1 [ -z "$COVERITY_SCAN_NOTIFICATION_EMAIL" ] && echo "ERROR: COVERITY_SCAN_NOTIFICATION_EMAIL must be set" && exit 1 [ -z "$COVERITY_SCAN_BRANCH_PATTERN" ] && echo "ERROR: COVERITY_SCAN_BRANCH_PATTERN must be set" && exit 1 [ -z "$COVERITY_SCAN_BUILD_COMMAND" ] && echo "ERROR: COVERITY_SCAN_BUILD_COMMAND must be set" && exit 1 [ -z "$COVERITY_SCAN_TOKEN" ] && echo "ERROR: COVERITY_SCAN_TOKEN must be set" && exit 1 PLATFORM=`uname` #[local] Use /var/tmp for TOOL_ARCHIVE and TOOL_BASE, as on certain systems # /tmp is tmpfs and is sometimes too small to handle all necessary tooling TOOL_ARCHIVE=/var//tmp/cov-analysis-${PLATFORM}.tgz TOOL_URL=https://scan.coverity.com/download/${PLATFORM} TOOL_BASE=/var/tmp/coverity-scan-analysis UPLOAD_URL="https://scan.coverity.com/builds" SCAN_URL="https://scan.coverity.com" # Do not run on pull requests if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then echo -e "\033[33;1mINFO: Skipping Coverity Analysis: branch is a pull request.\033[0m" exit 0 fi # Verify this branch should run IS_COVERITY_SCAN_BRANCH=`ruby -e "puts '${TRAVIS_BRANCH}' =~ /\\A$COVERITY_SCAN_BRANCH_PATTERN\\z/ ? 1 : 0"` if [ "$IS_COVERITY_SCAN_BRANCH" = "1" ]; then echo -e "\033[33;1mCoverity Scan configured to run on branch ${TRAVIS_BRANCH}\033[0m" else echo -e "\033[33;1mCoverity Scan NOT configured to run on branch ${TRAVIS_BRANCH}\033[0m" exit 1 fi # Verify upload is permitted AUTH_RES=`curl -s --form project="$COVERITY_SCAN_PROJECT_NAME" --form token="$COVERITY_SCAN_TOKEN" $SCAN_URL/api/upload_permitted` if [ "$AUTH_RES" = "Access denied" ]; then echo -e "\033[33;1mCoverity Scan API access denied. 
Check COVERITY_SCAN_PROJECT_NAME and COVERITY_SCAN_TOKEN.\033[0m" exit 1 else AUTH=`echo $AUTH_RES | ruby -e "require 'rubygems'; require 'json'; puts JSON[STDIN.read]['upload_permitted']"` if [ "$AUTH" = "true" ]; then echo -e "\033[33;1mCoverity Scan analysis authorized per quota.\033[0m" else WHEN=`echo $AUTH_RES | ruby -e "require 'rubygems'; require 'json'; puts JSON[STDIN.read]['next_upload_permitted_at']"` echo -e "\033[33;1mCoverity Scan analysis NOT authorized until $WHEN.\033[0m" exit 0 fi fi if [ ! -d $TOOL_BASE ]; then # Download Coverity Scan Analysis Tool if [ ! -e $TOOL_ARCHIVE ]; then echo -e "\033[33;1mDownloading Coverity Scan Analysis Tool...\033[0m" wget -nv -O $TOOL_ARCHIVE $TOOL_URL --post-data "project=$COVERITY_SCAN_PROJECT_NAME&token=$COVERITY_SCAN_TOKEN" fi # Extract Coverity Scan Analysis Tool echo -e "\033[33;1mExtracting Coverity Scan Analysis Tool...\033[0m" mkdir -p $TOOL_BASE pushd $TOOL_BASE tar xzf $TOOL_ARCHIVE popd fi TOOL_DIR=`find $TOOL_BASE -type d -name 'cov-analysis*'` export PATH=$TOOL_DIR/bin:$PATH # Build echo -e "\033[33;1mRunning Coverity Scan Analysis Tool...\033[0m" COV_BUILD_OPTIONS="" #COV_BUILD_OPTIONS="--return-emit-failures 8 --parse-error-threshold 85" RESULTS_DIR="cov-int" eval "${COVERITY_SCAN_BUILD_COMMAND_PREPEND}" COVERITY_UNSUPPORTED=1 cov-build --dir $RESULTS_DIR $COV_BUILD_OPTIONS $COVERITY_SCAN_BUILD_COMMAND cov-import-scm --dir $RESULTS_DIR --scm git --log $RESULTS_DIR/scm_log.txt 2>&1 # Upload results echo -e "\033[33;1mTarring Coverity Scan Analysis results...\033[0m" RESULTS_ARCHIVE=analysis-results.tgz tar czf $RESULTS_ARCHIVE $RESULTS_DIR SHA=`git rev-parse --short HEAD` echo -e "\033[33;1mUploading Coverity Scan Analysis results...\033[0m" response=$(curl \ --silent --write-out "\n%{http_code}\n" \ --form project=$COVERITY_SCAN_PROJECT_NAME \ --form token=$COVERITY_SCAN_TOKEN \ --form email=$COVERITY_SCAN_NOTIFICATION_EMAIL \ --form file=@$RESULTS_ARCHIVE \ --form version=$SHA \ --form description="Travis CI build" \ $UPLOAD_URL) status_code=$(echo "$response" | sed -n '$p') #[local] Coverity used to return 201 on success, but it's 200 now # See https://github.com/systemd/systemd/blob/master/tools/coverity.sh#L145 if [ "$status_code" != "200" ]; then TEXT=$(echo "$response" | sed '$d') echo -e "\033[33;1mCoverity Scan upload failed: $TEXT.\033[0m" exit 1 fi xdp-tools-1.5.4/lib/libbpf/scripts/mailmap-update.sh0000755000175100001660000000133014706536574021765 0ustar runnerdocker#!/usr/bin/env bash set -eu usage () { echo "USAGE: ./mailmap-update.sh <libbpf-repo> <linux-repo>" exit 1 } LIBBPF_REPO="${1-""}" LINUX_REPO="${2-""}" if [ -z "${LIBBPF_REPO}" ] || [ -z "${LINUX_REPO}" ]; then echo "Error: libbpf or linux repos are not specified" usage fi LIBBPF_MAILMAP="${LIBBPF_REPO}/.mailmap" LINUX_MAILMAP="${LINUX_REPO}/.mailmap" tmpfile="$(mktemp)" cleanup() { rm -f "${tmpfile}" } trap cleanup EXIT grep_lines() { local pattern="$1" local file="$2" grep "${pattern}" "${file}" || true } while read -r email; do grep_lines "${email}$" "${LINUX_MAILMAP}" >> "${tmpfile}" done < <(git log --format='<%ae>' | sort -u) sort -u "${tmpfile}" > "${LIBBPF_MAILMAP}" xdp-tools-1.5.4/lib/libbpf/scripts/sync-kernel.sh0000755000175100001660000003243314706536574021317 0ustar runnerdocker#!/bin/bash usage () { echo "USAGE: ./sync-kernel.sh <libbpf-repo> <linux-repo> <bpf-branch>" echo "" echo "Set BPF_NEXT_BASELINE to override bpf-next tree commit, otherwise read from <libbpf-repo>/CHECKPOINT-COMMIT." echo "Set BPF_BASELINE to override bpf tree commit, otherwise read from <libbpf-repo>/BPF-CHECKPOINT-COMMIT."
echo "Set MANUAL_MODE to 1 to manually control every cherry-picked commits." exit 1 } set -eu LIBBPF_REPO=${1-""} LINUX_REPO=${2-""} BPF_BRANCH=${3-""} BASELINE_COMMIT=${BPF_NEXT_BASELINE:-$(cat ${LIBBPF_REPO}/CHECKPOINT-COMMIT)} BPF_BASELINE_COMMIT=${BPF_BASELINE:-$(cat ${LIBBPF_REPO}/BPF-CHECKPOINT-COMMIT)} if [ -z "${LIBBPF_REPO}" ] || [ -z "${LINUX_REPO}" ]; then echo "Error: libbpf or linux repos are not specified" usage fi if [ -z "${BPF_BRANCH}" ]; then echo "Error: linux's bpf tree branch is not specified" usage fi if [ -z "${BASELINE_COMMIT}" ] || [ -z "${BPF_BASELINE_COMMIT}" ]; then echo "Error: bpf or bpf-next baseline commits are not provided" usage fi SUFFIX=$(date --utc +%Y-%m-%dT%H-%M-%S.%3NZ) WORKDIR=$(pwd) TMP_DIR=$(mktemp -d) trap "cd ${WORKDIR}; exit" INT TERM EXIT declare -A PATH_MAP PATH_MAP=( \ [tools/lib/bpf]=src \ [tools/include/uapi/linux/bpf_common.h]=include/uapi/linux/bpf_common.h \ [tools/include/uapi/linux/bpf.h]=include/uapi/linux/bpf.h \ [tools/include/uapi/linux/btf.h]=include/uapi/linux/btf.h \ [tools/include/uapi/linux/fcntl.h]=include/uapi/linux/fcntl.h \ [tools/include/uapi/linux/openat2.h]=include/uapi/linux/openat2.h \ [tools/include/uapi/linux/if_link.h]=include/uapi/linux/if_link.h \ [tools/include/uapi/linux/if_xdp.h]=include/uapi/linux/if_xdp.h \ [tools/include/uapi/linux/netdev.h]=include/uapi/linux/netdev.h \ [tools/include/uapi/linux/netlink.h]=include/uapi/linux/netlink.h \ [tools/include/uapi/linux/pkt_cls.h]=include/uapi/linux/pkt_cls.h \ [tools/include/uapi/linux/pkt_sched.h]=include/uapi/linux/pkt_sched.h \ [include/uapi/linux/perf_event.h]=include/uapi/linux/perf_event.h \ [Documentation/bpf/libbpf]=docs \ ) LIBBPF_PATHS=("${!PATH_MAP[@]}" ":^tools/lib/bpf/Makefile" ":^tools/lib/bpf/Build" ":^tools/lib/bpf/.gitignore" ":^tools/include/tools/libc_compat.h") LIBBPF_VIEW_PATHS=("${PATH_MAP[@]}") LIBBPF_VIEW_EXCLUDE_REGEX='^src/(Makefile|Build|test_libbpf\.c|bpf_helper_defs\.h|\.gitignore)$|^docs/(\.gitignore|api\.rst|conf\.py)$|^docs/sphinx/.*' LINUX_VIEW_EXCLUDE_REGEX='^include/tools/libc_compat.h$' LIBBPF_TREE_FILTER="mkdir -p __libbpf/include/uapi/linux __libbpf/include/tools && "$'\\\n' for p in "${!PATH_MAP[@]}"; do LIBBPF_TREE_FILTER+="git mv -kf ${p} __libbpf/${PATH_MAP[${p}]} && "$'\\\n' done LIBBPF_TREE_FILTER+="git rm --ignore-unmatch -f __libbpf/src/{Makefile,Build,test_libbpf.c,.gitignore} >/dev/null" cd_to() { cd ${WORKDIR} && cd "$1" } # Output brief single-line commit description # $1 - commit ref commit_desc() { git log -n1 --pretty='%h ("%s")' $1 } # Create commit single-line signature, which consists of: # - full commit subject # - author date in ISO8601 format # - full commit body with newlines replaced with vertical bars (|) # - shortstat appended at the end # The idea is that this single-line signature is good enough to make final # decision about whether two commits are the same, across different repos. 
# $1 - commit ref # $2 - paths filter commit_signature() { local ref=$1 shift git show --pretty='("%s")|%aI|%b' --shortstat $ref -- "${@-.}" | tr '\n' '|' } # Cherry-pick commits touching libbpf-related files # $1 - baseline_tag # $2 - tip_tag cherry_pick_commits() { local manual_mode=${MANUAL_MODE:-0} local baseline_tag=$1 local tip_tag=$2 local new_commits local signature local should_skip local synced_cnt local manual_check local libbpf_conflict_cnt local desc new_commits=$(git rev-list --no-merges --topo-order --reverse ${baseline_tag}..${tip_tag} -- "${LIBBPF_PATHS[@]}") for new_commit in ${new_commits}; do desc="$(commit_desc ${new_commit})" signature="$(commit_signature ${new_commit} "${LIBBPF_PATHS[@]}")" synced_cnt=$(grep -F "${signature}" ${TMP_DIR}/libbpf_commits.txt | wc -l) manual_check=0 if ((${synced_cnt} > 0)); then # commit with the same subject is already in libbpf, but it's # not 100% the same commit, so check with user echo "Commit '${desc}' is synced into libbpf as:" grep -F "${signature}" ${TMP_DIR}/libbpf_commits.txt | \ cut -d'|' -f1 | sed -e 's/^/- /' if ((${manual_mode} != 1 && ${synced_cnt} == 1)); then echo "Skipping '${desc}' due to unique match..." continue fi if ((${synced_cnt} > 1)); then echo "'${desc}' matches multiple commits, please, double-check!" manual_check=1 fi fi if ((${manual_mode} == 1 || ${manual_check} == 1)); then read -p "Do you want to skip '${desc}'? [y/N]: " should_skip case "${should_skip}" in "y" | "Y") echo "Skipping '${desc}'..." continue ;; esac fi # commit hasn't been synced into libbpf yet echo "Picking '${desc}'..." if ! git cherry-pick ${new_commit} &>/dev/null; then echo "Warning! Cherry-picking '${desc}' failed, checking if it's non-libbpf files causing problems..." libbpf_conflict_cnt=$(git diff --name-only --diff-filter=U -- "${LIBBPF_PATHS[@]}" | wc -l) conflict_cnt=$(git diff --name-only | wc -l) prompt_resolution=1 if ((${libbpf_conflict_cnt} == 0)); then echo "Looks like only non-libbpf files have conflicts, ignoring..." if ((${conflict_cnt} == 0)); then echo "Empty cherry-pick, skipping it..." git cherry-pick --abort continue fi git add . # GIT_EDITOR=true to avoid editor popping up to edit commit message if ! GIT_EDITOR=true git cherry-pick --continue &>/dev/null; then echo "Error! That still failed! Please resolve manually." else echo "Success! All cherry-pick conflicts were resolved for '${desc}'!" prompt_resolution=0 fi fi if ((${prompt_resolution} == 1)); then read -p "Error! Cherry-picking '${desc}' failed, please fix manually and press <return> to proceed..." fi fi # Append signature of just cherry-picked commit to avoid # potentially cherry-picking the same commit twice later when # processing bpf tree commits. At this point we don't know yet # the final commit sha in libbpf repo, so we record Linux SHA # instead as LINUX_<sha>. echo LINUX_$(git log --pretty='%h' -n1) "${signature}" >> ${TMP_DIR}/libbpf_commits.txt done } cleanup() { echo "Cleaning up..." rm -r ${TMP_DIR} cd_to ${LINUX_REPO} git checkout ${TIP_SYM_REF} git branch -D ${BASELINE_TAG} ${TIP_TAG} ${BPF_BASELINE_TAG} ${BPF_TIP_TAG} \ ${SQUASH_BASE_TAG} ${SQUASH_TIP_TAG} ${VIEW_TAG} || true cd_to . echo "DONE." } cd_to ${LIBBPF_REPO} GITHUB_ABS_DIR=$(pwd) echo "Dumping existing libbpf commit signatures..."
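# Pre-compute signatures for the most recent libbpf commits;
# cherry_pick_commits() greps the resulting file to detect patches that
# were already synced from the kernel tree.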
for h in $(git log --pretty='%h' -n500); do echo $h "$(commit_signature $h)" >> ${TMP_DIR}/libbpf_commits.txt done # Use current kernel repo HEAD as a source of patches cd_to ${LINUX_REPO} LINUX_ABS_DIR=$(pwd) TIP_SYM_REF=$(git symbolic-ref -q --short HEAD || git rev-parse HEAD) TIP_COMMIT=$(git rev-parse HEAD) BPF_TIP_COMMIT=$(git rev-parse ${BPF_BRANCH}) BASELINE_TAG=libbpf-baseline-${SUFFIX} TIP_TAG=libbpf-tip-${SUFFIX} BPF_BASELINE_TAG=libbpf-bpf-baseline-${SUFFIX} BPF_TIP_TAG=libbpf-bpf-tip-${SUFFIX} VIEW_TAG=libbpf-view-${SUFFIX} LIBBPF_SYNC_TAG=libbpf-sync-${SUFFIX} # Squash state of kernel repo at baseline into single commit SQUASH_BASE_TAG=libbpf-squash-base-${SUFFIX} SQUASH_TIP_TAG=libbpf-squash-tip-${SUFFIX} SQUASH_COMMIT=$(git commit-tree ${BASELINE_COMMIT}^{tree} -m "BASELINE SQUASH ${BASELINE_COMMIT}") echo "WORKDIR: ${WORKDIR}" echo "LINUX REPO: ${LINUX_REPO}" echo "LIBBPF REPO: ${LIBBPF_REPO}" echo "TEMP DIR: ${TMP_DIR}" echo "SUFFIX: ${SUFFIX}" echo "BASE COMMIT: '$(commit_desc ${BASELINE_COMMIT})'" echo "TIP COMMIT: '$(commit_desc ${TIP_COMMIT})'" echo "BPF BASE COMMIT: '$(commit_desc ${BPF_BASELINE_COMMIT})'" echo "BPF TIP COMMIT: '$(commit_desc ${BPF_TIP_COMMIT})'" echo "SQUASH COMMIT: ${SQUASH_COMMIT}" echo "BASELINE TAG: ${BASELINE_TAG}" echo "TIP TAG: ${TIP_TAG}" echo "BPF BASELINE TAG: ${BPF_BASELINE_TAG}" echo "BPF TIP TAG: ${BPF_TIP_TAG}" echo "SQUASH BASE TAG: ${SQUASH_BASE_TAG}" echo "SQUASH TIP TAG: ${SQUASH_TIP_TAG}" echo "VIEW TAG: ${VIEW_TAG}" echo "LIBBPF SYNC TAG: ${LIBBPF_SYNC_TAG}" echo "PATCHES: ${TMP_DIR}/patches" git branch ${BASELINE_TAG} ${BASELINE_COMMIT} git branch ${TIP_TAG} ${TIP_COMMIT} git branch ${BPF_BASELINE_TAG} ${BPF_BASELINE_COMMIT} git branch ${BPF_TIP_TAG} ${BPF_TIP_COMMIT} git branch ${SQUASH_BASE_TAG} ${SQUASH_COMMIT} git checkout -b ${SQUASH_TIP_TAG} ${SQUASH_COMMIT} # Cherry-pick new commits onto squashed baseline commit cherry_pick_commits ${BASELINE_TAG} ${TIP_TAG} cherry_pick_commits ${BPF_BASELINE_TAG} ${BPF_TIP_TAG} # Move all libbpf files into __libbpf directory. FILTER_BRANCH_SQUELCH_WARNING=1 git filter-branch --prune-empty -f --tree-filter "${LIBBPF_TREE_FILTER}" ${SQUASH_TIP_TAG} ${SQUASH_BASE_TAG} # Make __libbpf a new root directory FILTER_BRANCH_SQUELCH_WARNING=1 git filter-branch --prune-empty -f --subdirectory-filter __libbpf ${SQUASH_TIP_TAG} ${SQUASH_BASE_TAG} # If there are no new commits with libbpf-related changes, bail out COMMIT_CNT=$(git rev-list --count ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG}) if ((${COMMIT_CNT} <= 0)); then echo "No new changes to apply, we are done!" cleanup exit 2 fi # Exclude baseline commit and generate nice cover letter with summary git format-patch --no-signature ${SQUASH_BASE_TAG}..${SQUASH_TIP_TAG} --cover-letter -o ${TMP_DIR}/patches # Now is time to re-apply libbpf-related linux patches to libbpf repo cd_to ${LIBBPF_REPO} git checkout -b ${LIBBPF_SYNC_TAG} for patch in $(ls -1 ${TMP_DIR}/patches | tail -n +2); do if ! git am -3 --committer-date-is-author-date "${TMP_DIR}/patches/${patch}"; then if ! patch -p1 --merge < "${TMP_DIR}/patches/${patch}"; then read -p "Applying ${TMP_DIR}/patches/${patch} failed, please resolve manually and press to proceed..." 
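# At this point the working tree holds the manually (or patch(1)-)
# resolved result, so the interrupted "git am" session is resumed below.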
fi git am --continue fi done # Generate bpf_helper_defs.h and commit, if anything changed # restore Linux tip to use bpf_doc.py cd_to ${LINUX_REPO} git checkout ${TIP_TAG} # re-generate bpf_helper_defs.h cd_to ${LIBBPF_REPO} "${LINUX_ABS_DIR}/scripts/bpf_doc.py" --header \ --file include/uapi/linux/bpf.h > src/bpf_helper_defs.h # if anything changed, commit it helpers_changes=$(git status --porcelain src/bpf_helper_defs.h | wc -l) if ((${helpers_changes} == 1)); then git add src/bpf_helper_defs.h git commit -s -m "sync: auto-generate latest BPF helpers Latest changes to BPF helper definitions. " -- src/bpf_helper_defs.h fi echo "Regenerating .mailmap..." cd_to "${LINUX_REPO}" git checkout "${TIP_SYM_REF}" cd_to "${LIBBPF_REPO}" "${LIBBPF_REPO}"/scripts/mailmap-update.sh "${LIBBPF_REPO}" "${LINUX_REPO}" # if anything changed, commit it mailmap_changes=$(git status --porcelain .mailmap | wc -l) if ((${mailmap_changes} == 1)); then git add .mailmap git commit -s -m "sync: update .mailmap Update .mailmap based on libbpf's list of contributors and on the latest .mailmap version in the upstream repository. " -- .mailmap fi # Use generated cover-letter as a template for "sync commit" with # baseline and checkpoint commits from kernel repo (and leave summary # from cover letter intact, of course) echo ${TIP_COMMIT} > CHECKPOINT-COMMIT && \ echo ${BPF_TIP_COMMIT} > BPF-CHECKPOINT-COMMIT && \ git add CHECKPOINT-COMMIT && \ git add BPF-CHECKPOINT-COMMIT && \ awk '/\*\*\* BLURB HERE \*\*\*/ {p=1} p' ${TMP_DIR}/patches/0000-cover-letter.patch | \ sed "s/\*\*\* BLURB HERE \*\*\*/\ sync: latest libbpf changes from kernel\n\ \n\ Syncing latest libbpf commits from kernel repository.\n\ Baseline bpf-next commit: ${BASELINE_COMMIT}\n\ Checkpoint bpf-next commit: ${TIP_COMMIT}\n\ Baseline bpf commit: ${BPF_BASELINE_COMMIT}\n\ Checkpoint bpf commit: ${BPF_TIP_COMMIT}/" | \ git commit -s --file=- echo "SUCCESS! ${COMMIT_CNT} commits synced." echo "Verifying Linux's and Github's libbpf state" cd_to ${LINUX_REPO} git checkout -b ${VIEW_TAG} ${TIP_COMMIT} FILTER_BRANCH_SQUELCH_WARNING=1 git filter-branch -f --tree-filter "${LIBBPF_TREE_FILTER}" ${VIEW_TAG}^..${VIEW_TAG} FILTER_BRANCH_SQUELCH_WARNING=1 git filter-branch -f --subdirectory-filter __libbpf ${VIEW_TAG}^..${VIEW_TAG} git ls-files -- "${LIBBPF_VIEW_PATHS[@]}" | grep -v -E "${LINUX_VIEW_EXCLUDE_REGEX}" > ${TMP_DIR}/linux-view.ls cd_to ${LIBBPF_REPO} git ls-files -- "${LIBBPF_VIEW_PATHS[@]}" | grep -v -E "${LIBBPF_VIEW_EXCLUDE_REGEX}" > ${TMP_DIR}/github-view.ls echo "Comparing list of files..." diff -u ${TMP_DIR}/linux-view.ls ${TMP_DIR}/github-view.ls echo "Comparing file contents..." CONSISTENT=1 for F in $(cat ${TMP_DIR}/linux-view.ls); do if ! diff -u "${LINUX_ABS_DIR}/${F}" "${GITHUB_ABS_DIR}/${F}"; then echo "${LINUX_ABS_DIR}/${F} and ${GITHUB_ABS_DIR}/${F} are different!" CONSISTENT=0 fi done if ((${CONSISTENT} == 1)); then echo "Great! Content is identical!" else ignore_inconsistency=n echo "Unfortunately, there are some inconsistencies, please double check." read -p "Does everything look good? [y/N]: " ignore_inconsistency case "${ignore_inconsistency}" in "y" | "Y") echo "Ok, proceeding..." ;; *) echo "Oops, exiting with error..." 
exit 4 esac fi cleanup xdp-tools-1.5.4/lib/libbpf/scripts/build-fuzzers.sh0000755000175100001660000000611214706536574021675 0ustar runnerdocker#!/bin/bash set -eux SANITIZER=${SANITIZER:-address} flags="-O1 -fno-omit-frame-pointer -g -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=$SANITIZER -fsanitize=fuzzer-no-link" export CC=${CC:-clang} export CFLAGS=${CFLAGS:-$flags} export CXX=${CXX:-clang++} export CXXFLAGS=${CXXFLAGS:-$flags} cd "$(dirname -- "$0")/.." export OUT=${OUT:-"$(pwd)/out"} mkdir -p "$OUT" export LIB_FUZZING_ENGINE=${LIB_FUZZING_ENGINE:--fsanitize=fuzzer} # libelf is compiled with _FORTIFY_SOURCE by default and it # isn't compatible with MSan. It was borrowed # from https://github.com/google/oss-fuzz/pull/7422 if [[ "$SANITIZER" == memory ]]; then CFLAGS+=" -U_FORTIFY_SOURCE" CXXFLAGS+=" -U_FORTIFY_SOURCE" fi # The alignment check is turned off by default on OSS-Fuzz/CFLite so it should be # turned on explicitly there. It was borrowed from # https://github.com/google/oss-fuzz/pull/7092 if [[ "$SANITIZER" == undefined ]]; then additional_ubsan_checks=alignment UBSAN_FLAGS="-fsanitize=$additional_ubsan_checks -fno-sanitize-recover=$additional_ubsan_checks" CFLAGS+=" $UBSAN_FLAGS" CXXFLAGS+=" $UBSAN_FLAGS" fi # Ideally libelf should be built using release tarballs available # at https://sourceware.org/elfutils/ftp/. Unfortunately sometimes they # fail to compile (for example, elfutils-0.185 fails to compile with LDFLAGS enabled # due to https://bugs.gentoo.org/794601) so let's just point the script to # commits referring to versions of libelf that actually can be built rm -rf elfutils git clone https://sourceware.org/git/elfutils.git ( cd elfutils git checkout 67a187d4c1790058fc7fd218317851cb68bb087c git log --oneline -1 # ASan isn't compatible with -Wl,--no-undefined: https://github.com/google/sanitizers/issues/380 sed -i 's/^\(NO_UNDEFINED=\).*/\1/' configure.ac # ASan isn't compatible with -Wl,-z,defs either: # https://clang.llvm.org/docs/AddressSanitizer.html#usage sed -i 's/^\(ZDEFS_LDFLAGS=\).*/\1/' configure.ac if [[ "$SANITIZER" == undefined ]]; then # That's basically what --enable-sanitize-undefined does to turn off unaligned access # elfutils heavily relies on, on i386/x86_64, but without changing compiler flags along the way sed -i 's/\(check_undefined_val\)=[0-9]/\1=1/' configure.ac fi autoreconf -i -f if !
./configure --enable-maintainer-mode --disable-debuginfod --disable-libdebuginfod \ --disable-demangler --without-bzlib --without-lzma --without-zstd \ CC="$CC" CFLAGS="-Wno-error $CFLAGS" CXX="$CXX" CXXFLAGS="-Wno-error $CXXFLAGS" LDFLAGS="$CFLAGS"; then cat config.log exit 1 fi make -C config -j$(nproc) V=1 make -C lib -j$(nproc) V=1 make -C libelf -j$(nproc) V=1 ) make -C src BUILD_STATIC_ONLY=y V=1 clean make -C src -j$(nproc) CFLAGS="-I$(pwd)/elfutils/libelf $CFLAGS" BUILD_STATIC_ONLY=y V=1 $CC $CFLAGS -Isrc -Iinclude -Iinclude/uapi -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -c fuzz/bpf-object-fuzzer.c -o bpf-object-fuzzer.o $CXX $CXXFLAGS $LIB_FUZZING_ENGINE bpf-object-fuzzer.o src/libbpf.a "$(pwd)/elfutils/libelf/libelf.a" -l:libz.a -o "$OUT/bpf-object-fuzzer" cp fuzz/bpf-object-fuzzer_seed_corpus.zip "$OUT" xdp-tools-1.5.4/lib/libbpf/assets/0000755000175100001660000000000014706536574016344 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/fuzz/0000755000175100001660000000000014706536574016040 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/fuzz/bpf-object-fuzzer.c0000644000175100001660000000076014706536574021545 0ustar runnerdocker#include "libbpf.h" static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) { return 0; } int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct bpf_object *obj = NULL; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); int err; libbpf_set_print(libbpf_print_fn); opts.object_name = "fuzz-object"; obj = bpf_object__open_mem(data, size, &opts); err = libbpf_get_error(obj); if (err) return 0; bpf_object__close(obj); return 0; } xdp-tools-1.5.4/lib/libbpf/fuzz/bpf-object-fuzzer_seed_corpus.zip0000644000175100001660000000210314706536574024511 0ustar runnerdocker[binary zip payload (minimal.bpf.o seed corpus) omitted -- not representable as text] xdp-tools-1.5.4/lib/libbpf/.gitattributes0000644000175100001660000000003014706536574017726 0ustar runnerdockerassets/** export-ignore xdp-tools-1.5.4/lib/libbpf/src/0000755000175100001660000000000014706536574015631 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/src/bpf_core_read.h0000644000175100001660000005362014706536574020562 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_CORE_READ_H__ #define __BPF_CORE_READ_H__ #include "bpf_helpers.h" /* * enum bpf_field_info_kind is passed as a second argument into * __builtin_preserve_field_info() built-in to get a specific aspect of * a field, captured as a first argument. __builtin_preserve_field_info(field, * info_kind) returns __u32 integer and produces BTF field relocation, which * is understood and processed by libbpf during BPF object loading. See * selftests/bpf for examples.
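*/
/*
 * Illustrative sketch (editorial addition): for example,
 *
 *	__u32 off = __builtin_preserve_field_info(t->pid,
 *						   BPF_FIELD_BYTE_OFFSET);
 *
 * records a CO-RE relocation and yields the byte offset of "pid" in the
 * target kernel's layout of the (hypothetical) struct pointed to by "t".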
*/ enum bpf_field_info_kind { BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ BPF_FIELD_BYTE_SIZE = 1, BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ BPF_FIELD_SIGNED = 3, BPF_FIELD_LSHIFT_U64 = 4, BPF_FIELD_RSHIFT_U64 = 5, }; /* second argument to __builtin_btf_type_id() built-in */ enum bpf_type_id_kind { BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */ BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */ }; /* second argument to __builtin_preserve_type_info() built-in */ enum bpf_type_info_kind { BPF_TYPE_EXISTS = 0, /* type existence in target kernel */ BPF_TYPE_SIZE = 1, /* type size in target kernel */ BPF_TYPE_MATCHES = 2, /* type match in target kernel */ }; /* second argument to __builtin_preserve_enum_value() built-in */ enum bpf_enum_value_kind { BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */ BPF_ENUMVAL_VALUE = 1, /* enum value value relocation */ }; #define __CORE_RELO(src, field, info) \ __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ bpf_probe_read_kernel( \ (void *)dst, \ __CORE_RELO(src, fld, BYTE_SIZE), \ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) #else /* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so * for big-endian we need to adjust destination pointer accordingly, based on * field byte size */ #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ bpf_probe_read_kernel( \ (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \ __CORE_RELO(src, fld, BYTE_SIZE), \ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) #endif /* * Extract bitfield, identified by s->field, and return its value as u64. * All this is done in relocatable manner, so bitfield changes such as * signedness, bit size, offset changes, this will be handled automatically. * This version of macro is using bpf_probe_read_kernel() to read underlying * integer storage. Macro functions as an expression and its return type is * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error. */ #define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \ unsigned long long val = 0; \ \ __CORE_BITFIELD_PROBE_READ(&val, s, field); \ val <<= __CORE_RELO(s, field, LSHIFT_U64); \ if (__CORE_RELO(s, field, SIGNED)) \ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ else \ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ val; \ }) /* * Extract bitfield, identified by s->field, and return its value as u64. * This version of macro is using direct memory reads and should be used from * BPF program types that support such functionality (e.g., typed raw * tracepoints). */ #define BPF_CORE_READ_BITFIELD(s, field) ({ \ const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ unsigned long long val; \ \ /* This is a so-called barrier_var() operation that makes specified \ * variable "a black box" for optimizing compiler. \ * It forces compiler to perform BYTE_OFFSET relocation on p and use \ * its calculated value in the switch below, instead of applying \ * the same relocation 4 times for each individual memory load. 
\ */ \ asm volatile("" : "=r"(p) : "0"(p)); \ \ switch (__CORE_RELO(s, field, BYTE_SIZE)) { \ case 1: val = *(const unsigned char *)p; break; \ case 2: val = *(const unsigned short *)p; break; \ case 4: val = *(const unsigned int *)p; break; \ case 8: val = *(const unsigned long long *)p; break; \ default: val = 0; break; \ } \ val <<= __CORE_RELO(s, field, LSHIFT_U64); \ if (__CORE_RELO(s, field, SIGNED)) \ val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ else \ val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ val; \ }) /* * Write to a bitfield, identified by s->field. * This is the inverse of BPF_CORE_WRITE_BITFIELD(). */ #define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({ \ void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE); \ unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64); \ unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64); \ unsigned long long mask, val, nval = new_val; \ unsigned int rpad = rshift - lshift; \ \ asm volatile("" : "+r"(p)); \ \ switch (byte_size) { \ case 1: val = *(unsigned char *)p; break; \ case 2: val = *(unsigned short *)p; break; \ case 4: val = *(unsigned int *)p; break; \ case 8: val = *(unsigned long long *)p; break; \ } \ \ mask = (~0ULL << rshift) >> lshift; \ val = (val & ~mask) | ((nval << rpad) & mask); \ \ switch (byte_size) { \ case 1: *(unsigned char *)p = val; break; \ case 2: *(unsigned short *)p = val; break; \ case 4: *(unsigned int *)p = val; break; \ case 8: *(unsigned long long *)p = val; break; \ } \ }) /* Differentiator between compilers builtin implementations. This is a * requirement due to the compiler parsing differences where GCC optimizes * early in parsing those constructs of type pointers to the builtin specific * type, resulting in not being possible to collect the required type * information in the builtin expansion. */ #ifdef __clang__ #define ___bpf_typeof(type) ((typeof(type) *) 0) #else #define ___bpf_typeof1(type, NR) ({ \ extern typeof(type) *___concat(bpf_type_tmp_, NR); \ ___concat(bpf_type_tmp_, NR); \ }) #define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__) #endif #ifdef __clang__ #define ___bpf_field_ref1(field) (field) #define ___bpf_field_ref2(type, field) (___bpf_typeof(type)->field) #else #define ___bpf_field_ref1(field) (&(field)) #define ___bpf_field_ref2(type, field) (&(___bpf_typeof(type)->field)) #endif #define ___bpf_field_ref(args...) \ ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args) /* * Convenience macro to check that field actually exists in target kernel's. * Returns: * 1, if matching field is present in target kernel; * 0, if no matching field found. * * Supports two forms: * - field reference through variable access: * bpf_core_field_exists(p->my_field); * - field reference through type and field names: * bpf_core_field_exists(struct my_type, my_field). */ #define bpf_core_field_exists(field...) \ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS) /* * Convenience macro to get the byte size of a field. Works for integers, * struct/unions, pointers, arrays, and enums. * * Supports two forms: * - field reference through variable access: * bpf_core_field_size(p->my_field); * - field reference through type and field names: * bpf_core_field_size(struct my_type, my_field). */ #define bpf_core_field_size(field...) \ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE) /* * Convenience macro to get field's byte offset. 
* * Supports two forms: * - field reference through variable access: * bpf_core_field_offset(p->my_field); * - field reference through type and field names: * bpf_core_field_offset(struct my_type, my_field). */ #define bpf_core_field_offset(field...) \ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET) /* * Convenience macro to get BTF type ID of a specified type, using a local BTF * information. Return 32-bit unsigned integer with type ID from program's own * BTF. Always succeeds. */ #define bpf_core_type_id_local(type) \ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL) /* * Convenience macro to get BTF type ID of a target kernel's type that matches * specified local type. * Returns: * - valid 32-bit unsigned type ID in kernel BTF; * - 0, if no matching type was found in a target kernel BTF. */ #define bpf_core_type_id_kernel(type) \ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET) /* * Convenience macro to check that provided named type * (struct/union/enum/typedef) exists in a target kernel. * Returns: * 1, if such type is present in target kernel's BTF; * 0, if no matching type is found. */ #define bpf_core_type_exists(type) \ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS) /* * Convenience macro to check that provided named type * (struct/union/enum/typedef) "matches" that in a target kernel. * Returns: * 1, if the type matches in the target kernel's BTF; * 0, if the type does not match any in the target kernel */ #define bpf_core_type_matches(type) \ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES) /* * Convenience macro to get the byte size of a provided named type * (struct/union/enum/typedef) in a target kernel. * Returns: * >= 0 size (in bytes), if type is present in target kernel's BTF; * 0, if no matching type is found. */ #define bpf_core_type_size(type) \ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE) /* * Convenience macro to check that provided enumerator value is defined in * a target kernel. * Returns: * 1, if specified enum type and its enumerator value are present in target * kernel's BTF; * 0, if no matching enum and/or enum value within that enum is found. */ #ifdef __clang__ #define bpf_core_enum_value_exists(enum_type, enum_value) \ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS) #else #define bpf_core_enum_value_exists(enum_type, enum_value) \ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS) #endif /* * Convenience macro to get the integer value of an enumerator value in * a target kernel. * Returns: * 64-bit value, if specified enum type and its enumerator value are * present in target kernel's BTF; * 0, if no matching enum and/or enum value within that enum is found. */ #ifdef __clang__ #define bpf_core_enum_value(enum_type, enum_value) \ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE) #else #define bpf_core_enum_value(enum_type, enum_value) \ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE) #endif /* * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures * offset relocation for source address using __builtin_preserve_access_index() * built-in, provided by Clang. * * __builtin_preserve_access_index() takes as an argument an expression of * taking an address of a field within struct/union. 
It makes compiler emit * a relocation, which records BTF type ID describing root struct/union and an * accessor string which describes exact embedded field that was used to take * an address. See detailed description of this relocation format and * semantics in comments to struct bpf_core_relo in include/uapi/linux/bpf.h. * * This relocation allows libbpf to adjust BPF instruction to use correct * actual field offset, based on target kernel BTF type that matches original * (local) BTF, used to record relocation. */ #define bpf_core_read(dst, sz, src) \ bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src)) /* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ #define bpf_core_read_user(dst, sz, src) \ bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src)) /* * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str() * additionally emitting BPF CO-RE field relocation for specified source * argument. */ #define bpf_core_read_str(dst, sz, src) \ bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) /* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ #define bpf_core_read_user_str(dst, sz, src) \ bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak; /* * Cast provided pointer *ptr* into a pointer to a specified *type* in such * a way that BPF verifier will become aware of associated kernel-side BTF * type. This allows to access members of kernel types directly without the * need to use BPF_CORE_READ() macros. */ #define bpf_core_cast(ptr, type) \ ((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type))) #define ___concat(a, b) a ## b #define ___apply(fn, n) ___concat(fn, n) #define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N /* * return number of provided arguments; used for switch-based variadic macro * definitions (see ___last, ___arrow, etc below) */ #define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) /* * return 0 if no arguments are passed, N - otherwise; used for * recursively-defined macros to specify termination (0) case, and generic * (N) case (e.g., ___read_ptrs, ___core_read) */ #define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) #define ___last1(x) x #define ___last2(a, x) x #define ___last3(a, b, x) x #define ___last4(a, b, c, x) x #define ___last5(a, b, c, d, x) x #define ___last6(a, b, c, d, e, x) x #define ___last7(a, b, c, d, e, f, x) x #define ___last8(a, b, c, d, e, f, g, x) x #define ___last9(a, b, c, d, e, f, g, h, x) x #define ___last10(a, b, c, d, e, f, g, h, i, x) x #define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__) #define ___nolast2(a, _) a #define ___nolast3(a, b, _) a, b #define ___nolast4(a, b, c, _) a, b, c #define ___nolast5(a, b, c, d, _) a, b, c, d #define ___nolast6(a, b, c, d, e, _) a, b, c, d, e #define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f #define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g #define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h #define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i #define ___nolast(...) 
___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__) #define ___arrow1(a) a #define ___arrow2(a, b) a->b #define ___arrow3(a, b, c) a->b->c #define ___arrow4(a, b, c, d) a->b->c->d #define ___arrow5(a, b, c, d, e) a->b->c->d->e #define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f #define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g #define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h #define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) #define ___type(...) typeof(___arrow(__VA_ARGS__)) #define ___read(read_fn, dst, src_type, src, accessor) \ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) /* "recursively" read a sequence of inner pointers using local __t var */ #define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a); #define ___rd_last(fn, ...) \ ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__)); #define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__) #define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) #define ___read_ptrs(fn, src, ...) \ ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__) #define ___core_read0(fn, fn_ptr, dst, src, a) \ ___read(fn, dst, ___type(src), src, a); #define ___core_readN(fn, fn_ptr, dst, src, ...) \ ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \ ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \ ___last(__VA_ARGS__)); #define ___core_read(fn, fn_ptr, dst, src, a, ...) \ ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \ src, a, ##__VA_ARGS__) /* * BPF_CORE_READ_INTO() is a more performance-conscious variant of * BPF_CORE_READ(), in which final field is read into user-provided storage. * See BPF_CORE_READ() below for more details on general usage. */ #define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_core_read, bpf_core_read, \ dst, (src), a, ##__VA_ARGS__) \ }) /* * Variant of BPF_CORE_READ_INTO() for reading from user-space memory. * * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ #define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_core_read_user, bpf_core_read_user, \ dst, (src), a, ##__VA_ARGS__) \ }) /* Non-CO-RE variant of BPF_CORE_READ_INTO() */ #define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \ dst, (src), a, ##__VA_ARGS__) \ }) /* Non-CO-RE variant of BPF_CORE_READ_USER_INTO(). * * As no CO-RE relocations are emitted, source types can be arbitrary and are * not restricted to kernel types only. */ #define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) 
({ \ ___core_read(bpf_probe_read_user, bpf_probe_read_user, \ dst, (src), a, ##__VA_ARGS__) \ }) /* * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as * BPF_CORE_READ() for intermediate pointers, but then executes (and returns * corresponding error code) bpf_core_read_str() for final string read. */ #define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_core_read_str, bpf_core_read, \ dst, (src), a, ##__VA_ARGS__) \ }) /* * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory. * * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ #define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_core_read_user_str, bpf_core_read_user, \ dst, (src), a, ##__VA_ARGS__) \ }) /* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */ #define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \ dst, (src), a, ##__VA_ARGS__) \ }) /* * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO(). * * As no CO-RE relocations are emitted, source types can be arbitrary and are * not restricted to kernel types only. */ #define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \ ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \ dst, (src), a, ##__VA_ARGS__) \ }) /* * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially * when there are few pointer chasing steps. * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like: * int x = s->a.b.c->d.e->f->g; * can be succinctly achieved using BPF_CORE_READ as: * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g); * * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically * equivalent to: * 1. const void *__t = s->a.b.c; * 2. __t = __t->d.e; * 3. __t = __t->f; * 4. return __t->g; * * Equivalence is logical, because there is a heavy type casting/preservation * involved, as well as all the reads are happening through * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to * emit CO-RE relocations. * * N.B. Only up to 9 "field accessors" are supported, which should be more * than enough for any practical purpose. */ #define BPF_CORE_READ(src, a, ...) ({ \ ___type((src), a, ##__VA_ARGS__) __r; \ BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) /* * Variant of BPF_CORE_READ() for reading from user-space memory. * * NOTE: all the source types involved are still *kernel types* and need to * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will * fail. Custom user types are not relocatable with CO-RE. * The typical situation in which BPF_CORE_READ_USER() might be used is to * read kernel UAPI types from the user-space memory passed in as a syscall * input argument. */ #define BPF_CORE_READ_USER(src, a, ...) ({ \ ___type((src), a, ##__VA_ARGS__) __r; \ BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) /* Non-CO-RE variant of BPF_CORE_READ() */ #define BPF_PROBE_READ(src, a, ...) ({ \ ___type((src), a, ##__VA_ARGS__) __r; \ BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) /* * Non-CO-RE variant of BPF_CORE_READ_USER(). * * As no CO-RE relocations are emitted, source types can be arbitrary and are * not restricted to kernel types only. */ #define BPF_PROBE_READ_USER(src, a, ...) 
({ \ ___type((src), a, ##__VA_ARGS__) __r; \ BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) #endif xdp-tools-1.5.4/lib/libbpf/src/ringbuf.c0000644000175100001660000003755514706536574017450 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Ring buffer operations. * * Copyright (C) 2020 Facebook, Inc. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include "libbpf.h" #include "libbpf_internal.h" #include "bpf.h" struct ring { ring_buffer_sample_fn sample_cb; void *ctx; void *data; unsigned long *consumer_pos; unsigned long *producer_pos; unsigned long mask; int map_fd; }; struct ring_buffer { struct epoll_event *events; struct ring **rings; size_t page_size; int epoll_fd; int ring_cnt; }; struct user_ring_buffer { struct epoll_event event; unsigned long *consumer_pos; unsigned long *producer_pos; void *data; unsigned long mask; size_t page_size; int map_fd; int epoll_fd; }; /* 8-byte ring buffer header structure */ struct ringbuf_hdr { __u32 len; __u32 pad; }; static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r) { if (r->consumer_pos) { munmap(r->consumer_pos, rb->page_size); r->consumer_pos = NULL; } if (r->producer_pos) { munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); r->producer_pos = NULL; } free(r); } /* Add extra RINGBUF maps to this ring buffer manager */ int ring_buffer__add(struct ring_buffer *rb, int map_fd, ring_buffer_sample_fn sample_cb, void *ctx) { struct bpf_map_info info; __u32 len = sizeof(info); struct epoll_event *e; struct ring *r; __u64 mmap_sz; void *tmp; int err; memset(&info, 0, sizeof(info)); err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; pr_warn("ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err); return libbpf_err(err); } if (info.type != BPF_MAP_TYPE_RINGBUF) { pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n", map_fd); return libbpf_err(-EINVAL); } tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); if (!tmp) return libbpf_err(-ENOMEM); rb->rings = tmp; tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); if (!tmp) return libbpf_err(-ENOMEM); rb->events = tmp; r = calloc(1, sizeof(*r)); if (!r) return libbpf_err(-ENOMEM); rb->rings[rb->ring_cnt] = r; r->map_fd = map_fd; r->sample_cb = sample_cb; r->ctx = ctx; r->mask = info.max_entries - 1; /* Map writable consumer page */ tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", map_fd, err); goto err_out; } r->consumer_pos = tmp; /* Map read-only producer page and data pages. We map twice as big * data size to allow simple reading of samples that wrap around the * end of a ring buffer. See kernel implementation for details. 
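	 * In short: a sample that wraps at the end of the ring can still be
	 * read with a single contiguous access, because the bytes that
	 * logically wrap around to the start are also visible right past the
	 * end of the first copy of the data area.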
*/ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; if (mmap_sz != (__u64)(size_t)mmap_sz) { err = -E2BIG; pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries); goto err_out; } tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n", map_fd, err); goto err_out; } r->producer_pos = tmp; r->data = tmp + rb->page_size; e = &rb->events[rb->ring_cnt]; memset(e, 0, sizeof(*e)); e->events = EPOLLIN; e->data.fd = rb->ring_cnt; if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) { err = -errno; pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err); goto err_out; } rb->ring_cnt++; return 0; err_out: ringbuf_free_ring(rb, r); return libbpf_err(err); } void ring_buffer__free(struct ring_buffer *rb) { int i; if (!rb) return; for (i = 0; i < rb->ring_cnt; ++i) ringbuf_free_ring(rb, rb->rings[i]); if (rb->epoll_fd >= 0) close(rb->epoll_fd); free(rb->events); free(rb->rings); free(rb); } struct ring_buffer * ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, const struct ring_buffer_opts *opts) { struct ring_buffer *rb; int err; if (!OPTS_VALID(opts, ring_buffer_opts)) return errno = EINVAL, NULL; rb = calloc(1, sizeof(*rb)); if (!rb) return errno = ENOMEM, NULL; rb->page_size = getpagesize(); rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (rb->epoll_fd < 0) { err = -errno; pr_warn("ringbuf: failed to create epoll instance: %d\n", err); goto err_out; } err = ring_buffer__add(rb, map_fd, sample_cb, ctx); if (err) goto err_out; return rb; err_out: ring_buffer__free(rb); return errno = -err, NULL; } static inline int roundup_len(__u32 len) { /* clear out top 2 bits (discard and busy, if set) */ len <<= 2; len >>= 2; /* add length prefix */ len += BPF_RINGBUF_HDR_SZ; /* round up to 8 byte alignment */ return (len + 7) / 8 * 8; } static int64_t ringbuf_process_ring(struct ring *r, size_t n) { int *len_ptr, len, err; /* 64-bit to avoid overflow in case of extreme application behavior */ int64_t cnt = 0; unsigned long cons_pos, prod_pos; bool got_new_data; void *sample; cons_pos = smp_load_acquire(r->consumer_pos); do { got_new_data = false; prod_pos = smp_load_acquire(r->producer_pos); while (cons_pos < prod_pos) { len_ptr = r->data + (cons_pos & r->mask); len = smp_load_acquire(len_ptr); /* sample not committed yet, bail out for now */ if (len & BPF_RINGBUF_BUSY_BIT) goto done; got_new_data = true; cons_pos += roundup_len(len); if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) { sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ; err = r->sample_cb(r->ctx, sample, len); if (err < 0) { /* update consumer pos and bail out */ smp_store_release(r->consumer_pos, cons_pos); return err; } cnt++; } smp_store_release(r->consumer_pos, cons_pos); if (cnt >= n) goto done; } } while (got_new_data); done: return cnt; } /* Consume available ring buffer(s) data without event polling, up to n * records. * * Returns number of records consumed across all registered ring buffers (or * n, whichever is less), or negative number if any of the callbacks return * error. */ int ring_buffer__consume_n(struct ring_buffer *rb, size_t n) { int64_t err, res = 0; int i; for (i = 0; i < rb->ring_cnt; i++) { struct ring *ring = rb->rings[i]; err = ringbuf_process_ring(ring, n); if (err < 0) return libbpf_err(err); res += err; n -= err; if (n == 0) break; } return res > INT_MAX ? INT_MAX : res; } /* Consume available ring buffer(s) data without event polling. 
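 * This is the unbounded variant of ring_buffer__consume_n() above.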
* Returns number of records consumed across all registered ring buffers (or * INT_MAX, whichever is less), or negative number if any of the callbacks * return error. */ int ring_buffer__consume(struct ring_buffer *rb) { int64_t err, res = 0; int i; for (i = 0; i < rb->ring_cnt; i++) { struct ring *ring = rb->rings[i]; err = ringbuf_process_ring(ring, INT_MAX); if (err < 0) return libbpf_err(err); res += err; if (res > INT_MAX) { res = INT_MAX; break; } } return res; } /* Poll for available data and consume records, if any are available. * Returns number of records consumed (or INT_MAX, whichever is less), or * negative number, if any of the registered callbacks returned error. */ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) { int i, cnt; int64_t err, res = 0; cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); if (cnt < 0) return libbpf_err(-errno); for (i = 0; i < cnt; i++) { __u32 ring_id = rb->events[i].data.fd; struct ring *ring = rb->rings[ring_id]; err = ringbuf_process_ring(ring, INT_MAX); if (err < 0) return libbpf_err(err); res += err; } if (res > INT_MAX) res = INT_MAX; return res; } /* Get an fd that can be used to sleep until data is available in the ring(s) */ int ring_buffer__epoll_fd(const struct ring_buffer *rb) { return rb->epoll_fd; } struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx) { if (idx >= rb->ring_cnt) return errno = ERANGE, NULL; return rb->rings[idx]; } unsigned long ring__consumer_pos(const struct ring *r) { /* Synchronizes with smp_store_release() in ringbuf_process_ring(). */ return smp_load_acquire(r->consumer_pos); } unsigned long ring__producer_pos(const struct ring *r) { /* Synchronizes with smp_store_release() in __bpf_ringbuf_reserve() in * the kernel. */ return smp_load_acquire(r->producer_pos); } size_t ring__avail_data_size(const struct ring *r) { unsigned long cons_pos, prod_pos; cons_pos = ring__consumer_pos(r); prod_pos = ring__producer_pos(r); return prod_pos - cons_pos; } size_t ring__size(const struct ring *r) { return r->mask + 1; } int ring__map_fd(const struct ring *r) { return r->map_fd; } int ring__consume_n(struct ring *r, size_t n) { int64_t res; res = ringbuf_process_ring(r, n); if (res < 0) return libbpf_err(res); return res > INT_MAX ? 
INT_MAX : res; } int ring__consume(struct ring *r) { return ring__consume_n(r, INT_MAX); } static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb) { if (rb->consumer_pos) { munmap(rb->consumer_pos, rb->page_size); rb->consumer_pos = NULL; } if (rb->producer_pos) { munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1)); rb->producer_pos = NULL; } } void user_ring_buffer__free(struct user_ring_buffer *rb) { if (!rb) return; user_ringbuf_unmap_ring(rb); if (rb->epoll_fd >= 0) close(rb->epoll_fd); free(rb); } static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) { struct bpf_map_info info; __u32 len = sizeof(info); __u64 mmap_sz; void *tmp; struct epoll_event *rb_epoll; int err; memset(&info, 0, sizeof(info)); err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err); return err; } if (info.type != BPF_MAP_TYPE_USER_RINGBUF) { pr_warn("user ringbuf: map fd=%d is not BPF_MAP_TYPE_USER_RINGBUF\n", map_fd); return -EINVAL; } rb->map_fd = map_fd; rb->mask = info.max_entries - 1; /* Map read-only consumer page */ tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n", map_fd, err); return err; } rb->consumer_pos = tmp; /* Map read-write the producer page and data pages. We map the data * region as twice the total size of the ring buffer to allow the * simple reading and writing of samples that wrap around the end of * the buffer. See the kernel implementation for details. */ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; if (mmap_sz != (__u64)(size_t)mmap_sz) { pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries); return -E2BIG; } tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n", map_fd, err); return err; } rb->producer_pos = tmp; rb->data = tmp + rb->page_size; rb_epoll = &rb->event; rb_epoll->events = EPOLLOUT; if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) { err = -errno; pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err); return err; } return 0; } struct user_ring_buffer * user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts) { struct user_ring_buffer *rb; int err; if (!OPTS_VALID(opts, user_ring_buffer_opts)) return errno = EINVAL, NULL; rb = calloc(1, sizeof(*rb)); if (!rb) return errno = ENOMEM, NULL; rb->page_size = getpagesize(); rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (rb->epoll_fd < 0) { err = -errno; pr_warn("user ringbuf: failed to create epoll instance: %d\n", err); goto err_out; } err = user_ringbuf_map(rb, map_fd); if (err) goto err_out; return rb; err_out: user_ring_buffer__free(rb); return errno = -err, NULL; } static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard) { __u32 new_len; struct ringbuf_hdr *hdr; uintptr_t hdr_offset; hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ; hdr = rb->data + (hdr_offset & rb->mask); new_len = hdr->len & ~BPF_RINGBUF_BUSY_BIT; if (discard) new_len |= BPF_RINGBUF_DISCARD_BIT; /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in * the kernel. 
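	 * Clearing the busy bit with release semantics is what publishes the
	 * sample: the kernel side will not look at the sample data before it
	 * observes the updated length word with the busy bit cleared.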
*/ __atomic_exchange_n(&hdr->len, new_len, __ATOMIC_ACQ_REL); } void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample) { user_ringbuf_commit(rb, sample, true); } void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample) { user_ringbuf_commit(rb, sample, false); } void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size) { __u32 avail_size, total_size, max_size; /* 64-bit to avoid overflow in case of extreme application behavior */ __u64 cons_pos, prod_pos; struct ringbuf_hdr *hdr; /* The top two bits are used as special flags */ if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT)) return errno = E2BIG, NULL; /* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in * the kernel. */ cons_pos = smp_load_acquire(rb->consumer_pos); /* Synchronizes with smp_store_release() in user_ringbuf_commit() */ prod_pos = smp_load_acquire(rb->producer_pos); max_size = rb->mask + 1; avail_size = max_size - (prod_pos - cons_pos); /* Round up total size to a multiple of 8. */ total_size = (size + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8; if (total_size > max_size) return errno = E2BIG, NULL; if (avail_size < total_size) return errno = ENOSPC, NULL; hdr = rb->data + (prod_pos & rb->mask); hdr->len = size | BPF_RINGBUF_BUSY_BIT; hdr->pad = 0; /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in * the kernel. */ smp_store_release(rb->producer_pos, prod_pos + total_size); return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask); } static __u64 ns_elapsed_timespec(const struct timespec *start, const struct timespec *end) { __u64 start_ns, end_ns, ns_per_s = 1000000000; start_ns = (__u64)start->tv_sec * ns_per_s + start->tv_nsec; end_ns = (__u64)end->tv_sec * ns_per_s + end->tv_nsec; return end_ns - start_ns; } void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms) { void *sample; int err, ms_remaining = timeout_ms; struct timespec start; if (timeout_ms < 0 && timeout_ms != -1) return errno = EINVAL, NULL; if (timeout_ms != -1) { err = clock_gettime(CLOCK_MONOTONIC, &start); if (err) return NULL; } do { int cnt, ms_elapsed; struct timespec curr; __u64 ns_per_ms = 1000000; sample = user_ring_buffer__reserve(rb, size); if (sample) return sample; else if (errno != ENOSPC) return NULL; /* The kernel guarantees at least one event notification * delivery whenever at least one sample is drained from the * ring buffer in an invocation to bpf_ringbuf_drain(). Other * additional events may be delivered at any time, but only one * event is guaranteed per bpf_ringbuf_drain() invocation, * provided that a sample is drained, and the BPF program did * not pass BPF_RB_NO_WAKEUP to bpf_ringbuf_drain(). If * BPF_RB_FORCE_WAKEUP is passed to bpf_ringbuf_drain(), a * wakeup event will be delivered even if no samples are * drained. */ cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining); if (cnt < 0) return NULL; if (timeout_ms == -1) continue; err = clock_gettime(CLOCK_MONOTONIC, &curr); if (err) return NULL; ms_elapsed = ns_elapsed_timespec(&start, &curr) / ns_per_ms; ms_remaining = timeout_ms - ms_elapsed; } while (ms_remaining > 0); /* Try one more time to reserve a sample after the specified timeout has elapsed. */ return user_ring_buffer__reserve(rb, size); } xdp-tools-1.5.4/lib/libbpf/src/btf_relocate.c0000644000175100001660000003446214706536574020437 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2024, Oracle and/or its affiliates. 
*/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifdef __KERNEL__ #include #include #include #include #include #include #define btf_type_by_id (struct btf_type *)btf_type_by_id #define btf__type_cnt btf_nr_types #define btf__base_btf btf_base_btf #define btf__name_by_offset btf_name_by_offset #define btf__str_by_offset btf_str_by_offset #define btf_kflag btf_type_kflag #define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN) #define free(ptr) kvfree(ptr) #define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL) #else #include "btf.h" #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" #endif /* __KERNEL__ */ struct btf; struct btf_relocate { struct btf *btf; const struct btf *base_btf; const struct btf *dist_base_btf; unsigned int nr_base_types; unsigned int nr_split_types; unsigned int nr_dist_base_types; int dist_str_len; int base_str_len; __u32 *id_map; __u32 *str_map; }; /* Set temporarily in relocation id_map if distilled base struct/union is * embedded in a split BTF struct/union; in such a case, size information must * match between distilled base BTF and base BTF representation of type. */ #define BTF_IS_EMBEDDED ((__u32)-1) /* triple used in sorting/searching distilled base BTF. */ struct btf_name_info { const char *name; /* set when search requires a size match */ bool needs_size: 1; unsigned int size: 31; __u32 id; }; static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i) { struct btf_type *t = btf_type_by_id(r->btf, i); struct btf_field_iter it; __u32 *id; int err; err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) return err; while ((id = btf_field_iter_next(&it))) *id = r->id_map[*id]; return 0; } /* Simple string comparison used for sorting within BTF, since all distilled * types are named. If strings match, and size is non-zero for both elements * fall back to using size for ordering. */ static int cmp_btf_name_size(const void *n1, const void *n2) { const struct btf_name_info *ni1 = n1; const struct btf_name_info *ni2 = n2; int name_diff = strcmp(ni1->name, ni2->name); if (!name_diff && ni1->needs_size && ni2->needs_size) return ni2->size - ni1->size; return name_diff; } /* Binary search with a small twist; find leftmost element that matches * so that we can then iterate through all exact matches. So for example * searching { "a", "bb", "bb", "c" } we would always match on the * leftmost "bb". */ static struct btf_name_info *search_btf_name_size(struct btf_name_info *key, struct btf_name_info *vals, int nelems) { struct btf_name_info *ret = NULL; int high = nelems - 1; int low = 0; while (low <= high) { int mid = (low + high)/2; struct btf_name_info *val = &vals[mid]; int diff = cmp_btf_name_size(key, val); if (diff == 0) ret = val; /* even if found, keep searching for leftmost match */ if (diff <= 0) high = mid - 1; else low = mid + 1; } return ret; } /* If a member of a split BTF struct/union refers to a base BTF * struct/union, mark that struct/union id temporarily in the id_map * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef * reference types, but if a pointer is encountered, the type is no longer * considered embedded. 
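 * For example, a split BTF struct with a member 'struct mutex lock;' keeps
 * 'struct mutex' embedded (so sizes must match), whereas a member declared
 * as 'struct mutex *lock;' does not. ('struct mutex' is used here purely as
 * an illustration.)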
*/ static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i) { struct btf_type *t = btf_type_by_id(r->btf, i); struct btf_field_iter it; __u32 *id; int err; if (!btf_is_composite(t)) return 0; err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) return err; while ((id = btf_field_iter_next(&it))) { __u32 next_id = *id; while (next_id) { t = btf_type_by_id(r->btf, next_id); switch (btf_kind(t)) { case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VOLATILE: case BTF_KIND_TYPEDEF: case BTF_KIND_TYPE_TAG: next_id = t->type; break; case BTF_KIND_ARRAY: { struct btf_array *a = btf_array(t); next_id = a->type; break; } case BTF_KIND_STRUCT: case BTF_KIND_UNION: if (next_id < r->nr_dist_base_types) r->id_map[next_id] = BTF_IS_EMBEDDED; next_id = 0; break; default: next_id = 0; break; } } } return 0; } /* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate * through base BTF looking up distilled type (using binary search) equivalents. */ static int btf_relocate_map_distilled_base(struct btf_relocate *r) { struct btf_name_info *info, *info_end; struct btf_type *base_t, *dist_t; __u8 *base_name_cnt = NULL; int err = 0; __u32 id; /* generate a sort index array of name/type ids sorted by name for * distilled base BTF to speed name-based lookups. */ info = calloc(r->nr_dist_base_types, sizeof(*info)); if (!info) { err = -ENOMEM; goto done; } info_end = info + r->nr_dist_base_types; for (id = 0; id < r->nr_dist_base_types; id++) { dist_t = btf_type_by_id(r->dist_base_btf, id); info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); info[id].id = id; info[id].size = dist_t->size; info[id].needs_size = true; } qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size); /* Mark distilled base struct/union members of split BTF structs/unions * in id_map with BTF_IS_EMBEDDED; this signals that these types * need to match both name and size, otherwise embedding the base * struct/union in the split type is invalid. */ for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) { err = btf_mark_embedded_composite_type_ids(r, id); if (err) goto done; } /* Collect name counts for composite types in base BTF. If multiple * instances of a struct/union of the same name exist, we need to use * size to determine which to map to since name alone is ambiguous. */ base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt)); if (!base_name_cnt) { err = -ENOMEM; goto done; } for (id = 1; id < r->nr_base_types; id++) { base_t = btf_type_by_id(r->base_btf, id); if (!btf_is_composite(base_t) || !base_t->name_off) continue; if (base_name_cnt[base_t->name_off] < 255) base_name_cnt[base_t->name_off]++; } /* Now search base BTF for matching distilled base BTF types. */ for (id = 1; id < r->nr_base_types; id++) { struct btf_name_info *dist_info, base_info = {}; int dist_kind, base_kind; base_t = btf_type_by_id(r->base_btf, id); /* distilled base consists of named types only. */ if (!base_t->name_off) continue; base_kind = btf_kind(base_t); base_info.id = id; base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); switch (base_kind) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* These types should match both name and size */ base_info.needs_size = true; base_info.size = base_t->size; break; case BTF_KIND_FWD: /* No size considerations for fwds. 
*/ break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: /* Size only needs to be used for struct/union if there * are multiple types in base BTF with the same name. * If there are multiple _distilled_ types with the same * name (a very unlikely scenario), that doesn't matter * unless corresponding _base_ types to match them are * missing. */ base_info.needs_size = base_name_cnt[base_t->name_off] > 1; base_info.size = base_t->size; break; default: continue; } /* iterate over all matching distilled base types */ for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types); dist_info != NULL && dist_info < info_end && cmp_btf_name_size(&base_info, dist_info) == 0; dist_info++) { if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) { pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n", id, dist_info->id); err = -EINVAL; goto done; } dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id); dist_kind = btf_kind(dist_t); /* Validate that the found distilled type is compatible. * Do not error out on mismatch as another match may * occur for an identically-named type. */ switch (dist_kind) { case BTF_KIND_FWD: switch (base_kind) { case BTF_KIND_FWD: if (btf_kflag(dist_t) != btf_kflag(base_t)) continue; break; case BTF_KIND_STRUCT: if (btf_kflag(base_t)) continue; break; case BTF_KIND_UNION: if (!btf_kflag(base_t)) continue; break; default: continue; } break; case BTF_KIND_INT: if (dist_kind != base_kind || btf_int_encoding(base_t) != btf_int_encoding(dist_t)) continue; break; case BTF_KIND_FLOAT: if (dist_kind != base_kind) continue; break; case BTF_KIND_ENUM: /* ENUM and ENUM64 are encoded as sized ENUM in * distilled base BTF. */ if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64) continue; break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: /* size verification is required for embedded * struct/unions. */ if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED && base_t->size != dist_t->size) continue; break; default: continue; } if (r->id_map[dist_info->id] && r->id_map[dist_info->id] != BTF_IS_EMBEDDED) { /* we already have a match; this tells us that * multiple base types of the same name * have the same size, since for cases where * multiple types have the same name we match * on name and size. In this case, we have * no way of determining which to relocate * to in base BTF, so error out. */ pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n", base_info.name, dist_info->id, base_t->size, id, r->id_map[dist_info->id]); err = -EINVAL; goto done; } /* map id and name */ r->id_map[dist_info->id] = id; r->str_map[dist_t->name_off] = base_t->name_off; } } /* ensure all distilled BTF ids now have a mapping... */ for (id = 1; id < r->nr_dist_base_types; id++) { const char *name; if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED) continue; dist_t = btf_type_by_id(r->dist_base_btf, id); name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n", name, id); err = -EINVAL; break; } done: free(base_name_cnt); free(info); return err; } /* distilled base should only have named int/float/enum/fwd/struct/union types. 
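 * Any other kind (pointers, arrays, functions, ...) is expected to live in
 * the split BTF instead, so encountering one here means the input is
 * malformed.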
*/ static int btf_relocate_validate_distilled_base(struct btf_relocate *r) { unsigned int i; for (i = 1; i < r->nr_dist_base_types; i++) { struct btf_type *t = btf_type_by_id(r->dist_base_btf, i); int kind = btf_kind(t); switch (kind) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_FWD: if (t->name_off) break; pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n", i, kind); return -EINVAL; default: pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n", i, kind); return -EINVAL; } } return 0; } static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i) { struct btf_type *t = btf_type_by_id(r->btf, i); struct btf_field_iter it; __u32 *str_off; int off, err; err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (err) return err; while ((str_off = btf_field_iter_next(&it))) { if (!*str_off) continue; if (*str_off >= r->dist_str_len) { *str_off += r->base_str_len - r->dist_str_len; } else { off = r->str_map[*str_off]; if (!off) { pr_warn("string '%s' [offset %u] is not mapped to base BTF\n", btf__str_by_offset(r->btf, off), *str_off); return -ENOENT; } *str_off = off; } } return 0; } /* If successful, output of relocation is updated BTF with base BTF pointing * at base_btf, and type ids, strings adjusted accordingly. */ int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map) { unsigned int nr_types = btf__type_cnt(btf); const struct btf_header *dist_base_hdr; const struct btf_header *base_hdr; struct btf_relocate r = {}; int err = 0; __u32 id, i; r.dist_base_btf = btf__base_btf(btf); if (!base_btf || r.dist_base_btf == base_btf) return -EINVAL; r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf); r.nr_base_types = btf__type_cnt(base_btf); r.nr_split_types = nr_types - r.nr_dist_base_types; r.btf = btf; r.base_btf = base_btf; r.id_map = calloc(nr_types, sizeof(*r.id_map)); r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map)); dist_base_hdr = btf_header(r.dist_base_btf); base_hdr = btf_header(r.base_btf); r.dist_str_len = dist_base_hdr->str_len; r.base_str_len = base_hdr->str_len; if (!r.id_map || !r.str_map) { err = -ENOMEM; goto err_out; } err = btf_relocate_validate_distilled_base(&r); if (err) goto err_out; /* Split BTF ids need to be adjusted as base and distilled base * have different numbers of types, changing the start id of split * BTF. */ for (id = r.nr_dist_base_types; id < nr_types; id++) r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types; /* Build a map from distilled base ids to actual base BTF ids; it is used * to update split BTF id references. Also build a str_map mapping from * distilled base BTF names to base BTF names. */ err = btf_relocate_map_distilled_base(&r); if (err) goto err_out; /* Next, rewrite type ids in split BTF, replacing split ids with updated * ids based on number of types in base BTF, and base ids with * relocated ids from base_btf. */ for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) { err = btf_relocate_rewrite_type_id(&r, id); if (err) goto err_out; } /* String offsets now need to be updated using the str_map. 
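 * Offsets into the distilled base string section are looked up in str_map;
 * offsets past it (the split BTF's own strings) are simply shifted by the
 * difference between the base and distilled string section sizes.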
*/ for (i = 0; i < r.nr_split_types; i++) { err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types); if (err) goto err_out; } /* Finally reset base BTF to be base_btf */ btf_set_base_btf(btf, base_btf); if (id_map) { *id_map = r.id_map; r.id_map = NULL; } err_out: free(r.id_map); free(r.str_map); return err; } xdp-tools-1.5.4/lib/libbpf/src/libbpf_version.h0000644000175100001660000000036214706536574021006 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (C) 2021 Facebook */ #ifndef __LIBBPF_VERSION_H #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 1 #define LIBBPF_MINOR_VERSION 5 #endif /* __LIBBPF_VERSION_H */ xdp-tools-1.5.4/lib/libbpf/src/nlattr.h0000644000175100001660000001036014706536574017306 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * NETLINK Netlink attributes * * Copyright (c) 2003-2013 Thomas Graf */ #ifndef __LIBBPF_NLATTR_H #define __LIBBPF_NLATTR_H #include #include #include #include #include #include /* avoid multiple definition of netlink features */ #define __LINUX_NETLINK_H /** * Standard attribute types to specify validation policy */ enum { LIBBPF_NLA_UNSPEC, /**< Unspecified type, binary data chunk */ LIBBPF_NLA_U8, /**< 8 bit integer */ LIBBPF_NLA_U16, /**< 16 bit integer */ LIBBPF_NLA_U32, /**< 32 bit integer */ LIBBPF_NLA_U64, /**< 64 bit integer */ LIBBPF_NLA_STRING, /**< NUL terminated character string */ LIBBPF_NLA_FLAG, /**< Flag */ LIBBPF_NLA_MSECS, /**< Micro seconds (64bit) */ LIBBPF_NLA_NESTED, /**< Nested attributes */ __LIBBPF_NLA_TYPE_MAX, }; #define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1) /** * @ingroup attr * Attribute validation policy. * * See section @core_doc{core_attr_parse,Attribute Parsing} for more details. 
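 *
 * A policy is typically an array indexed by attribute type, passed as the
 * last argument to libbpf_nla_parse(). A hypothetical example (assuming
 * IFLA_MTU from <linux/if_link.h>):
 *
 *	struct libbpf_nla_policy policy[] = {
 *		[IFLA_MTU] = { .type = LIBBPF_NLA_U32 },
 *	};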
*/ struct libbpf_nla_policy { /** Type of attribute or LIBBPF_NLA_UNSPEC */ uint16_t type; /** Minimal length of payload required */ uint16_t minlen; /** Maximal length of payload allowed */ uint16_t maxlen; }; struct libbpf_nla_req { struct nlmsghdr nh; union { struct ifinfomsg ifinfo; struct tcmsg tc; struct genlmsghdr gnl; }; char buf[128]; }; /** * @ingroup attr * Iterate over a stream of attributes * @arg pos loop counter, set to current attribute * @arg head head of attribute stream * @arg len length of attribute stream * @arg rem initialized to len, holds bytes currently remaining in stream */ #define libbpf_nla_for_each_attr(pos, head, len, rem) \ for (pos = head, rem = len; \ nla_ok(pos, rem); \ pos = nla_next(pos, &(rem))) /** * libbpf_nla_data - head of payload * @nla: netlink attribute */ static inline void *libbpf_nla_data(const struct nlattr *nla) { return (void *)nla + NLA_HDRLEN; } static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla) { return *(uint8_t *)libbpf_nla_data(nla); } static inline uint16_t libbpf_nla_getattr_u16(const struct nlattr *nla) { return *(uint16_t *)libbpf_nla_data(nla); } static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla) { return *(uint32_t *)libbpf_nla_data(nla); } static inline uint64_t libbpf_nla_getattr_u64(const struct nlattr *nla) { return *(uint64_t *)libbpf_nla_data(nla); } static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla) { return (const char *)libbpf_nla_data(nla); } /** * libbpf_nla_len - length of payload * @nla: netlink attribute */ static inline int libbpf_nla_len(const struct nlattr *nla) { return nla->nla_len - NLA_HDRLEN; } int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct libbpf_nla_policy *policy); int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, struct libbpf_nla_policy *policy); int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh); static inline struct nlattr *nla_data(struct nlattr *nla) { return (struct nlattr *)((void *)nla + NLA_HDRLEN); } static inline struct nlattr *req_tail(struct libbpf_nla_req *req) { return (struct nlattr *)((void *)req + NLMSG_ALIGN(req->nh.nlmsg_len)); } static inline int nlattr_add(struct libbpf_nla_req *req, int type, const void *data, int len) { struct nlattr *nla; if (NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > sizeof(*req)) return -EMSGSIZE; if (!!data != !!len) return -EINVAL; nla = req_tail(req); nla->nla_type = type; nla->nla_len = NLA_HDRLEN + len; if (data) memcpy(nla_data(nla), data, len); req->nh.nlmsg_len = NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(nla->nla_len); return 0; } static inline struct nlattr *nlattr_begin_nested(struct libbpf_nla_req *req, int type) { struct nlattr *tail; tail = req_tail(req); if (nlattr_add(req, type | NLA_F_NESTED, NULL, 0)) return NULL; return tail; } static inline void nlattr_end_nested(struct libbpf_nla_req *req, struct nlattr *tail) { tail->nla_len = (void *)req_tail(req) - (void *)tail; } #endif /* __LIBBPF_NLATTR_H */ xdp-tools-1.5.4/lib/libbpf/src/libbpf.pc.template0000644000175100001660000000037414706536574021231 0ustar runnerdocker# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) prefix=@PREFIX@ libdir=@LIBDIR@ includedir=${prefix}/include Name: libbpf Description: BPF library Version: @VERSION@ Libs: -L${libdir} -lbpf Requires.private: libelf zlib Cflags: -I${includedir} xdp-tools-1.5.4/lib/libbpf/src/zip.c0000644000175100001660000002045314706536574016603 0ustar runnerdocker// 
SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Routines for dealing with .zip archives. * * Copyright (c) Meta Platforms, Inc. and affiliates. */ #include #include #include #include #include #include #include #include "libbpf_internal.h" #include "zip.h" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpacked" #pragma GCC diagnostic ignored "-Wattributes" /* Specification of ZIP file format can be found here: * https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT * For a high level overview of the structure of a ZIP file see * sections 4.3.1 - 4.3.6. * * Data structures appearing in ZIP files do not contain any * padding and they might be misaligned. To allow us to safely * operate on pointers to such structures and their members, we * declare the types as packed. */ #define END_OF_CD_RECORD_MAGIC 0x06054b50 /* See section 4.3.16 of the spec. */ struct end_of_cd_record { /* Magic value equal to END_OF_CD_RECORD_MAGIC */ __u32 magic; /* Number of the file containing this structure or 0xFFFF if ZIP64 archive. * Zip archive might span multiple files (disks). */ __u16 this_disk; /* Number of the file containing the beginning of the central directory or * 0xFFFF if ZIP64 archive. */ __u16 cd_disk; /* Number of central directory records on this disk or 0xFFFF if ZIP64 * archive. */ __u16 cd_records; /* Number of central directory records on all disks or 0xFFFF if ZIP64 * archive. */ __u16 cd_records_total; /* Size of the central directory record or 0xFFFFFFFF if ZIP64 archive. */ __u32 cd_size; /* Offset of the central directory from the beginning of the archive or * 0xFFFFFFFF if ZIP64 archive. */ __u32 cd_offset; /* Length of comment data following end of central directory record. */ __u16 comment_length; /* Up to 64k of arbitrary bytes. */ /* uint8_t comment[comment_length] */ } __attribute__((packed)); #define CD_FILE_HEADER_MAGIC 0x02014b50 #define FLAG_ENCRYPTED (1 << 0) #define FLAG_HAS_DATA_DESCRIPTOR (1 << 3) /* See section 4.3.12 of the spec. */ struct cd_file_header { /* Magic value equal to CD_FILE_HEADER_MAGIC. */ __u32 magic; __u16 version; /* Minimum zip version needed to extract the file. */ __u16 min_version; __u16 flags; __u16 compression; __u16 last_modified_time; __u16 last_modified_date; __u32 crc; __u32 compressed_size; __u32 uncompressed_size; __u16 file_name_length; __u16 extra_field_length; __u16 file_comment_length; /* Number of the disk where the file starts or 0xFFFF if ZIP64 archive. */ __u16 disk; __u16 internal_attributes; __u32 external_attributes; /* Offset from the start of the disk containing the local file header to the * start of the local file header. */ __u32 offset; } __attribute__((packed)); #define LOCAL_FILE_HEADER_MAGIC 0x04034b50 /* See section 4.3.7 of the spec. */ struct local_file_header { /* Magic value equal to LOCAL_FILE_HEADER_MAGIC. */ __u32 magic; /* Minimum zip version needed to extract the file. 
*/ __u16 min_version; __u16 flags; __u16 compression; __u16 last_modified_time; __u16 last_modified_date; __u32 crc; __u32 compressed_size; __u32 uncompressed_size; __u16 file_name_length; __u16 extra_field_length; } __attribute__((packed)); #pragma GCC diagnostic pop struct zip_archive { void *data; __u32 size; __u32 cd_offset; __u32 cd_records; }; static void *check_access(struct zip_archive *archive, __u32 offset, __u32 size) { if (offset + size > archive->size || offset > offset + size) return NULL; return archive->data + offset; } /* Returns 0 on success, -EINVAL on error and -ENOTSUP if the eocd indicates the * archive uses features which are not supported. */ static int try_parse_end_of_cd(struct zip_archive *archive, __u32 offset) { __u16 comment_length, cd_records; struct end_of_cd_record *eocd; __u32 cd_offset, cd_size; eocd = check_access(archive, offset, sizeof(*eocd)); if (!eocd || eocd->magic != END_OF_CD_RECORD_MAGIC) return -EINVAL; comment_length = eocd->comment_length; if (offset + sizeof(*eocd) + comment_length != archive->size) return -EINVAL; cd_records = eocd->cd_records; if (eocd->this_disk != 0 || eocd->cd_disk != 0 || eocd->cd_records_total != cd_records) /* This is a valid eocd, but we only support single-file non-ZIP64 archives. */ return -ENOTSUP; cd_offset = eocd->cd_offset; cd_size = eocd->cd_size; if (!check_access(archive, cd_offset, cd_size)) return -EINVAL; archive->cd_offset = cd_offset; archive->cd_records = cd_records; return 0; } static int find_cd(struct zip_archive *archive) { int64_t limit, offset; int rc = -EINVAL; if (archive->size <= sizeof(struct end_of_cd_record)) return -EINVAL; /* Because the end of central directory ends with a variable length array of * up to 0xFFFF bytes we can't know exactly where it starts and need to * search for it at the end of the file, scanning the (limit, offset] range. 
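	 * (The comment length field is a 16-bit value, so at most 64 KiB
	 * needs to be scanned back from the end of the file.)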
*/ offset = archive->size - sizeof(struct end_of_cd_record); limit = (int64_t)offset - (1 << 16); for (; offset >= 0 && offset > limit && rc != 0; offset--) { rc = try_parse_end_of_cd(archive, offset); if (rc == -ENOTSUP) break; } return rc; } struct zip_archive *zip_archive_open(const char *path) { struct zip_archive *archive; int err, fd; off_t size; void *data; fd = open(path, O_RDONLY | O_CLOEXEC); if (fd < 0) return ERR_PTR(-errno); size = lseek(fd, 0, SEEK_END); if (size == (off_t)-1 || size > UINT32_MAX) { close(fd); return ERR_PTR(-EINVAL); } data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); err = -errno; close(fd); if (data == MAP_FAILED) return ERR_PTR(err); archive = malloc(sizeof(*archive)); if (!archive) { munmap(data, size); return ERR_PTR(-ENOMEM); } archive->data = data; archive->size = size; err = find_cd(archive); if (err) { munmap(data, size); free(archive); return ERR_PTR(err); } return archive; } void zip_archive_close(struct zip_archive *archive) { munmap(archive->data, archive->size); free(archive); } static struct local_file_header *local_file_header_at_offset(struct zip_archive *archive, __u32 offset) { struct local_file_header *lfh; lfh = check_access(archive, offset, sizeof(*lfh)); if (!lfh || lfh->magic != LOCAL_FILE_HEADER_MAGIC) return NULL; return lfh; } static int get_entry_at_offset(struct zip_archive *archive, __u32 offset, struct zip_entry *out) { struct local_file_header *lfh; __u32 compressed_size; const char *name; void *data; lfh = local_file_header_at_offset(archive, offset); if (!lfh) return -EINVAL; offset += sizeof(*lfh); if ((lfh->flags & FLAG_ENCRYPTED) || (lfh->flags & FLAG_HAS_DATA_DESCRIPTOR)) return -EINVAL; name = check_access(archive, offset, lfh->file_name_length); if (!name) return -EINVAL; offset += lfh->file_name_length; if (!check_access(archive, offset, lfh->extra_field_length)) return -EINVAL; offset += lfh->extra_field_length; compressed_size = lfh->compressed_size; data = check_access(archive, offset, compressed_size); if (!data) return -EINVAL; out->compression = lfh->compression; out->name_length = lfh->file_name_length; out->name = name; out->data = data; out->data_length = compressed_size; out->data_offset = offset; return 0; } int zip_archive_find_entry(struct zip_archive *archive, const char *file_name, struct zip_entry *out) { size_t file_name_length = strlen(file_name); __u32 i, offset = archive->cd_offset; for (i = 0; i < archive->cd_records; ++i) { __u16 cdfh_name_length, cdfh_flags; struct cd_file_header *cdfh; const char *cdfh_name; cdfh = check_access(archive, offset, sizeof(*cdfh)); if (!cdfh || cdfh->magic != CD_FILE_HEADER_MAGIC) return -EINVAL; offset += sizeof(*cdfh); cdfh_name_length = cdfh->file_name_length; cdfh_name = check_access(archive, offset, cdfh_name_length); if (!cdfh_name) return -EINVAL; cdfh_flags = cdfh->flags; if ((cdfh_flags & FLAG_ENCRYPTED) == 0 && (cdfh_flags & FLAG_HAS_DATA_DESCRIPTOR) == 0 && file_name_length == cdfh_name_length && memcmp(file_name, archive->data + offset, file_name_length) == 0) { return get_entry_at_offset(archive, cdfh->offset, out); } offset += cdfh_name_length; offset += cdfh->extra_field_length; offset += cdfh->file_comment_length; } return -ENOENT; } xdp-tools-1.5.4/lib/libbpf/src/bpf_prog_linfo.c0000644000175100001660000001422014706536574020761 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include #include #include #include #include "libbpf.h" #include "libbpf_internal.h" struct bpf_prog_linfo { 
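	/* raw copies of the kernel-provided xlated and jited line info records */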
void *raw_linfo; void *raw_jited_linfo; __u32 *nr_jited_linfo_per_func; __u32 *jited_linfo_func_idx; __u32 nr_linfo; __u32 nr_jited_func; __u32 rec_size; __u32 jited_rec_size; }; static int dissect_jited_func(struct bpf_prog_linfo *prog_linfo, const __u64 *ksym_func, const __u32 *ksym_len) { __u32 nr_jited_func, nr_linfo; const void *raw_jited_linfo; const __u64 *jited_linfo; __u64 last_jited_linfo; /* * Index to raw_jited_linfo: * i: Index for searching the next ksym_func * prev_i: Index to the last found ksym_func */ __u32 i, prev_i; __u32 f; /* Index to ksym_func */ raw_jited_linfo = prog_linfo->raw_jited_linfo; jited_linfo = raw_jited_linfo; if (ksym_func[0] != *jited_linfo) goto errout; prog_linfo->jited_linfo_func_idx[0] = 0; nr_jited_func = prog_linfo->nr_jited_func; nr_linfo = prog_linfo->nr_linfo; for (prev_i = 0, i = 1, f = 1; i < nr_linfo && f < nr_jited_func; i++) { raw_jited_linfo += prog_linfo->jited_rec_size; last_jited_linfo = *jited_linfo; jited_linfo = raw_jited_linfo; if (ksym_func[f] == *jited_linfo) { prog_linfo->jited_linfo_func_idx[f] = i; /* Sanity check */ if (last_jited_linfo - ksym_func[f - 1] + 1 > ksym_len[f - 1]) goto errout; prog_linfo->nr_jited_linfo_per_func[f - 1] = i - prev_i; prev_i = i; /* * The ksym_func[f] is found in jited_linfo. * Look for the next one. */ f++; } else if (*jited_linfo <= last_jited_linfo) { /* Ensure the addr is increasing _within_ a func */ goto errout; } } if (f != nr_jited_func) goto errout; prog_linfo->nr_jited_linfo_per_func[nr_jited_func - 1] = nr_linfo - prev_i; return 0; errout: return -EINVAL; } void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo) { if (!prog_linfo) return; free(prog_linfo->raw_linfo); free(prog_linfo->raw_jited_linfo); free(prog_linfo->nr_jited_linfo_per_func); free(prog_linfo->jited_linfo_func_idx); free(prog_linfo); } struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) { struct bpf_prog_linfo *prog_linfo; __u32 nr_linfo, nr_jited_func; __u64 data_sz; nr_linfo = info->nr_line_info; if (!nr_linfo) return errno = EINVAL, NULL; /* * The min size that bpf_prog_linfo has to access for * searching purpose. 
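	 * In practice this means each record must at least contain insn_off,
	 * which is what the bpf_prog_linfo__lfind() helpers below compare
	 * against.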
*/ if (info->line_info_rec_size < offsetof(struct bpf_line_info, file_name_off)) return errno = EINVAL, NULL; prog_linfo = calloc(1, sizeof(*prog_linfo)); if (!prog_linfo) return errno = ENOMEM, NULL; /* Copy xlated line_info */ prog_linfo->nr_linfo = nr_linfo; prog_linfo->rec_size = info->line_info_rec_size; data_sz = (__u64)nr_linfo * prog_linfo->rec_size; prog_linfo->raw_linfo = malloc(data_sz); if (!prog_linfo->raw_linfo) goto err_free; memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz); nr_jited_func = info->nr_jited_ksyms; if (!nr_jited_func || !info->jited_line_info || info->nr_jited_line_info != nr_linfo || info->jited_line_info_rec_size < sizeof(__u64) || info->nr_jited_func_lens != nr_jited_func || !info->jited_ksyms || !info->jited_func_lens) /* Not enough info to provide jited_line_info */ return prog_linfo; /* Copy jited_line_info */ prog_linfo->nr_jited_func = nr_jited_func; prog_linfo->jited_rec_size = info->jited_line_info_rec_size; data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size; prog_linfo->raw_jited_linfo = malloc(data_sz); if (!prog_linfo->raw_jited_linfo) goto err_free; memcpy(prog_linfo->raw_jited_linfo, (void *)(long)info->jited_line_info, data_sz); /* Number of jited_line_info per jited func */ prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func * sizeof(__u32)); if (!prog_linfo->nr_jited_linfo_per_func) goto err_free; /* * For each jited func, * the start idx to the "linfo" and "jited_linfo" array, */ prog_linfo->jited_linfo_func_idx = malloc(nr_jited_func * sizeof(__u32)); if (!prog_linfo->jited_linfo_func_idx) goto err_free; if (dissect_jited_func(prog_linfo, (__u64 *)(long)info->jited_ksyms, (__u32 *)(long)info->jited_func_lens)) goto err_free; return prog_linfo; err_free: bpf_prog_linfo__free(prog_linfo); return errno = EINVAL, NULL; } const struct bpf_line_info * bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, __u64 addr, __u32 func_idx, __u32 nr_skip) { __u32 jited_rec_size, rec_size, nr_linfo, start, i; const void *raw_jited_linfo, *raw_linfo; const __u64 *jited_linfo; if (func_idx >= prog_linfo->nr_jited_func) return errno = ENOENT, NULL; nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx]; if (nr_skip >= nr_linfo) return errno = ENOENT, NULL; start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip; jited_rec_size = prog_linfo->jited_rec_size; raw_jited_linfo = prog_linfo->raw_jited_linfo + (start * jited_rec_size); jited_linfo = raw_jited_linfo; if (addr < *jited_linfo) return errno = ENOENT, NULL; nr_linfo -= nr_skip; rec_size = prog_linfo->rec_size; raw_linfo = prog_linfo->raw_linfo + (start * rec_size); for (i = 0; i < nr_linfo; i++) { if (addr < *jited_linfo) break; raw_linfo += rec_size; raw_jited_linfo += jited_rec_size; jited_linfo = raw_jited_linfo; } return raw_linfo - rec_size; } const struct bpf_line_info * bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, __u32 insn_off, __u32 nr_skip) { const struct bpf_line_info *linfo; __u32 rec_size, nr_linfo, i; const void *raw_linfo; nr_linfo = prog_linfo->nr_linfo; if (nr_skip >= nr_linfo) return errno = ENOENT, NULL; rec_size = prog_linfo->rec_size; raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size); linfo = raw_linfo; if (insn_off < linfo->insn_off) return errno = ENOENT, NULL; nr_linfo -= nr_skip; for (i = 0; i < nr_linfo; i++) { if (insn_off < linfo->insn_off) break; raw_linfo += rec_size; linfo = raw_linfo; } return raw_linfo - rec_size; } 
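/*
 * Example usage (a sketch, not part of the original file; it assumes 'info'
 * is a struct bpf_prog_info filled in by bpf_prog_get_info_by_fd(), with the
 * line info arrays populated by the caller):
 *
 *	struct bpf_prog_linfo *linfo = bpf_prog_linfo__new(&info);
 *	const struct bpf_line_info *rec;
 *
 *	if (linfo) {
 *		rec = bpf_prog_linfo__lfind(linfo, insn_off, 0);
 *		if (rec) {
 *			// use rec->file_name_off / rec->line_off with the
 *			// program's BTF string section to resolve file:line
 *		}
 *		bpf_prog_linfo__free(linfo);
 *	}
 */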
xdp-tools-1.5.4/lib/libbpf/src/bpf_endian.h0000644000175100001660000000724614706536574020100 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_ENDIAN__ #define __BPF_ENDIAN__ /* * Isolate byte #n and put it into byte #m, for __u##b type. * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 */ #define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) #define ___bpf_swab16(x) ((__u16)( \ ___bpf_mvb(x, 16, 0, 1) | \ ___bpf_mvb(x, 16, 1, 0))) #define ___bpf_swab32(x) ((__u32)( \ ___bpf_mvb(x, 32, 0, 3) | \ ___bpf_mvb(x, 32, 1, 2) | \ ___bpf_mvb(x, 32, 2, 1) | \ ___bpf_mvb(x, 32, 3, 0))) #define ___bpf_swab64(x) ((__u64)( \ ___bpf_mvb(x, 64, 0, 7) | \ ___bpf_mvb(x, 64, 1, 6) | \ ___bpf_mvb(x, 64, 2, 5) | \ ___bpf_mvb(x, 64, 3, 4) | \ ___bpf_mvb(x, 64, 4, 3) | \ ___bpf_mvb(x, 64, 5, 2) | \ ___bpf_mvb(x, 64, 6, 1) | \ ___bpf_mvb(x, 64, 7, 0))) /* LLVM's BPF target selects the endianness of the CPU * it compiles on, or the user specifies (bpfel/bpfeb), * respectively. The used __BYTE_ORDER__ is defined by * the compiler, we cannot rely on __BYTE_ORDER from * libc headers, since it doesn't reflect the actual * requested byte order. * * Note, LLVM's BPF target has different __builtin_bswapX() * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE * in bpfel and bpfeb case, which means below, that we map * to cpu_to_be16(). We could use it unconditionally in BPF * case, but better not rely on it, so that this header here * can be used from application and BPF program side, which * use different targets. */ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define __bpf_ntohs(x) __builtin_bswap16(x) # define __bpf_htons(x) __builtin_bswap16(x) # define __bpf_constant_ntohs(x) ___bpf_swab16(x) # define __bpf_constant_htons(x) ___bpf_swab16(x) # define __bpf_ntohl(x) __builtin_bswap32(x) # define __bpf_htonl(x) __builtin_bswap32(x) # define __bpf_constant_ntohl(x) ___bpf_swab32(x) # define __bpf_constant_htonl(x) ___bpf_swab32(x) # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) # define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) # define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define __bpf_ntohs(x) (x) # define __bpf_htons(x) (x) # define __bpf_constant_ntohs(x) (x) # define __bpf_constant_htons(x) (x) # define __bpf_ntohl(x) (x) # define __bpf_htonl(x) (x) # define __bpf_constant_ntohl(x) (x) # define __bpf_constant_htonl(x) (x) # define __bpf_be64_to_cpu(x) (x) # define __bpf_cpu_to_be64(x) (x) # define __bpf_constant_be64_to_cpu(x) (x) # define __bpf_constant_cpu_to_be64(x) (x) #else # error "Fix your compiler's __BYTE_ORDER__?!" #endif #define bpf_htons(x) \ (__builtin_constant_p(x) ? \ __bpf_constant_htons(x) : __bpf_htons(x)) #define bpf_ntohs(x) \ (__builtin_constant_p(x) ? \ __bpf_constant_ntohs(x) : __bpf_ntohs(x)) #define bpf_htonl(x) \ (__builtin_constant_p(x) ? \ __bpf_constant_htonl(x) : __bpf_htonl(x)) #define bpf_ntohl(x) \ (__builtin_constant_p(x) ? \ __bpf_constant_ntohl(x) : __bpf_ntohl(x)) #define bpf_cpu_to_be64(x) \ (__builtin_constant_p(x) ? 
\ __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
#define bpf_be64_to_cpu(x)			\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))
#endif /* __BPF_ENDIAN__ */
xdp-tools-1.5.4/lib/libbpf/src/str_error.c0000644000175100001660000000177414706536574020017 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#undef _GNU_SOURCE
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include "str_error.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/*
 * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
 * libc, while checking strerror_r() return to avoid having to check this in
 * all places calling it.
 */
char *libbpf_strerror_r(int err, char *dst, int len)
{
	int ret = strerror_r(err < 0 ? -err : err, dst, len);
	/* on glibc <2.13, ret == -1 and errno is set, if strerror_r() can't
	 * handle the error, on glibc >=2.13 *positive* (errno-like) error
	 * code is returned directly
	 */
	if (ret == -1)
		ret = errno;
	if (ret) {
		if (ret == EINVAL)
			/* strerror_r() doesn't recognize this specific error */
			snprintf(dst, len, "unknown error (%d)", err < 0 ? err : -err);
		else
			snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
	}
	return dst;
}
xdp-tools-1.5.4/lib/libbpf/src/bpf.c0000644000175100001660000011152114706536574016545 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
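/* Annotation: libc ships no wrapper for the bpf(2) syscall, which is why
 * libbpf always enters the kernel through syscall(__NR_bpf, ...) in the
 * sys_bpf*() helpers defined below.
 */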
# endif #endif static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { return syscall(__NR_bpf, cmd, attr, size); } static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { int fd; fd = sys_bpf(cmd, attr, size); return ensure_good_fd(fd); } int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts) { int fd; do { fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); } while (fd < 0 && errno == EAGAIN && --attempts > 0); return fd; } /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to * memcg-based memory accounting for BPF maps and progs. This was done in [0]. * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF. * * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/ * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper") */ int probe_memcg_account(int token_fd) { const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); struct bpf_insn insns[] = { BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), BPF_EXIT_INSN(), }; size_t insn_cnt = ARRAY_SIZE(insns); union bpf_attr attr; int prog_fd; /* attempt loading freplace trying to use custom BTF */ memset(&attr, 0, attr_sz); attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; attr.insns = ptr_to_u64(insns); attr.insn_cnt = insn_cnt; attr.license = ptr_to_u64("GPL"); attr.prog_token_fd = token_fd; if (token_fd) attr.prog_flags |= BPF_F_TOKEN_FD; prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz); if (prog_fd >= 0) { close(prog_fd); return 1; } return 0; } static bool memlock_bumped; static rlim_t memlock_rlim = RLIM_INFINITY; int libbpf_set_memlock_rlim(size_t memlock_bytes) { if (memlock_bumped) return libbpf_err(-EBUSY); memlock_rlim = memlock_bytes; return 0; } int bump_rlimit_memlock(void) { struct rlimit rlim; /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT)) return 0; memlock_bumped = true; /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */ if (memlock_rlim == 0) return 0; rlim.rlim_cur = rlim.rlim_max = memlock_rlim; if (setrlimit(RLIMIT_MEMLOCK, &rlim)) return -errno; return 0; } int bpf_map_create(enum bpf_map_type map_type, const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries, const struct bpf_map_create_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd); union bpf_attr attr; int fd; bump_rlimit_memlock(); memset(&attr, 0, attr_sz); if (!OPTS_VALID(opts, bpf_map_create_opts)) return libbpf_err(-EINVAL); attr.map_type = map_type; if (map_name && feat_supported(NULL, FEAT_PROG_NAME)) libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); attr.key_size = key_size; attr.value_size = value_size; attr.max_entries = max_entries; attr.btf_fd = OPTS_GET(opts, btf_fd, 0); attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0); attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); attr.map_flags = OPTS_GET(opts, map_flags, 0); attr.map_extra = OPTS_GET(opts, map_extra, 0); attr.numa_node = OPTS_GET(opts, numa_node, 0); attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); 
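	/* Annotation: map_token_fd (set just below) optionally carries a BPF
	 * token FD, which recent kernels accept so that delegated
	 * (unprivileged) processes may create maps; 0 means "no token".
	 */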
attr.map_token_fd = OPTS_GET(opts, token_fd, 0); fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz); return libbpf_err_errno(fd); } static void * alloc_zero_tailing_info(const void *orecord, __u32 cnt, __u32 actual_rec_size, __u32 expected_rec_size) { __u64 info_len = (__u64)actual_rec_size * cnt; void *info, *nrecord; int i; info = malloc(info_len); if (!info) return NULL; /* zero out bytes kernel does not understand */ nrecord = info; for (i = 0; i < cnt; i++) { memcpy(nrecord, orecord, expected_rec_size); memset(nrecord + expected_rec_size, 0, actual_rec_size - expected_rec_size); orecord += actual_rec_size; nrecord += actual_rec_size; } return info; } int bpf_prog_load(enum bpf_prog_type prog_type, const char *prog_name, const char *license, const struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); void *finfo = NULL, *linfo = NULL; const char *func_info, *line_info; __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; __u32 func_info_rec_size, line_info_rec_size; int fd, attempts; union bpf_attr attr; char *log_buf; bump_rlimit_memlock(); if (!OPTS_VALID(opts, bpf_prog_load_opts)) return libbpf_err(-EINVAL); attempts = OPTS_GET(opts, attempts, 0); if (attempts < 0) return libbpf_err(-EINVAL); if (attempts == 0) attempts = PROG_LOAD_ATTEMPTS; memset(&attr, 0, attr_sz); attr.prog_type = prog_type; attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0); attr.prog_flags = OPTS_GET(opts, prog_flags, 0); attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); attr.kern_version = OPTS_GET(opts, kern_version, 0); attr.prog_token_fd = OPTS_GET(opts, token_fd, 0); if (prog_name && feat_supported(NULL, FEAT_PROG_NAME)) libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); attr.license = ptr_to_u64(license); if (insn_cnt > UINT_MAX) return libbpf_err(-E2BIG); attr.insns = ptr_to_u64(insns); attr.insn_cnt = (__u32)insn_cnt; attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0); if (attach_prog_fd && attach_btf_obj_fd) return libbpf_err(-EINVAL); attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0); if (attach_prog_fd) attr.attach_prog_fd = attach_prog_fd; else attr.attach_btf_obj_fd = attach_btf_obj_fd; log_buf = OPTS_GET(opts, log_buf, NULL); log_size = OPTS_GET(opts, log_size, 0); log_level = OPTS_GET(opts, log_level, 0); if (!!log_buf != !!log_size) return libbpf_err(-EINVAL); func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); func_info = OPTS_GET(opts, func_info, NULL); attr.func_info_rec_size = func_info_rec_size; attr.func_info = ptr_to_u64(func_info); attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0); line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0); line_info = OPTS_GET(opts, line_info, NULL); attr.line_info_rec_size = line_info_rec_size; attr.line_info = ptr_to_u64(line_info); attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); if (log_level) { attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_size; attr.log_level = log_level; } fd = sys_bpf_prog_load(&attr, attr_sz, attempts); OPTS_SET(opts, log_true_size, attr.log_true_size); if (fd >= 0) return fd; /* After bpf_prog_load, the kernel may modify certain attributes * to give user space a hint how to deal with loading failure. * Check to see whether we can make some changes and load again. 
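	 * (Annotation: concretely, on E2BIG the kernel writes back the
	 * func_info/line_info record size it knows about; the loop below then
	 * zero-fills each record's bytes past that size, which the kernel
	 * would otherwise reject as unknown non-zero data, and retries.)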
*/ while (errno == E2BIG && (!finfo || !linfo)) { if (!finfo && attr.func_info_cnt && attr.func_info_rec_size < func_info_rec_size) { /* try with corrected func info records */ finfo = alloc_zero_tailing_info(func_info, attr.func_info_cnt, func_info_rec_size, attr.func_info_rec_size); if (!finfo) { errno = E2BIG; goto done; } attr.func_info = ptr_to_u64(finfo); attr.func_info_rec_size = func_info_rec_size; } else if (!linfo && attr.line_info_cnt && attr.line_info_rec_size < line_info_rec_size) { linfo = alloc_zero_tailing_info(line_info, attr.line_info_cnt, line_info_rec_size, attr.line_info_rec_size); if (!linfo) { errno = E2BIG; goto done; } attr.line_info = ptr_to_u64(linfo); attr.line_info_rec_size = line_info_rec_size; } else { break; } fd = sys_bpf_prog_load(&attr, attr_sz, attempts); OPTS_SET(opts, log_true_size, attr.log_true_size); if (fd >= 0) goto done; } if (log_level == 0 && log_buf) { /* log_level == 0 with non-NULL log_buf requires retrying on error * with log_level == 1 and log_buf/log_buf_size set, to get details of * failure */ attr.log_buf = ptr_to_u64(log_buf); attr.log_size = log_size; attr.log_level = 1; fd = sys_bpf_prog_load(&attr, attr_sz, attempts); OPTS_SET(opts, log_true_size, attr.log_true_size); } done: /* free() doesn't affect errno, so we don't need to restore it */ free(finfo); free(linfo); return libbpf_err_errno(fd); } int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_lookup_elem(int fd, const void *key, void *value) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.value = ptr_to_u64(value); attr.flags = flags; ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_delete_elem(int fd, const void *key) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int 
bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.flags = flags; ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_get_next_key(int fd, const void *key, void *next_key) { const size_t attr_sz = offsetofend(union bpf_attr, next_key); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = ptr_to_u64(key); attr.next_key = ptr_to_u64(next_key); ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_map_freeze(int fd) { const size_t attr_sz = offsetofend(union bpf_attr, map_fd); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.map_fd = fd; ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz); return libbpf_err_errno(ret); } static int bpf_map_batch_common(int cmd, int fd, void *in_batch, void *out_batch, void *keys, void *values, __u32 *count, const struct bpf_map_batch_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, batch); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_map_batch_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.batch.map_fd = fd; attr.batch.in_batch = ptr_to_u64(in_batch); attr.batch.out_batch = ptr_to_u64(out_batch); attr.batch.keys = ptr_to_u64(keys); attr.batch.values = ptr_to_u64(values); attr.batch.count = *count; attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); attr.batch.flags = OPTS_GET(opts, flags, 0); ret = sys_bpf(cmd, &attr, attr_sz); *count = attr.batch.count; return libbpf_err_errno(ret); } int bpf_map_delete_batch(int fd, const void *keys, __u32 *count, const struct bpf_map_batch_opts *opts) { return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL, NULL, (void *)keys, NULL, count, opts); } int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, void *values, __u32 *count, const struct bpf_map_batch_opts *opts) { return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch, out_batch, keys, values, count, opts); } int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, void *keys, void *values, __u32 *count, const struct bpf_map_batch_opts *opts) { return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH, fd, in_batch, out_batch, keys, values, count, opts); } int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count, const struct bpf_map_batch_opts *opts) { return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL, (void *)keys, (void *)values, count, opts); } int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, path_fd); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_obj_pin_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.path_fd = OPTS_GET(opts, path_fd, 0); attr.pathname = ptr_to_u64((void *)pathname); attr.file_flags = OPTS_GET(opts, file_flags, 0); attr.bpf_fd = fd; ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_obj_pin(int fd, const char *pathname) { return bpf_obj_pin_opts(fd, pathname, NULL); } int bpf_obj_get(const char *pathname) { return bpf_obj_get_opts(pathname, NULL); } int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, path_fd); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, 
bpf_obj_get_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.path_fd = OPTS_GET(opts, path_fd, 0); attr.pathname = ptr_to_u64((void *)pathname); attr.file_flags = OPTS_GET(opts, file_flags, 0); fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, unsigned int flags) { DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts, .flags = flags, ); return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts); } int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type, const struct bpf_prog_attach_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); __u32 relative_id, flags; int ret, relative_fd; union bpf_attr attr; if (!OPTS_VALID(opts, bpf_prog_attach_opts)) return libbpf_err(-EINVAL); relative_id = OPTS_GET(opts, relative_id, 0); relative_fd = OPTS_GET(opts, relative_fd, 0); flags = OPTS_GET(opts, flags, 0); /* validate we don't have unexpected combinations of non-zero fields */ if (relative_fd && relative_id) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.target_fd = target; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0); attr.expected_revision = OPTS_GET(opts, expected_revision, 0); if (relative_id) { attr.attach_flags = flags | BPF_F_ID; attr.relative_id = relative_id; } else { attr.attach_flags = flags; attr.relative_fd = relative_fd; } ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type, const struct bpf_prog_detach_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); __u32 relative_id, flags; int ret, relative_fd; union bpf_attr attr; if (!OPTS_VALID(opts, bpf_prog_detach_opts)) return libbpf_err(-EINVAL); relative_id = OPTS_GET(opts, relative_id, 0); relative_fd = OPTS_GET(opts, relative_fd, 0); flags = OPTS_GET(opts, flags, 0); /* validate we don't have unexpected combinations of non-zero fields */ if (relative_fd && relative_id) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.target_fd = target; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; attr.expected_revision = OPTS_GET(opts, expected_revision, 0); if (relative_id) { attr.attach_flags = flags | BPF_F_ID; attr.relative_id = relative_id; } else { attr.attach_flags = flags; attr.relative_fd = relative_fd; } ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_prog_detach(int target_fd, enum bpf_attach_type type) { return bpf_prog_detach_opts(0, target_fd, type, NULL); } int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) { return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL); } int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, const struct bpf_link_create_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, link_create); __u32 target_btf_id, iter_info_len, relative_id; int fd, err, relative_fd; union bpf_attr attr; if (!OPTS_VALID(opts, bpf_link_create_opts)) return libbpf_err(-EINVAL); iter_info_len = OPTS_GET(opts, iter_info_len, 0); target_btf_id = OPTS_GET(opts, target_btf_id, 0); /* validate we don't have unexpected combinations of non-zero fields */ if (iter_info_len || target_btf_id) { if (iter_info_len && target_btf_id) return libbpf_err(-EINVAL); if (!OPTS_ZEROED(opts, target_btf_id)) return libbpf_err(-EINVAL); } memset(&attr, 0, 
attr_sz); attr.link_create.prog_fd = prog_fd; attr.link_create.target_fd = target_fd; attr.link_create.attach_type = attach_type; attr.link_create.flags = OPTS_GET(opts, flags, 0); if (target_btf_id) { attr.link_create.target_btf_id = target_btf_id; goto proceed; } switch (attach_type) { case BPF_TRACE_ITER: attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0)); attr.link_create.iter_info_len = iter_info_len; break; case BPF_PERF_EVENT: attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0); if (!OPTS_ZEROED(opts, perf_event)) return libbpf_err(-EINVAL); break; case BPF_TRACE_KPROBE_MULTI: case BPF_TRACE_KPROBE_SESSION: attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); if (!OPTS_ZEROED(opts, kprobe_multi)) return libbpf_err(-EINVAL); break; case BPF_TRACE_UPROBE_MULTI: attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0)); attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0)); attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0)); attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0); if (!OPTS_ZEROED(opts, uprobe_multi)) return libbpf_err(-EINVAL); break; case BPF_TRACE_RAW_TP: case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: case BPF_MODIFY_RETURN: case BPF_LSM_MAC: attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0); if (!OPTS_ZEROED(opts, tracing)) return libbpf_err(-EINVAL); break; case BPF_NETFILTER: attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0); attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0); attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0); attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0); if (!OPTS_ZEROED(opts, netfilter)) return libbpf_err(-EINVAL); break; case BPF_TCX_INGRESS: case BPF_TCX_EGRESS: relative_fd = OPTS_GET(opts, tcx.relative_fd, 0); relative_id = OPTS_GET(opts, tcx.relative_id, 0); if (relative_fd && relative_id) return libbpf_err(-EINVAL); if (relative_id) { attr.link_create.tcx.relative_id = relative_id; attr.link_create.flags |= BPF_F_ID; } else { attr.link_create.tcx.relative_fd = relative_fd; } attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0); if (!OPTS_ZEROED(opts, tcx)) return libbpf_err(-EINVAL); break; case BPF_NETKIT_PRIMARY: case BPF_NETKIT_PEER: relative_fd = OPTS_GET(opts, netkit.relative_fd, 0); relative_id = OPTS_GET(opts, netkit.relative_id, 0); if (relative_fd && relative_id) return libbpf_err(-EINVAL); if (relative_id) { attr.link_create.netkit.relative_id = relative_id; attr.link_create.flags |= BPF_F_ID; } else { attr.link_create.netkit.relative_fd = relative_fd; } attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0); if (!OPTS_ZEROED(opts, netkit)) return libbpf_err(-EINVAL); break; 
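	/* Annotation: every remaining attach type takes no link-type-specific
	 * options, so anything past the generic flags must be zero.
	 */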
default: if (!OPTS_ZEROED(opts, flags)) return libbpf_err(-EINVAL); break; } proceed: fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz); if (fd >= 0) return fd; /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry * and other similar programs */ err = -errno; if (err != -EINVAL) return libbpf_err(err); /* if user used features not supported by * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately */ if (attr.link_create.target_fd || attr.link_create.target_btf_id) return libbpf_err(err); if (!OPTS_ZEROED(opts, sz)) return libbpf_err(err); /* otherwise, for few select kinds of programs that can be * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as * a fallback for older kernels */ switch (attach_type) { case BPF_TRACE_RAW_TP: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: case BPF_MODIFY_RETURN: return bpf_raw_tracepoint_open(NULL, prog_fd); default: return libbpf_err(err); } } int bpf_link_detach(int link_fd) { const size_t attr_sz = offsetofend(union bpf_attr, link_detach); union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.link_detach.link_fd = link_fd; ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_link_update(int link_fd, int new_prog_fd, const struct bpf_link_update_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, link_update); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_link_update_opts)) return libbpf_err(-EINVAL); if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.link_update.link_fd = link_fd; attr.link_update.new_prog_fd = new_prog_fd; attr.link_update.flags = OPTS_GET(opts, flags, 0); if (OPTS_GET(opts, old_prog_fd, 0)) attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); else if (OPTS_GET(opts, old_map_fd, 0)) attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0); ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_iter_create(int link_fd) { const size_t attr_sz = offsetofend(union bpf_attr, iter_create); union bpf_attr attr; int fd; memset(&attr, 0, attr_sz); attr.iter_create.link_fd = link_fd; fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_prog_query_opts(int target, enum bpf_attach_type type, struct bpf_prog_query_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, query); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_prog_query_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.query.target_fd = target; attr.query.attach_type = type; attr.query.query_flags = OPTS_GET(opts, query_flags, 0); attr.query.count = OPTS_GET(opts, count, 0); attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL)); attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL)); attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL)); attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL)); ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz); OPTS_SET(opts, attach_flags, attr.query.attach_flags); OPTS_SET(opts, revision, attr.query.revision); OPTS_SET(opts, count, attr.query.count); return libbpf_err_errno(ret); } int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) { LIBBPF_OPTS(bpf_prog_query_opts, opts); int ret; opts.query_flags = query_flags; opts.prog_ids = prog_ids; opts.prog_cnt = *prog_cnt; ret = 
bpf_prog_query_opts(target_fd, type, &opts); if (attach_flags) *attach_flags = opts.attach_flags; *prog_cnt = opts.prog_cnt; return libbpf_err_errno(ret); } int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, test); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_test_run_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.test.prog_fd = prog_fd; attr.test.batch_size = OPTS_GET(opts, batch_size, 0); attr.test.cpu = OPTS_GET(opts, cpu, 0); attr.test.flags = OPTS_GET(opts, flags, 0); attr.test.repeat = OPTS_GET(opts, repeat, 0); attr.test.duration = OPTS_GET(opts, duration, 0); attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0); attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0); attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0); attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0); attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL)); attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL)); attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz); OPTS_SET(opts, data_size_out, attr.test.data_size_out); OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); OPTS_SET(opts, duration, attr.test.duration); OPTS_SET(opts, retval, attr.test.retval); return libbpf_err_errno(ret); } static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int err; memset(&attr, 0, attr_sz); attr.start_id = start_id; err = sys_bpf(cmd, &attr, attr_sz); if (!err) *next_id = attr.next_id; return libbpf_err_errno(err); } int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) { return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID); } int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) { return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID); } int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) { return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID); } int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) { return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID); } int bpf_prog_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.prog_id = id; attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_prog_get_fd_by_id(__u32 id) { return bpf_prog_get_fd_by_id_opts(id, NULL); } int bpf_map_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.map_id = id; attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_map_get_fd_by_id(__u32 id) { return bpf_map_get_fd_by_id_opts(id, NULL); } int bpf_btf_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) return 
libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.btf_id = id; attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_btf_get_fd_by_id(__u32 id) { return bpf_btf_get_fd_by_id_opts(id, NULL); } int bpf_link_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, open_flags); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.link_id = id; attr.open_flags = OPTS_GET(opts, open_flags, 0); fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_link_get_fd_by_id(__u32 id) { return bpf_link_get_fd_by_id_opts(id, NULL); } int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) { const size_t attr_sz = offsetofend(union bpf_attr, info); union bpf_attr attr; int err; memset(&attr, 0, attr_sz); attr.info.bpf_fd = bpf_fd; attr.info.info_len = *info_len; attr.info.info = ptr_to_u64(info); err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz); if (!err) *info_len = attr.info.info_len; return libbpf_err_errno(err); } int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len) { return bpf_obj_get_info_by_fd(prog_fd, info, info_len); } int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) { return bpf_obj_get_info_by_fd(map_fd, info, info_len); } int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len) { return bpf_obj_get_info_by_fd(btf_fd, info, info_len); } int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len) { return bpf_obj_get_info_by_fd(link_fd, info, info_len); } int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_raw_tp_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.raw_tracepoint.prog_fd = prog_fd; attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL)); attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0); fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_raw_tracepoint_open(const char *name, int prog_fd) { LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name); return bpf_raw_tracepoint_open_opts(prog_fd, &opts); } int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd); union bpf_attr attr; char *log_buf; size_t log_size; __u32 log_level; int fd; bump_rlimit_memlock(); memset(&attr, 0, attr_sz); if (!OPTS_VALID(opts, bpf_btf_load_opts)) return libbpf_err(-EINVAL); log_buf = OPTS_GET(opts, log_buf, NULL); log_size = OPTS_GET(opts, log_size, 0); log_level = OPTS_GET(opts, log_level, 0); if (log_size > UINT_MAX) return libbpf_err(-EINVAL); if (log_size && !log_buf) return libbpf_err(-EINVAL); attr.btf = ptr_to_u64(btf_data); attr.btf_size = btf_size; attr.btf_flags = OPTS_GET(opts, btf_flags, 0); attr.btf_token_fd = OPTS_GET(opts, token_fd, 0); /* log_level == 0 and log_buf != NULL means "try loading without * log_buf, but retry with log_buf and log_level=1 on error", which is * consistent across low-level and high-level BTF and program loading * APIs within libbpf and provides a sensible behavior in practice */ if (log_level) { attr.btf_log_buf = ptr_to_u64(log_buf); 
attr.btf_log_size = (__u32)log_size; attr.btf_log_level = log_level; } fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); if (fd < 0 && log_buf && log_level == 0) { attr.btf_log_buf = ptr_to_u64(log_buf); attr.btf_log_size = (__u32)log_size; attr.btf_log_level = 1; fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); } OPTS_SET(opts, log_true_size, attr.btf_log_true_size); return libbpf_err_errno(fd); } int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr) { const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query); union bpf_attr attr; int err; memset(&attr, 0, attr_sz); attr.task_fd_query.pid = pid; attr.task_fd_query.fd = fd; attr.task_fd_query.flags = flags; attr.task_fd_query.buf = ptr_to_u64(buf); attr.task_fd_query.buf_len = *buf_len; err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz); *buf_len = attr.task_fd_query.buf_len; *prog_id = attr.task_fd_query.prog_id; *fd_type = attr.task_fd_query.fd_type; *probe_offset = attr.task_fd_query.probe_offset; *probe_addr = attr.task_fd_query.probe_addr; return libbpf_err_errno(err); } int bpf_enable_stats(enum bpf_stats_type type) { const size_t attr_sz = offsetofend(union bpf_attr, enable_stats); union bpf_attr attr; int fd; memset(&attr, 0, attr_sz); attr.enable_stats.type = type; fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_prog_bind_map(int prog_fd, int map_fd, const struct bpf_prog_bind_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map); union bpf_attr attr; int ret; if (!OPTS_VALID(opts, bpf_prog_bind_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.prog_bind_map.prog_fd = prog_fd; attr.prog_bind_map.map_fd = map_fd; attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz); return libbpf_err_errno(ret); } int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts) { const size_t attr_sz = offsetofend(union bpf_attr, token_create); union bpf_attr attr; int fd; if (!OPTS_VALID(opts, bpf_token_create_opts)) return libbpf_err(-EINVAL); memset(&attr, 0, attr_sz); attr.token_create.bpffs_fd = bpffs_fd; attr.token_create.flags = OPTS_GET(opts, flags, 0); fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz); return libbpf_err_errno(fd); } xdp-tools-1.5.4/lib/libbpf/src/usdt.c0000644000175100001660000014611214706536574016761 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #include #include #include #include #include #include #include #include #include /* s8 will be marked as poison while it's a reg of riscv */ #if defined(__riscv) #define rv_s8 s8 #endif #include "bpf.h" #include "libbpf.h" #include "libbpf_common.h" #include "libbpf_internal.h" #include "hashmap.h" /* libbpf's USDT support consists of BPF-side state/code and user-space * state/code working together in concert. BPF-side parts are defined in * usdt.bpf.h header library. User-space state is encapsulated by struct * usdt_manager and all the supporting code centered around usdt_manager. * * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map * and IP-to-spec-ID map, which is auxiliary map necessary for kernels that * don't support BPF cookie (see below). These two maps are implicitly * embedded into user's end BPF object file when user's code included * usdt.bpf.h. 
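 *
 * (Annotation, not upstream text: on the BPF side the whole setup is, e.g.:
 *
 *      #include <bpf/usdt.bpf.h>
 *
 *      SEC("usdt")
 *      int BPF_USDT(my_handler, int arg1)
 *      {
 *              return 0;
 *      }
 *
 *  including usdt.bpf.h is what pulls those two maps into the object.)
 *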
 * This means that libbpf doesn't do anything special to create
 * these USDT support maps. They are created by normal libbpf logic of
 * instantiating BPF maps when opening and loading BPF object.
 *
 * As such, libbpf is basically unaware of the need to do anything
 * USDT-related until the very first call to bpf_program__attach_usdt(), which
 * can be called by user explicitly or happen automatically during skeleton
 * attach (or, equivalently, through generic bpf_program__attach() call). At
 * this point, libbpf will instantiate and initialize struct usdt_manager and
 * store it in bpf_object. USDT manager is a per-BPF object construct, as each
 * independent BPF object might or might not have USDT programs, and thus all
 * the expected USDT-related state. There is no coordination between two
 * bpf_objects in parts of USDT attachment; they are oblivious of each other's
 * existence, and libbpf simply deals with each bpf_object's own USDT state.
 *
 * Quick crash course on USDTs.
 *
 * From user-space application's point of view, USDT is essentially just
 * a slightly special function call that normally has zero overhead, unless it
 * is being traced by some external entity (e.g., a BPF-based tool). Here's how
 * a typical application can trigger a USDT probe:
 *
 *   #include <sys/sdt.h>  // provided by systemtap-sdt-devel package
 *   // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
 *
 *   STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
 *
 * A USDT is identified by its <provider>:<name> pair of names. Each
 * individual USDT has a fixed number of arguments (3 in the above example)
 * and specifies values of each argument as if it were a function call.
 *
 * A USDT call is actually not a function call, but is instead replaced by
 * a single NOP instruction (thus zero overhead, effectively). But in addition
 * to that, those USDT macros generate special SHT_NOTE ELF records in
 * a .note.stapsdt ELF section. Here's an example USDT definition as emitted by
 * `readelf -n <binary>`:
 *
 *   stapsdt              0x00000089       NT_STAPSDT (SystemTap probe descriptors)
 *     Provider: test
 *     Name: usdt12
 *     Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
 *     Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
 *
 * In this case we have USDT test:usdt12 with 12 arguments.
 *
 * Location and base are offsets used to calculate absolute IP address of that
 * NOP instruction that kernel can replace with an interrupt instruction to
 * trigger instrumentation code (BPF program for all that we care about).
 *
 * The semaphore above is an optional feature. It records an address of a 2-byte
 * refcount variable (normally in '.probes' ELF section) used for signaling if
 * there is anything that is attached to USDT. This is useful for user
 * applications if, for example, they need to prepare some arguments that are
 * passed only to USDTs and preparation is expensive. By checking if USDT is
 * "activated", an application can avoid paying those costs unnecessarily.
 * A recent enough kernel has built-in support for automatically managing this
 * refcount, which libbpf expects and relies on. If USDT is defined without
 * an associated semaphore, this value will be zero. See selftests for semaphore
 * examples.
 *
 * The Arguments string is the most interesting part. This USDT specification
 * string provides information about all the USDT arguments and their locations.
 * The part before the @ sign defines the byte size of the argument (1, 2, 4,
 * or 8) and whether the argument is signed or unsigned (negative size means
 * signed). The part after the @ sign is an assembly-like definition of the
 * argument location (see [0] for more details). Technically, the assembler can
 * provide some pretty advanced definitions, but libbpf currently supports the
 * three most common cases:
 * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
 * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
 *    whose value is in register %rdx";
 * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
 *    specifies a signed 32-bit integer stored at offset -1204 bytes from the
 *    memory address stored in %rbp.
 *
 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
 *
 * During attachment, libbpf parses all the relevant USDT specifications and
 * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
 * code through the spec map. This allows BPF applications to quickly fetch the
 * actual argument values at runtime using simple BPF-side code.
 *
 * With basics out of the way, let's go over less immediately obvious aspects
 * of supporting USDTs.
 *
 * First, there is no special USDT BPF program type. It is actually just
 * a uprobe BPF program (which for the kernel, at least currently, is just
 * a kprobe program, so BPF_PROG_TYPE_KPROBE program type). The only difference
 * is that a uprobe is usually attached at the function entry, while a USDT
 * will normally be somewhere inside the function. But it should always be
 * pointing to a NOP instruction, which makes such uprobes the fastest uprobe
 * kind.
 *
 * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
 * macro invocations can end up being inlined many, many times, depending on
 * specifics of each individual user application. So a single conceptual USDT
 * (identified by provider:name pair of identifiers) is, generally speaking,
 * multiple uprobe locations (USDT call sites) in different places in a user
 * application. Further, again due to inlining, each USDT call site might end
 * up having the same argument #N be located in a different place. In one call
 * site it could be a constant, in another it will end up in a register, and in
 * yet another it could be some other register or even somewhere on the stack.
 *
 * As such, "attaching to USDT" means (in the general case) attaching the same
 * uprobe BPF program to multiple target locations in a user application, each
 * potentially having a completely different USDT spec associated with it.
 * To wire all this up together, libbpf allocates a unique integer spec ID for
 * each unique USDT spec. Spec IDs are allocated as sequential small integers
 * so that they can be used as keys in an array BPF map (for performance
 * reasons). Spec ID allocation and accounting is a big part of what
 * usdt_manager is about. This state has to be maintained per-BPF object and
 * coordinated across different USDT attachments within the same BPF object.
 *
 * Spec ID is the key in the spec BPF map; the value is the actual USDT spec
 * laid out as struct usdt_spec. Each invocation of the BPF program at runtime
 * needs to know its associated spec ID. It gets it either through BPF cookie,
 * which libbpf sets to the spec ID at attach time, or, if the kernel is too
 * old to support BPF cookie, through the IP-to-spec-ID map that libbpf
 * maintains in that case. The latter means that some modes of operation can't
 * be supported without BPF cookie. One such mode is attaching to a shared
 * library "generically", without specifying a target process. In that case,
 * it's impossible to calculate absolute IP addresses for the IP-to-spec-ID
 * map, and thus such a mode is not supported without BPF cookie support.
 *
 * Note that libbpf is using BPF cookie functionality for its own internal
 * needs, so the user can't rely on the BPF cookie feature themselves. To that
 * end, libbpf provides conceptually equivalent USDT cookie support. It's
 * still a user-provided u64 value that can be associated with a USDT
 * attachment. Note that this will be the same value for all USDT call sites
 * within the same single *logical* USDT attachment. This makes sense because,
 * to the user, attaching to a USDT is a single BPF program triggered for a
 * singular USDT probe. The fact that this is done at multiple actual
 * locations is a mostly hidden implementation detail. This USDT cookie value
 * can be fetched with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
 *
 * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
 * necessarily have that many different USDT specs. It very well might be
 * that 1000 USDT call sites only need 5 different USDT specs, because all the
 * arguments are typically contained in a small set of registers or stack
 * locations. As such, it's wasteful to allocate as many USDT spec IDs as
 * there are USDT call sites. So libbpf tries to be frugal and performs
 * on-the-fly deduplication during a single USDT attachment to only allocate
 * the minimal required amount of unique USDT specs (and thus spec IDs). This
 * is trivially achieved by using the USDT spec string (Arguments string from
 * the USDT note) as a lookup key in a hashmap. The USDT spec string uniquely
 * defines everything about how to fetch USDT arguments, so two USDT call
 * sites sharing a USDT spec string can safely share the same USDT spec and
 * spec ID. Note, this spec string deduplication happens only during the same
 * USDT attachment, so each USDT spec shares the same USDT cookie value. This
 * is not generally true for other USDT attachments within the same BPF
 * object, as even if the USDT spec string is the same, the USDT cookie value
 * can be different. It was deemed excessive to try to deduplicate across
 * independent USDT attachments by taking into account USDT spec string *and*
 * USDT cookie value, which would complicate spec ID accounting significantly
 * for little gain.
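 *
 * (Annotation, a hedged user-space usage sketch under the public libbpf API;
 *  the program handle, binary path and cookie value are illustrative:
 *
 *      pid_t any_pid = -1;
 *      LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 42);
 *      struct bpf_link *link;
 *
 *      link = bpf_program__attach_usdt(prog, any_pid, "/path/to/binary",
 *                                      "my_usdt_provider",
 *                                      "my_usdt_probe_name", &opts);
 *
 *  all of the spec parsing, spec ID allocation and map population described
 *  above happens inside this one call.)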
*/ #define USDT_BASE_SEC ".stapsdt.base" #define USDT_SEMA_SEC ".probes" #define USDT_NOTE_SEC ".note.stapsdt" #define USDT_NOTE_TYPE 3 #define USDT_NOTE_NAME "stapsdt" /* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */ enum usdt_arg_type { USDT_ARG_CONST, USDT_ARG_REG, USDT_ARG_REG_DEREF, }; /* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */ struct usdt_arg_spec { __u64 val_off; enum usdt_arg_type arg_type; short reg_off; bool arg_signed; char arg_bitshift; }; /* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */ #define USDT_MAX_ARG_CNT 12 /* should match struct __bpf_usdt_spec from usdt.bpf.h */ struct usdt_spec { struct usdt_arg_spec args[USDT_MAX_ARG_CNT]; __u64 usdt_cookie; short arg_cnt; }; struct usdt_note { const char *provider; const char *name; /* USDT args specification string, e.g.: * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx" */ const char *args; long loc_addr; long base_addr; long sema_addr; }; struct usdt_target { long abs_ip; long rel_ip; long sema_off; struct usdt_spec spec; const char *spec_str; }; struct usdt_manager { struct bpf_map *specs_map; struct bpf_map *ip_to_spec_id_map; int *free_spec_ids; size_t free_spec_cnt; size_t next_free_spec_id; bool has_bpf_cookie; bool has_sema_refcnt; bool has_uprobe_multi; }; struct usdt_manager *usdt_manager_new(struct bpf_object *obj) { static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"; struct usdt_manager *man; struct bpf_map *specs_map, *ip_to_spec_id_map; specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs"); ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id"); if (!specs_map || !ip_to_spec_id_map) { pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n"); return ERR_PTR(-ESRCH); } man = calloc(1, sizeof(*man)); if (!man) return ERR_PTR(-ENOMEM); man->specs_map = specs_map; man->ip_to_spec_id_map = ip_to_spec_id_map; /* Detect if BPF cookie is supported for kprobes. * We don't need IP-to-ID mapping if we can use BPF cookies. * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value") */ man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE); /* Detect kernel support for automatic refcounting of USDT semaphore. * If this is not supported, USDTs with semaphores will not be supported. * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe") */ man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0; /* * Detect kernel support for uprobe multi link to be used for attaching * usdt probes. 
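	 * (Annotation: with uprobe multi link support, all USDT call sites
	 * can be attached through a single BPF link instead of one link per
	 * call site.)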
*/ man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK); return man; } void usdt_manager_free(struct usdt_manager *man) { if (IS_ERR_OR_NULL(man)) return; free(man->free_spec_ids); free(man); } static int sanity_check_usdt_elf(Elf *elf, const char *path) { GElf_Ehdr ehdr; int endianness; if (elf_kind(elf) != ELF_K_ELF) { pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path); return -EBADF; } switch (gelf_getclass(elf)) { case ELFCLASS64: if (sizeof(void *) != 8) { pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path); return -EBADF; } break; case ELFCLASS32: if (sizeof(void *) != 4) { pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path); return -EBADF; } break; default: pr_warn("usdt: unsupported ELF class for '%s'\n", path); return -EBADF; } if (!gelf_getehdr(elf, &ehdr)) return -EINVAL; if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) { pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n", path, ehdr.e_type); return -EBADF; } #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ endianness = ELFDATA2LSB; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ endianness = ELFDATA2MSB; #else # error "Unrecognized __BYTE_ORDER__" #endif if (endianness != ehdr.e_ident[EI_DATA]) { pr_warn("usdt: ELF endianness mismatch for '%s'\n", path); return -EBADF; } return 0; } static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn) { Elf_Scn *sec = NULL; size_t shstrndx; if (elf_getshdrstrndx(elf, &shstrndx)) return -EINVAL; /* check if ELF is corrupted and avoid calling elf_strptr if yes */ if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) return -EINVAL; while ((sec = elf_nextscn(elf, sec)) != NULL) { char *name; if (!gelf_getshdr(sec, shdr)) return -EINVAL; name = elf_strptr(elf, shstrndx, shdr->sh_name); if (name && strcmp(sec_name, name) == 0) { *scn = sec; return 0; } } return -ENOENT; } struct elf_seg { long start; long end; long offset; bool is_exec; }; static int cmp_elf_segs(const void *_a, const void *_b) { const struct elf_seg *a = _a; const struct elf_seg *b = _b; return a->start < b->start ? -1 : 1; } static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt) { GElf_Phdr phdr; size_t n; int i, err; struct elf_seg *seg; void *tmp; *seg_cnt = 0; if (elf_getphdrnum(elf, &n)) { err = -errno; return err; } for (i = 0; i < n; i++) { if (!gelf_getphdr(elf, i, &phdr)) { err = -errno; return err; } pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n", i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset, (long)phdr.p_type, (long)phdr.p_flags); if (phdr.p_type != PT_LOAD) continue; tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); if (!tmp) return -ENOMEM; *segs = tmp; seg = *segs + *seg_cnt; (*seg_cnt)++; seg->start = phdr.p_vaddr; seg->end = phdr.p_vaddr + phdr.p_memsz; seg->offset = phdr.p_offset; seg->is_exec = phdr.p_flags & PF_X; } if (*seg_cnt == 0) { pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path); return -ESRCH; } qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); return 0; } static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt) { char path[PATH_MAX], line[PATH_MAX], mode[16]; size_t seg_start, seg_end, seg_off; struct elf_seg *seg; int tmp_pid, i, err; FILE *f; *seg_cnt = 0; /* Handle containerized binaries only accessible from * /proc//root/. 
They will be reported as just / in * /proc//maps. */ if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid) goto proceed; if (!realpath(lib_path, path)) { pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n", lib_path, -errno); libbpf_strlcpy(path, lib_path, sizeof(path)); } proceed: sprintf(line, "/proc/%d/maps", pid); f = fopen(line, "re"); if (!f) { err = -errno; pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n", line, lib_path, err); return err; } /* We need to handle lines with no path at the end: * * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so */ while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n", &seg_start, &seg_end, mode, &seg_off, line) == 5) { void *tmp; /* to handle no path case (see above) we need to capture line * without skipping any whitespaces. So we need to strip * leading whitespaces manually here */ i = 0; while (isblank(line[i])) i++; if (strcmp(line + i, path) != 0) continue; pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n", path, seg_start, seg_end, mode, seg_off); /* ignore non-executable sections for shared libs */ if (mode[2] != 'x') continue; tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); if (!tmp) { err = -ENOMEM; goto err_out; } *segs = tmp; seg = *segs + *seg_cnt; *seg_cnt += 1; seg->start = seg_start; seg->end = seg_end; seg->offset = seg_off; seg->is_exec = true; } if (*seg_cnt == 0) { pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n", lib_path, path, pid); err = -ESRCH; goto err_out; } qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); err = 0; err_out: fclose(f); return err; } static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr) { struct elf_seg *seg; int i; /* for ELF binaries (both executables and shared libraries), we are * given virtual address (absolute for executables, relative for * libraries) which should match address range of [seg_start, seg_end) */ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) { if (seg->start <= virtaddr && virtaddr < seg->end) return seg; } return NULL; } static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset) { struct elf_seg *seg; int i; /* for VMA segments from /proc//maps file, provided "address" is * actually a file offset, so should be fall within logical * offset-based range of [offset_start, offset_end) */ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) { if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start)) return seg; } return NULL; } static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off, struct usdt_note *usdt_note); static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie); static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid, const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie, struct usdt_target **out_targets, size_t *out_target_cnt) { size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0; struct elf_seg *segs = NULL, *vma_segs = NULL; struct usdt_target *targets = NULL, *target; long base_addr = 0; Elf_Scn *notes_scn, *base_scn; GElf_Shdr base_shdr, notes_shdr; GElf_Ehdr 
ehdr; GElf_Nhdr nhdr; Elf_Data *data; int err; *out_targets = NULL; *out_target_cnt = 0; err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn); if (err) { pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path); return err; } if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) { pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path); return -EINVAL; } err = parse_elf_segs(elf, path, &segs, &seg_cnt); if (err) { pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err); goto err_out; } /* .stapsdt.base ELF section is optional, but is used for prelink * offset compensation (see the big comment further below) */ if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0) base_addr = base_shdr.sh_addr; data = elf_getdata(notes_scn, 0); off = 0; while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) { long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0; struct usdt_note note; struct elf_seg *seg = NULL; void *tmp; err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note); if (err) goto err_out; if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0) continue; /* We need to compensate for the "prelink effect". See [0] for details; * the relevant parts are quoted here: * * Each SDT probe also expands into a non-allocated ELF note. You can * find this by looking at SHT_NOTE sections and decoding the format; * see below for details. Because the note is non-allocated, it means * there is no runtime cost, and also preserved in both stripped files * and .debug files. * * However, this means that prelink won't adjust the note's contents * for address offsets. Instead, this is done via the .stapsdt.base * section. This is a special section that is added to the text. We * will only ever have one of these sections in a final link and it * will only ever be one byte long. Nothing about this section itself * matters, we just use it as a marker to detect prelink address * adjustments. * * Each probe note records the link-time address of the .stapsdt.base * section alongside the probe PC address. The decoder compares the * base address stored in the note with the .stapsdt.base section's * sh_addr. Initially these are the same, but the section header will * be adjusted by prelink. So the decoder applies the difference to * the probe PC address to get the correct prelinked PC address; the * same adjustment is applied to the semaphore address, if any. * * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation */ usdt_abs_ip = note.loc_addr; if (base_addr) usdt_abs_ip += base_addr - note.base_addr; /* When attaching uprobes (which is what USDTs basically are) * the kernel expects a file offset to be specified, not a relative * virtual address, so we need to translate the virtual address to * a file offset, for both ET_EXEC and ET_DYN binaries.
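 *
 * To make the mapping concrete (an illustrative sketch, not libbpf code;
 * 'seg' stands for the containing segment looked up below), the two
 * translations used throughout this function are:
 *
 *   file_off = vaddr - seg->start + seg->offset;   // virtual addr -> file offset
 *   vaddr = file_off - seg->offset + seg->start;   // file offset -> virtual addr
 *
 * find_elf_seg() picks 'seg' by virtual address and find_vma_seg() by file
 * offset, matching these two directions.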
*/ seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip); if (!seg) { err = -ESRCH; pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n", usdt_provider, usdt_name, path, usdt_abs_ip); goto err_out; } if (!seg->is_exec) { err = -ESRCH; pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n", path, seg->start, seg->end, usdt_provider, usdt_name, usdt_abs_ip); goto err_out; } /* translate from virtual address to file offset */ usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset; if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) { /* If we don't have BPF cookie support but need to * attach to a shared library, we'll need to know and * record absolute addresses of attach points due to * the need to lookup USDT spec by absolute IP of * triggered uprobe. Doing this resolution is only * possible when we have a specific PID of the process * that's using specified shared library. BPF cookie * removes the absolute address limitation as we don't * need to do this lookup (we just use BPF cookie as * an index of USDT spec), so for newer kernels with * BPF cookie support libbpf supports USDT attachment * to shared libraries with no PID filter. */ if (pid < 0) { pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n"); err = -ENOTSUP; goto err_out; } /* vma_segs are lazily initialized only if necessary */ if (vma_seg_cnt == 0) { err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt); if (err) { pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n", pid, path, err); goto err_out; } } seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip); if (!seg) { err = -ESRCH; pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n", usdt_provider, usdt_name, path, usdt_rel_ip); goto err_out; } usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip; } pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n", usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path, note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args, seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0); /* Adjust semaphore address to be a file offset */ if (note.sema_addr) { if (!man->has_sema_refcnt) { pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n", usdt_provider, usdt_name, path); err = -ENOTSUP; goto err_out; } seg = find_elf_seg(segs, seg_cnt, note.sema_addr); if (!seg) { err = -ESRCH; pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n", usdt_provider, usdt_name, path, note.sema_addr); goto err_out; } if (seg->is_exec) { err = -ESRCH; pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n", path, seg->start, seg->end, usdt_provider, usdt_name, note.sema_addr); goto err_out; } usdt_sema_off = note.sema_addr - seg->start + seg->offset; pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n", usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? 
"exec" : "lib ", path, note.sema_addr, note.base_addr, usdt_sema_off, seg->start, seg->end, seg->offset); } /* Record adjusted addresses and offsets and parse USDT spec */ tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets)); if (!tmp) { err = -ENOMEM; goto err_out; } targets = tmp; target = &targets[target_cnt]; memset(target, 0, sizeof(*target)); target->abs_ip = usdt_abs_ip; target->rel_ip = usdt_rel_ip; target->sema_off = usdt_sema_off; /* notes.args references strings from ELF itself, so they can * be referenced safely until elf_end() call */ target->spec_str = note.args; err = parse_usdt_spec(&target->spec, ¬e, usdt_cookie); if (err) goto err_out; target_cnt++; } *out_targets = targets; *out_target_cnt = target_cnt; err = target_cnt; err_out: free(segs); free(vma_segs); if (err < 0) free(targets); return err; } struct bpf_link_usdt { struct bpf_link link; struct usdt_manager *usdt_man; size_t spec_cnt; int *spec_ids; size_t uprobe_cnt; struct { long abs_ip; struct bpf_link *link; } *uprobes; struct bpf_link *multi_link; }; static int bpf_link_usdt_detach(struct bpf_link *link) { struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link); struct usdt_manager *man = usdt_link->usdt_man; int i; bpf_link__destroy(usdt_link->multi_link); /* When having multi_link, uprobe_cnt is 0 */ for (i = 0; i < usdt_link->uprobe_cnt; i++) { /* detach underlying uprobe link */ bpf_link__destroy(usdt_link->uprobes[i].link); /* there is no need to update specs map because it will be * unconditionally overwritten on subsequent USDT attaches, * but if BPF cookies are not used we need to remove entry * from ip_to_spec_id map, otherwise we'll run into false * conflicting IP errors */ if (!man->has_bpf_cookie) { /* not much we can do about errors here */ (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map), &usdt_link->uprobes[i].abs_ip); } } /* try to return the list of previously used spec IDs to usdt_manager * for future reuse for subsequent USDT attaches */ if (!man->free_spec_ids) { /* if there were no free spec IDs yet, just transfer our IDs */ man->free_spec_ids = usdt_link->spec_ids; man->free_spec_cnt = usdt_link->spec_cnt; usdt_link->spec_ids = NULL; } else { /* otherwise concat IDs */ size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt; int *new_free_ids; new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt, sizeof(*new_free_ids)); /* If we couldn't resize free_spec_ids, we'll just leak * a bunch of free IDs; this is very unlikely to happen and if * system is so exhausted on memory, it's the least of user's * concerns, probably. * So just do our best here to return those IDs to usdt_manager. * Another edge case when we can legitimately get NULL is when * new_cnt is zero, which can happen in some edge cases, so we * need to be careful about that. 
*/ if (new_free_ids || new_cnt == 0) { memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids, usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids)); man->free_spec_ids = new_free_ids; man->free_spec_cnt = new_cnt; } } return 0; } static void bpf_link_usdt_dealloc(struct bpf_link *link) { struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link); free(usdt_link->spec_ids); free(usdt_link->uprobes); free(usdt_link); } static size_t specs_hash_fn(long key, void *ctx) { return str_hash((char *)key); } static bool specs_equal_fn(long key1, long key2, void *ctx) { return strcmp((char *)key1, (char *)key2) == 0; } static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash, struct bpf_link_usdt *link, struct usdt_target *target, int *spec_id, bool *is_new) { long tmp; void *new_ids; int err; /* check if we already allocated spec ID for this spec string */ if (hashmap__find(specs_hash, target->spec_str, &tmp)) { *spec_id = tmp; *is_new = false; return 0; } /* otherwise it's a new ID that needs to be set up in specs map and * returned back to usdt_manager when USDT link is detached */ new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids)); if (!new_ids) return -ENOMEM; link->spec_ids = new_ids; /* get next free spec ID, giving preference to free list, if not empty */ if (man->free_spec_cnt) { *spec_id = man->free_spec_ids[man->free_spec_cnt - 1]; /* cache spec ID for current spec string for future lookups */ err = hashmap__add(specs_hash, target->spec_str, *spec_id); if (err) return err; man->free_spec_cnt--; } else { /* don't allocate spec ID bigger than what fits in specs map */ if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map)) return -E2BIG; *spec_id = man->next_free_spec_id; /* cache spec ID for current spec string for future lookups */ err = hashmap__add(specs_hash, target->spec_str, *spec_id); if (err) return err; man->next_free_spec_id++; } /* remember new spec ID in the link for later return back to free list on detach */ link->spec_ids[link->spec_cnt] = *spec_id; link->spec_cnt++; *is_new = true; return 0; } struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog, pid_t pid, const char *path, const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie) { unsigned long *offsets = NULL, *ref_ctr_offsets = NULL; int i, err, spec_map_fd, ip_map_fd; LIBBPF_OPTS(bpf_uprobe_opts, opts); struct hashmap *specs_hash = NULL; struct bpf_link_usdt *link = NULL; struct usdt_target *targets = NULL; __u64 *cookies = NULL; struct elf_fd elf_fd; size_t target_cnt; spec_map_fd = bpf_map__fd(man->specs_map); ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map); err = elf_open(path, &elf_fd); if (err) return libbpf_err_ptr(err); err = sanity_check_usdt_elf(elf_fd.elf, path); if (err) goto err_out; /* normalize PID filter */ if (pid < 0) pid = -1; else if (pid == 0) pid = getpid(); /* discover USDT in given binary, optionally limiting * activations to a given PID, if pid > 0 */ err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name, usdt_cookie, &targets, &target_cnt); if (err <= 0) { err = (err == 0) ? 
-ENOENT : err; goto err_out; } specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL); if (IS_ERR(specs_hash)) { err = PTR_ERR(specs_hash); goto err_out; } link = calloc(1, sizeof(*link)); if (!link) { err = -ENOMEM; goto err_out; } link->usdt_man = man; link->link.detach = &bpf_link_usdt_detach; link->link.dealloc = &bpf_link_usdt_dealloc; if (man->has_uprobe_multi) { offsets = calloc(target_cnt, sizeof(*offsets)); cookies = calloc(target_cnt, sizeof(*cookies)); ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets)); if (!offsets || !ref_ctr_offsets || !cookies) { err = -ENOMEM; goto err_out; } } else { link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); if (!link->uprobes) { err = -ENOMEM; goto err_out; } } for (i = 0; i < target_cnt; i++) { struct usdt_target *target = &targets[i]; struct bpf_link *uprobe_link; bool is_new; int spec_id; /* Spec ID can be either reused or newly allocated. If it is * newly allocated, we'll need to fill out spec map, otherwise * entire spec should be valid and can be just used by a new * uprobe. We reuse spec when USDT arg spec is identical. We * also never share specs between two different USDT * attachments ("links"), so all the reused specs already * share USDT cookie value implicitly. */ err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new); if (err) goto err_out; if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) { err = -errno; pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n", spec_id, usdt_provider, usdt_name, path, err); goto err_out; } if (!man->has_bpf_cookie && bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) { err = -errno; if (err == -EEXIST) { pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n", spec_id, usdt_provider, usdt_name, path); } else { pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n", target->abs_ip, spec_id, usdt_provider, usdt_name, path, err); } goto err_out; } if (man->has_uprobe_multi) { offsets[i] = target->rel_ip; ref_ctr_offsets[i] = target->sema_off; cookies[i] = spec_id; } else { opts.ref_ctr_offset = target->sema_off; opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0; uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, target->rel_ip, &opts); err = libbpf_get_error(uprobe_link); if (err) { pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", i, usdt_provider, usdt_name, path, err); goto err_out; } link->uprobes[i].link = uprobe_link; link->uprobes[i].abs_ip = target->abs_ip; link->uprobe_cnt++; } } if (man->has_uprobe_multi) { LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi, .ref_ctr_offsets = ref_ctr_offsets, .offsets = offsets, .cookies = cookies, .cnt = target_cnt, ); link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path, NULL, &opts_multi); if (!link->multi_link) { err = -errno; pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n", usdt_provider, usdt_name, path, err); goto err_out; } free(offsets); free(ref_ctr_offsets); free(cookies); } free(targets); hashmap__free(specs_hash); elf_close(&elf_fd); return &link->link; err_out: free(offsets); free(ref_ctr_offsets); free(cookies); if (link) bpf_link__destroy(&link->link); free(targets); hashmap__free(specs_hash); elf_close(&elf_fd); return libbpf_err_ptr(err); } /* Parse out USDT ELF note from '.note.stapsdt' section. * Logic inspired by perf's code. 
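 *
 * For orientation, an illustrative sketch (not part of libbpf): such notes
 * are usually emitted by applications through <sys/sdt.h>, e.g.:
 *
 *   #include <sys/sdt.h>
 *
 *   void handle_request(int fd, long size)
 *   {
 *           DTRACE_PROBE2(my_provider, my_probe, fd, size);
 *   }
 *
 * This produces a note whose descriptor starts with three addresses
 * (location, base, semaphore), followed by three zero-terminated strings:
 * provider ("my_provider"), name ("my_probe"), and the args spec string,
 * e.g. "-4@%edi -8@%rsi" on x86-64 (the exact spec depends on the
 * compiler's register allocation). parse_usdt_note() below decodes exactly
 * this layout.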
*/ static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off, struct usdt_note *note) { const char *provider, *name, *args; long addrs[3]; size_t len; /* sanity check USDT note name and type first */ if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0) return -EINVAL; if (nhdr->n_type != USDT_NOTE_TYPE) return -EINVAL; /* sanity check USDT note contents ("description" in ELF terminology) */ len = nhdr->n_descsz; data = data + desc_off; /* +3 is the very minimum required to store three empty strings */ if (len < sizeof(addrs) + 3) return -EINVAL; /* get location, base, and semaphore addrs */ memcpy(&addrs, data, sizeof(addrs)); /* parse string fields: provider, name, args */ provider = data + sizeof(addrs); name = (const char *)memchr(provider, '\0', data + len - provider); if (!name) /* non-zero-terminated provider */ return -EINVAL; name++; if (name >= data + len || *name == '\0') /* missing or empty name */ return -EINVAL; args = memchr(name, '\0', data + len - name); if (!args) /* non-zero-terminated name */ return -EINVAL; ++args; if (args >= data + len) /* missing arguments spec */ return -EINVAL; note->provider = provider; note->name = name; if (*args == '\0' || *args == ':') note->args = ""; else note->args = args; note->loc_addr = addrs[0]; note->base_addr = addrs[1]; note->sema_addr = addrs[2]; return 0; } static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz); static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie) { struct usdt_arg_spec *arg; const char *s; int arg_sz, len; spec->usdt_cookie = usdt_cookie; spec->arg_cnt = 0; s = note->args; while (s[0]) { if (spec->arg_cnt >= USDT_MAX_ARG_CNT) { pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n", USDT_MAX_ARG_CNT, note->provider, note->name, note->args); return -E2BIG; } arg = &spec->args[spec->arg_cnt]; len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz); if (len < 0) return len; arg->arg_signed = arg_sz < 0; if (arg_sz < 0) arg_sz = -arg_sz; switch (arg_sz) { case 1: case 2: case 4: case 8: arg->arg_bitshift = 64 - arg_sz * 8; break; default: pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n", spec->arg_cnt, s, arg_sz); return -EINVAL; } s += len; spec->arg_cnt++; } return 0; } /* Architecture-specific logic for parsing USDT argument location specs */ #if defined(__x86_64__) || defined(__i386__) static int calc_pt_regs_off(const char *reg_name) { static struct { const char *names[4]; size_t pt_regs_off; } reg_map[] = { #ifdef __x86_64__ #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64) #else #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32) #endif { {"rip", "eip", "", ""}, reg_off(rip, eip) }, { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) }, { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) }, { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) }, { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) }, { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) }, { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) }, { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) }, { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) }, #undef reg_off #ifdef __x86_64__ { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) }, { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) }, { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) }, { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) }, { {"r12", 
"r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) }, { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) }, { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) }, { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) }, #endif }; int i, j; for (i = 0; i < ARRAY_SIZE(reg_map); i++) { for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) { if (strcmp(reg_name, reg_map[i].names[j]) == 0) return reg_map[i].pt_regs_off; } } pr_warn("usdt: unrecognized register '%s'\n", reg_name); return -ENOENT; } static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { char reg_name[16]; int len, reg_off; long off; if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) { /* Memory dereference case, e.g., -4@-20(%rbp) */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) { /* Memory dereference case without offset, e.g., 8@(%rsp) */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) { /* Register read case, e.g., -4@%eax */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) { /* Constant value case, e.g., 4@$71 */ arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; } else { pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); return -EINVAL; } return len; } #elif defined(__s390x__) /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. 
*/ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { unsigned int reg; int len; long off; if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) { /* Memory dereference case, e.g., -2@-28(%r15) */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; if (reg > 15) { pr_warn("usdt: unrecognized register '%%r%u'\n", reg); return -EINVAL; } arg->reg_off = offsetof(user_pt_regs, gprs[reg]); } else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) { /* Register read case, e.g., -8@%r0 */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; if (reg > 15) { pr_warn("usdt: unrecognized register '%%r%u'\n", reg); return -EINVAL; } arg->reg_off = offsetof(user_pt_regs, gprs[reg]); } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) { /* Constant value case, e.g., 4@71 */ arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; } else { pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); return -EINVAL; } return len; } #elif defined(__aarch64__) static int calc_pt_regs_off(const char *reg_name) { int reg_num; if (sscanf(reg_name, "x%d", &reg_num) == 1) { if (reg_num >= 0 && reg_num < 31) return offsetof(struct user_pt_regs, regs[reg_num]); } else if (strcmp(reg_name, "sp") == 0) { return offsetof(struct user_pt_regs, sp); } pr_warn("usdt: unrecognized register '%s'\n", reg_name); return -ENOENT; } static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { char reg_name[16]; int len, reg_off; long off; if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) { /* Memory dereference case, e.g., -4@[sp, 96] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) { /* Memory dereference case, e.g., -4@[sp] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) { /* Constant value case, e.g., 4@5 */ arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) { /* Register read case, e.g., -8@x4 */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else { pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); return -EINVAL; } return len; } #elif defined(__riscv) static int calc_pt_regs_off(const char *reg_name) { static struct { const char *name; size_t pt_regs_off; } reg_map[] = { { "ra", offsetof(struct user_regs_struct, ra) }, { "sp", offsetof(struct user_regs_struct, sp) }, { "gp", offsetof(struct user_regs_struct, gp) }, { "tp", offsetof(struct user_regs_struct, tp) }, { "a0", offsetof(struct user_regs_struct, a0) }, { "a1", offsetof(struct user_regs_struct, a1) }, { "a2", offsetof(struct user_regs_struct, a2) }, { "a3", offsetof(struct user_regs_struct, a3) }, { "a4", offsetof(struct user_regs_struct, a4) }, { "a5", offsetof(struct user_regs_struct, a5) }, { "a6", offsetof(struct user_regs_struct, a6) }, { "a7", offsetof(struct user_regs_struct, a7) }, { "s0", offsetof(struct user_regs_struct, s0) }, { "s1", offsetof(struct user_regs_struct, s1) }, { "s2",
offsetof(struct user_regs_struct, s2) }, { "s3", offsetof(struct user_regs_struct, s3) }, { "s4", offsetof(struct user_regs_struct, s4) }, { "s5", offsetof(struct user_regs_struct, s5) }, { "s6", offsetof(struct user_regs_struct, s6) }, { "s7", offsetof(struct user_regs_struct, s7) }, { "s8", offsetof(struct user_regs_struct, rv_s8) }, { "s9", offsetof(struct user_regs_struct, s9) }, { "s10", offsetof(struct user_regs_struct, s10) }, { "s11", offsetof(struct user_regs_struct, s11) }, { "t0", offsetof(struct user_regs_struct, t0) }, { "t1", offsetof(struct user_regs_struct, t1) }, { "t2", offsetof(struct user_regs_struct, t2) }, { "t3", offsetof(struct user_regs_struct, t3) }, { "t4", offsetof(struct user_regs_struct, t4) }, { "t5", offsetof(struct user_regs_struct, t5) }, { "t6", offsetof(struct user_regs_struct, t6) }, }; int i; for (i = 0; i < ARRAY_SIZE(reg_map); i++) { if (strcmp(reg_name, reg_map[i].name) == 0) return reg_map[i].pt_regs_off; } pr_warn("usdt: unrecognized register '%s'\n", reg_name); return -ENOENT; } static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { char reg_name[16]; int len, reg_off; long off; if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) { /* Memory dereference case, e.g., -8@-88(s0) */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) { /* Constant value case, e.g., 4@5 */ arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) { /* Register read case, e.g., -8@a1 */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else { pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); return -EINVAL; } return len; } #elif defined(__arm__) static int calc_pt_regs_off(const char *reg_name) { static struct { const char *name; size_t pt_regs_off; } reg_map[] = { { "r0", offsetof(struct pt_regs, uregs[0]) }, { "r1", offsetof(struct pt_regs, uregs[1]) }, { "r2", offsetof(struct pt_regs, uregs[2]) }, { "r3", offsetof(struct pt_regs, uregs[3]) }, { "r4", offsetof(struct pt_regs, uregs[4]) }, { "r5", offsetof(struct pt_regs, uregs[5]) }, { "r6", offsetof(struct pt_regs, uregs[6]) }, { "r7", offsetof(struct pt_regs, uregs[7]) }, { "r8", offsetof(struct pt_regs, uregs[8]) }, { "r9", offsetof(struct pt_regs, uregs[9]) }, { "r10", offsetof(struct pt_regs, uregs[10]) }, { "fp", offsetof(struct pt_regs, uregs[11]) }, { "ip", offsetof(struct pt_regs, uregs[12]) }, { "sp", offsetof(struct pt_regs, uregs[13]) }, { "lr", offsetof(struct pt_regs, uregs[14]) }, { "pc", offsetof(struct pt_regs, uregs[15]) }, }; int i; for (i = 0; i < ARRAY_SIZE(reg_map); i++) { if (strcmp(reg_name, reg_map[i].name) == 0) return reg_map[i].pt_regs_off; } pr_warn("usdt: unrecognized register '%s'\n", reg_name); return -ENOENT; } static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { char reg_name[16]; int len, reg_off; long off; if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n", arg_sz, reg_name, &off, &len) == 3) { /* Memory dereference case, e.g., -4@[fp, #96] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = off; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off 
= reg_off; } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) { /* Memory dereference case, e.g., -4@[sp] */ arg->arg_type = USDT_ARG_REG_DEREF; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) { /* Constant value case, e.g., 4@#5 */ arg->arg_type = USDT_ARG_CONST; arg->val_off = off; arg->reg_off = 0; } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) { /* Register read case, e.g., -8@r4 */ arg->arg_type = USDT_ARG_REG; arg->val_off = 0; reg_off = calc_pt_regs_off(reg_name); if (reg_off < 0) return reg_off; arg->reg_off = reg_off; } else { pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); return -EINVAL; } return len; } #else static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) { pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n"); return -ENOTSUP; } #endif
xdp-tools-1.5.4/lib/libbpf/src/libbpf_probes.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2019 Netronome Systems, Inc. */ #include <errno.h> #include <fcntl.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <net/if.h> #include <sys/utsname.h> #include <linux/btf.h> #include <linux/filter.h> #include <linux/kernel.h> #include <linux/version.h> #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release, * but Ubuntu provides the /proc/version_signature file, as described at * https://ubuntu.com/kernel, with example contents below, which we * can use to get a proper LINUX_VERSION_CODE. * * Ubuntu 5.4.0-12.15-generic 5.4.8 * * In the above, 5.4.8 is what the kernel is actually expecting, while * the uname() call will return 5.4.0 in info.release. */ static __u32 get_ubuntu_kernel_version(void) { const char *ubuntu_kver_file = "/proc/version_signature"; __u32 major, minor, patch; int ret; FILE *f; if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0) return 0; f = fopen(ubuntu_kver_file, "re"); if (!f) return 0; ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch); fclose(f); if (ret != 3) return 0; return KERNEL_VERSION(major, minor, patch); } /* On Debian LINUX_VERSION_CODE doesn't correspond to info.release. * Instead, it is provided in info.version. Example content from * Debian 10 is shown below. * * utsname::release 4.19.0-22-amd64 * utsname::version #1 SMP Debian 4.19.260-1 (2022-09-29) * * In the above, 4.19.260 is what the kernel is actually expecting, while * the uname() call will return 4.19.0 in info.release. */ static __u32 get_debian_kernel_version(struct utsname *info) { __u32 major, minor, patch; char *p; p = strstr(info->version, "Debian "); if (!p) { /* This is not a Debian kernel. */ return 0; } if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3) return 0; return KERNEL_VERSION(major, minor, patch); } __u32 get_kernel_version(void) { __u32 major, minor, patch, version; struct utsname info; /* Check if this is an Ubuntu kernel. */ version = get_ubuntu_kernel_version(); if (version != 0) return version; uname(&info); /* Check if this is a Debian kernel.
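 *
 * In all branches the result is packed with KERNEL_VERSION(); as a worked
 * example (assuming the usual <linux/version.h> definition
 * KERNEL_VERSION(a, b, c) == ((a) << 16) + ((b) << 8) + (c), with newer
 * kernels clamping c to 255):
 *
 *   KERNEL_VERSION(5, 4, 8) == (5 << 16) + (4 << 8) + 8 == 0x050408
 *
 * so version codes compare correctly with plain integer comparison.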
*/ version = get_debian_kernel_version(&info); if (version != 0) return version; if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) return 0; return KERNEL_VERSION(major, minor, patch); } static int probe_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, size_t insns_cnt, char *log_buf, size_t log_buf_sz) { LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_buf = log_buf, .log_size = log_buf_sz, .log_level = log_buf ? 1 : 0, ); int fd, err, exp_err = 0; const char *exp_msg = NULL; char buf[4096]; switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; case BPF_PROG_TYPE_CGROUP_SOCKOPT: opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT; break; case BPF_PROG_TYPE_SK_LOOKUP: opts.expected_attach_type = BPF_SK_LOOKUP; break; case BPF_PROG_TYPE_KPROBE: opts.kern_version = get_kernel_version(); break; case BPF_PROG_TYPE_LIRC_MODE2: opts.expected_attach_type = BPF_LIRC_MODE2; break; case BPF_PROG_TYPE_TRACING: case BPF_PROG_TYPE_LSM: opts.log_buf = buf; opts.log_size = sizeof(buf); opts.log_level = 1; if (prog_type == BPF_PROG_TYPE_TRACING) opts.expected_attach_type = BPF_TRACE_FENTRY; else opts.expected_attach_type = BPF_MODIFY_RETURN; opts.attach_btf_id = 1; exp_err = -EINVAL; exp_msg = "attach_btf_id 1 is not a function"; break; case BPF_PROG_TYPE_EXT: opts.log_buf = buf; opts.log_size = sizeof(buf); opts.log_level = 1; opts.attach_btf_id = 1; exp_err = -EINVAL; exp_msg = "Cannot replace kernel functions"; break; case BPF_PROG_TYPE_SYSCALL: opts.prog_flags = BPF_F_SLEEPABLE; break; case BPF_PROG_TYPE_STRUCT_OPS: exp_err = -524; /* -ENOTSUPP */ break; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_TRACEPOINT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_RAW_TRACEPOINT: case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: break; case BPF_PROG_TYPE_NETFILTER: opts.expected_attach_type = BPF_NETFILTER; break; default: return -EOPNOTSUPP; } fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts); err = -errno; if (fd >= 0) close(fd); if (exp_err) { if (fd >= 0 || err != exp_err) return 0; if (exp_msg && !strstr(buf, exp_msg)) return 0; return 1; } return fd >= 0 ? 1 : 0; } int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts) { struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() }; const size_t insn_cnt = ARRAY_SIZE(insns); int ret; if (opts) return libbpf_err(-EINVAL); ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0); return libbpf_err(ret); } int libbpf__load_raw_btf(const char *raw_types, size_t types_len, const char *str_sec, size_t str_len, int token_fd) { struct btf_header hdr = { .magic = BTF_MAGIC, .version = BTF_VERSION, .hdr_len = sizeof(struct btf_header), .type_len = types_len, .str_off = types_len, .str_len = str_len, }; LIBBPF_OPTS(bpf_btf_load_opts, opts, .token_fd = token_fd, .btf_flags = token_fd ? 
BPF_F_TOKEN_FD : 0, ); int btf_fd, btf_len; __u8 *raw_btf; btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len; raw_btf = malloc(btf_len); if (!raw_btf) return -ENOMEM; memcpy(raw_btf, &hdr, sizeof(hdr)); memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len); memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); btf_fd = bpf_btf_load(raw_btf, btf_len, &opts); free(raw_btf); return btf_fd; } static int load_local_storage_btf(void) { const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l"; /* struct bpf_spin_lock { * int val; * }; * struct val { * int cnt; * struct bpf_spin_lock l; * }; */ __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* struct bpf_spin_lock */ /* [2] */ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), BTF_MEMBER_ENC(15, 1, 0), /* int val; */ /* struct val */ /* [3] */ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ }; return libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), 0); } static int probe_map_create(enum bpf_map_type map_type) { LIBBPF_OPTS(bpf_map_create_opts, opts); int key_size, value_size, max_entries; __u32 btf_key_type_id = 0, btf_value_type_id = 0; int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0; key_size = sizeof(__u32); value_size = sizeof(__u32); max_entries = 1; switch (map_type) { case BPF_MAP_TYPE_STACK_TRACE: value_size = sizeof(__u64); break; case BPF_MAP_TYPE_LPM_TRIE: key_size = sizeof(__u64); value_size = sizeof(__u64); opts.map_flags = BPF_F_NO_PREALLOC; break; case BPF_MAP_TYPE_CGROUP_STORAGE: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: key_size = sizeof(struct bpf_cgroup_storage_key); value_size = sizeof(__u64); max_entries = 0; break; case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: key_size = 0; break; case BPF_MAP_TYPE_SK_STORAGE: case BPF_MAP_TYPE_INODE_STORAGE: case BPF_MAP_TYPE_TASK_STORAGE: case BPF_MAP_TYPE_CGRP_STORAGE: btf_key_type_id = 1; btf_value_type_id = 3; value_size = 8; max_entries = 0; opts.map_flags = BPF_F_NO_PREALLOC; btf_fd = load_local_storage_btf(); if (btf_fd < 0) return btf_fd; break; case BPF_MAP_TYPE_RINGBUF: case BPF_MAP_TYPE_USER_RINGBUF: key_size = 0; value_size = 0; max_entries = sysconf(_SC_PAGE_SIZE); break; case BPF_MAP_TYPE_STRUCT_OPS: /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */ opts.btf_vmlinux_value_type_id = 1; opts.value_type_btf_obj_fd = -1; exp_err = -524; /* -ENOTSUPP */ break; case BPF_MAP_TYPE_BLOOM_FILTER: key_size = 0; max_entries = 1; break; case BPF_MAP_TYPE_ARENA: key_size = 0; value_size = 0; max_entries = 1; /* one page */ opts.map_extra = 0; /* can mmap() at any address */ opts.map_flags = BPF_F_MMAPABLE; break; case BPF_MAP_TYPE_HASH: case BPF_MAP_TYPE_ARRAY: case BPF_MAP_TYPE_PROG_ARRAY: case BPF_MAP_TYPE_PERF_EVENT_ARRAY: case BPF_MAP_TYPE_PERCPU_HASH: case BPF_MAP_TYPE_PERCPU_ARRAY: case BPF_MAP_TYPE_CGROUP_ARRAY: case BPF_MAP_TYPE_LRU_HASH: case BPF_MAP_TYPE_LRU_PERCPU_HASH: case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP_HASH: case BPF_MAP_TYPE_SOCKMAP: case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_SOCKHASH: case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: break; case BPF_MAP_TYPE_UNSPEC: default: return -EOPNOTSUPP; } if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(__u32), sizeof(__u32), 1, NULL); 
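/* Illustrative aside (a sketch, not part of this file): the same template-map
 * requirement applies to any direct map-in-map creation, so a caller would
 * mirror the probe logic above along these lines:
 *
 *   LIBBPF_OPTS(bpf_map_create_opts, mo);
 *   int inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, 4, 4, 1, NULL);
 *   mo.inner_map_fd = inner;
 *   int outer = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, NULL, 4, 4, 8, &mo);
 *   close(inner);  // the template fd may be closed once the outer map exists
 */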
if (fd_inner < 0) goto cleanup; opts.inner_map_fd = fd_inner; } if (btf_fd >= 0) { opts.btf_fd = btf_fd; opts.btf_key_type_id = btf_key_type_id; opts.btf_value_type_id = btf_value_type_id; } fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); err = -errno; cleanup: if (fd >= 0) close(fd); if (fd_inner >= 0) close(fd_inner); if (btf_fd >= 0) close(btf_fd); if (exp_err) return fd < 0 && err == exp_err ? 1 : 0; else return fd >= 0 ? 1 : 0; } int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts) { int ret; if (opts) return libbpf_err(-EINVAL); ret = probe_map_create(map_type); return libbpf_err(ret); } int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id, const void *opts) { struct bpf_insn insns[] = { BPF_EMIT_CALL((__u32)helper_id), BPF_EXIT_INSN(), }; const size_t insn_cnt = ARRAY_SIZE(insns); char buf[4096]; int ret; if (opts) return libbpf_err(-EINVAL); /* we can't successfully load all prog types to check for BPF helper * support, so bail out with -EOPNOTSUPP error */ switch (prog_type) { case BPF_PROG_TYPE_TRACING: case BPF_PROG_TYPE_EXT: case BPF_PROG_TYPE_LSM: case BPF_PROG_TYPE_STRUCT_OPS: return -EOPNOTSUPP; default: break; } buf[0] = '\0'; ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf)); if (ret < 0) return libbpf_err(ret); /* If the BPF verifier doesn't recognize the BPF helper ID (enum bpf_func_id) * at all, it will emit something like "invalid func unknown#181". * If the BPF verifier recognizes the BPF helper but it's not supported for * the given BPF program type, it will emit "unknown func bpf_sys_bpf#166" * or "program of this type cannot use helper bpf_sys_bpf#166". * In both cases, the provided combination of BPF program type and BPF * helper is not supported by the kernel. * In all other cases, probe_prog_load() above will either succeed (e.g., * because the BPF helper happens to accept no input arguments or it * accepts one input argument and the initial PTR_TO_CTX is fine for * that), or we'll get some more specific BPF verifier error about * some unsatisfied conditions. */ if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ") || strstr(buf, "program of this type cannot use helper "))) return 0; return 1; /* assume supported */ }
xdp-tools-1.5.4/lib/libbpf/src/zip.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LIBBPF_ZIP_H #define __LIBBPF_ZIP_H #include <linux/types.h> /* Represents an open zip archive. * Only basic ZIP files are supported, in particular the following are not * supported: * - encryption * - streaming * - multi-part ZIP files * - ZIP64 */ struct zip_archive; /* Carries information on name, compression method, and data corresponding to a * file in a zip archive. */ struct zip_entry { /* Compression method as defined in the pkzip spec. 0 means data is uncompressed. */ __u16 compression; /* Non-null-terminated name of the file. */ const char *name; /* Length of the file name. */ __u16 name_length; /* Pointer to the file data. */ const void *data; /* Length of the file data. */ __u32 data_length; /* Offset of the file data within the archive. */ __u32 data_offset; }; /* Open a zip archive. Returns NULL in case of an error. */ struct zip_archive *zip_archive_open(const char *path); /* Close a zip archive and release resources. */ void zip_archive_close(struct zip_archive *archive); /* Look up an entry corresponding to a file in given zip archive.
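 *
 * Illustrative usage sketch (not part of this header; it assumes the usual
 * 0-on-success convention for the lookup declared below, and consume_data()
 * is a hypothetical callback):
 *
 *   struct zip_archive *ar = zip_archive_open("app.apk");
 *   struct zip_entry e;
 *
 *   if (ar && zip_archive_find_entry(ar, "classes.dex", &e) == 0 &&
 *       e.compression == 0)                       // stored, i.e. uncompressed
 *           consume_data(e.data, e.data_length);  // hypothetical handler
 *   if (ar)
 *           zip_archive_close(ar);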
*/ int zip_archive_find_entry(struct zip_archive *archive, const char *name, struct zip_entry *out); #endif
xdp-tools-1.5.4/lib/libbpf/src/libbpf_internal.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Internal libbpf helpers. * * Copyright (c) 2019 Facebook */ #ifndef __LIBBPF_LIBBPF_INTERNAL_H #define __LIBBPF_LIBBPF_INTERNAL_H #include <stdlib.h> #include <limits.h> #include <errno.h> #include <linux/err.h> #include <fcntl.h> #include <unistd.h> #include <sys/syscall.h> #include <libelf.h> #include <byteswap.h> #include "relo_core.h" /* Android's libc doesn't support AT_EACCESS in its faccessat() implementation * ([0]), and just returns -EINVAL even if the file exists and is accessible. * See [1] for issues caused by this. * * So just redefine it to 0 on Android. * * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50 * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250 */ #ifdef __ANDROID__ #undef AT_EACCESS #define AT_EACCESS 0 #endif /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 /* prevent accidental re-addition of reallocarray() */ #pragma GCC poison reallocarray #include "libbpf.h" #include "btf.h" #ifndef EM_BPF #define EM_BPF 247 #endif #ifndef R_BPF_64_64 #define R_BPF_64_64 1 #endif #ifndef R_BPF_64_ABS64 #define R_BPF_64_ABS64 2 #endif #ifndef R_BPF_64_ABS32 #define R_BPF_64_ABS32 3 #endif #ifndef R_BPF_64_32 #define R_BPF_64_32 10 #endif #ifndef SHT_LLVM_ADDRSIG #define SHT_LLVM_ADDRSIG 0x6FFF4C03 #endif /* if libelf is old and doesn't support mmap(), fall back to read() */ #ifndef ELF_C_READ_MMAP #define ELF_C_READ_MMAP ELF_C_READ #endif /* Older libelf all end up in this expression, for both 32 and 64 bit */ #ifndef ELF64_ST_VISIBILITY #define ELF64_ST_VISIBILITY(o) ((o) & 0x03) #endif #define BTF_INFO_ENC(kind, kind_flag, vlen) \ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) #define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type) #define BTF_INT_ENC(encoding, bits_offset, nr_bits) \ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) #define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ BTF_INT_ENC(encoding, bits_offset, bits) #define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset) #define BTF_PARAM_ENC(name, type) (name), (type) #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) #define BTF_TYPE_FLOAT_ENC(name, sz) \ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) #define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) #define BTF_TYPE_TYPE_TAG_ENC(value, type) \ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type) #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #ifndef min # define min(x, y) ((x) < (y) ? (x) : (y)) #endif #ifndef max # define max(x, y) ((x) < (y) ? (y) : (x)) #endif #ifndef offsetofend # define offsetofend(TYPE, FIELD) \ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) #endif #ifndef __alias #define __alias(symbol) __attribute__((alias(#symbol))) #endif /* Check whether a string `str` has prefix `pfx`, regardless of whether `pfx` is * a string literal known at compilation time or a char * pointer known only at * runtime.
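 *
 * Worked example: str_has_pfx("xdp/devmap", "xdp") compares only the three
 * leading bytes and evaluates to true. For a literal prefix the compiler
 * folds sizeof("xdp") - 1 == 3 at compile time, so no strlen() call is made;
 * for a runtime char * prefix the strlen() branch is used instead.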
*/ #define str_has_pfx(str, pfx) \ (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0) /* suffix check */ static inline bool str_has_sfx(const char *str, const char *sfx) { size_t str_len = strlen(str); size_t sfx_len = strlen(sfx); if (sfx_len > str_len) return false; return strcmp(str + str_len - sfx_len, sfx) == 0; } /* Symbol versioning is different between static and shared library. * Properly versioned symbols are needed for shared library, but * only the symbol of the new version is needed for static library. * Starting with GNU C 10, use symver attribute instead of .symver assembler * directive, which works better with GCC LTO builds. */ #if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10 #define DEFAULT_VERSION(internal_name, api_name, version) \ __attribute__((symver(#api_name "@@" #version))) #define COMPAT_VERSION(internal_name, api_name, version) \ __attribute__((symver(#api_name "@" #version))) #elif defined(SHARED) #define COMPAT_VERSION(internal_name, api_name, version) \ asm(".symver " #internal_name "," #api_name "@" #version); #define DEFAULT_VERSION(internal_name, api_name, version) \ asm(".symver " #internal_name "," #api_name "@@" #version); #else /* !SHARED */ #define COMPAT_VERSION(internal_name, api_name, version) #define DEFAULT_VERSION(internal_name, api_name, version) \ extern typeof(internal_name) api_name \ __attribute__((alias(#internal_name))); #endif extern void libbpf_print(enum libbpf_print_level level, const char *format, ...) __attribute__((format(printf, 2, 3))); #define __pr(level, fmt, ...) \ do { \ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ } while (0) #define pr_warn(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) #ifndef __has_builtin #define __has_builtin(x) 0 #endif struct bpf_link { int (*detach)(struct bpf_link *link); void (*dealloc)(struct bpf_link *link); char *pin_path; /* NULL, if not pinned */ int fd; /* hook FD, -1 if not applicable */ bool disconnected; }; /* * Re-implement glibc's reallocarray() for libbpf internal-only use. * reallocarray(), unfortunately, is not available in all versions of glibc, * so requires extra feature detection and using reallocarray() stub from * and COMPAT_NEED_REALLOCARRAY. All this complicates * build of libbpf unnecessarily and is just a maintenance burden. Instead, * it's trivial to implement libbpf-specific internal version and use it * throughout libbpf. */ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) { size_t total; #if __has_builtin(__builtin_mul_overflow) if (unlikely(__builtin_mul_overflow(nmemb, size, &total))) return NULL; #else if (size == 0 || nmemb > ULONG_MAX / size) return NULL; total = nmemb * size; #endif return realloc(ptr, total); } /* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst * is zero-terminated string no matter what (unless sz == 0, in which case * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs * in what is returned. Given this is internal helper, it's trivial to extend * this, when necessary. Use this instead of strncpy inside libbpf source code. 
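 *
 * Worked example: given char buf[4], libbpf_strlcpy(buf, "hello",
 * sizeof(buf)) stores "hel" plus the terminating '\0' in buf[3], whereas
 * strncpy(buf, "hello", sizeof(buf)) would have left buf without any zero
 * terminator.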
*/ static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz) { size_t i; if (sz == 0) return; sz--; for (i = 0; i < sz && src[i]; i++) dst[i] = src[i]; dst[i] = '\0'; } __u32 get_kernel_version(void); struct btf; struct btf_type; struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); const char *btf_kind_str(const struct btf_type *t); const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); const struct btf_header *btf_header(const struct btf *btf); void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map); static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t) { return (enum btf_func_linkage)(int)btf_vlen(t); } static inline __u32 btf_type_info(int kind, int vlen, int kflag) { return (kflag << 31) | (kind << 24) | vlen; } enum map_def_parts { MAP_DEF_MAP_TYPE = 0x001, MAP_DEF_KEY_TYPE = 0x002, MAP_DEF_KEY_SIZE = 0x004, MAP_DEF_VALUE_TYPE = 0x008, MAP_DEF_VALUE_SIZE = 0x010, MAP_DEF_MAX_ENTRIES = 0x020, MAP_DEF_MAP_FLAGS = 0x040, MAP_DEF_NUMA_NODE = 0x080, MAP_DEF_PINNING = 0x100, MAP_DEF_INNER_MAP = 0x200, MAP_DEF_MAP_EXTRA = 0x400, MAP_DEF_ALL = 0x7ff, /* combination of all above */ }; struct btf_map_def { enum map_def_parts parts; __u32 map_type; __u32 key_type_id; __u32 key_size; __u32 value_type_id; __u32 value_size; __u32 max_entries; __u32 map_flags; __u32 numa_node; __u32 pinning; __u64 map_extra; }; int parse_btf_map_def(const char *map_name, struct btf *btf, const struct btf_type *def_t, bool strict, struct btf_map_def *map_def, struct btf_map_def *inner_def); void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t cur_cnt, size_t max_cnt, size_t add_cnt); int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt); static inline bool libbpf_is_mem_zeroed(const char *p, ssize_t len) { while (len > 0) { if (*p) return false; p++; len--; } return true; } static inline bool libbpf_validate_opts(const char *opts, size_t opts_sz, size_t user_sz, const char *type_name) { if (user_sz < sizeof(size_t)) { pr_warn("%s size (%zu) is too small\n", type_name, user_sz); return false; } if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) { pr_warn("%s has non-zero extra bytes\n", type_name); return false; } return true; } #define OPTS_VALID(opts, type) \ (!(opts) || libbpf_validate_opts((const char *)opts, \ offsetofend(struct type, \ type##__last_field), \ (opts)->sz, #type)) #define OPTS_HAS(opts, field) \ ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field)) #define OPTS_GET(opts, field, fallback_value) \ (OPTS_HAS(opts, field) ? (opts)->field : fallback_value) #define OPTS_SET(opts, field, value) \ do { \ if (OPTS_HAS(opts, field)) \ (opts)->field = value; \ } while (0) #define OPTS_ZEROED(opts, last_nonzero_field) \ ({ \ ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field); \ !(opts) || libbpf_is_mem_zeroed((const void *)opts + __off, \ (opts)->sz - __off); \ }) enum kern_feature_id { /* v4.14: kernel support for program & map names. */ FEAT_PROG_NAME, /* v5.2: kernel support for global data sections. 
*/ FEAT_GLOBAL_DATA, /* BTF support */ FEAT_BTF, /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ FEAT_BTF_FUNC, /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ FEAT_BTF_DATASEC, /* BTF_FUNC_GLOBAL is supported */ FEAT_BTF_GLOBAL_FUNC, /* BPF_F_MMAPABLE is supported for arrays */ FEAT_ARRAY_MMAP, /* kernel support for expected_attach_type in BPF_PROG_LOAD */ FEAT_EXP_ATTACH_TYPE, /* bpf_probe_read_{kernel,user}[_str] helpers */ FEAT_PROBE_READ_KERN, /* BPF_PROG_BIND_MAP is supported */ FEAT_PROG_BIND_MAP, /* Kernel support for module BTFs */ FEAT_MODULE_BTF, /* BTF_KIND_FLOAT support */ FEAT_BTF_FLOAT, /* BPF perf link support */ FEAT_PERF_LINK, /* BTF_KIND_DECL_TAG support */ FEAT_BTF_DECL_TAG, /* BTF_KIND_TYPE_TAG support */ FEAT_BTF_TYPE_TAG, /* memcg-based accounting for BPF maps and progs */ FEAT_MEMCG_ACCOUNT, /* BPF cookie (bpf_get_attach_cookie() BPF helper) support */ FEAT_BPF_COOKIE, /* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */ FEAT_BTF_ENUM64, /* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */ FEAT_SYSCALL_WRAPPER, /* BPF multi-uprobe link support */ FEAT_UPROBE_MULTI_LINK, /* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */ FEAT_ARG_CTX_TAG, /* Kernel supports '?' at the front of datasec names */ FEAT_BTF_QMARK_DATASEC, __FEAT_CNT, }; enum kern_feature_result { FEAT_UNKNOWN = 0, FEAT_SUPPORTED = 1, FEAT_MISSING = 2, }; struct kern_feature_cache { enum kern_feature_result res[__FEAT_CNT]; int token_fd; }; bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id); bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id); int probe_kern_syscall_wrapper(int token_fd); int probe_memcg_account(int token_fd); int bump_rlimit_memlock(void); int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); int libbpf__load_raw_btf(const char *raw_types, size_t types_len, const char *str_sec, size_t str_len, int token_fd); int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level, int token_fd); struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, const char **prefix, int *kind); struct btf_ext_info { /* * info points to the individual info section (e.g. func_info and * line_info) from the .BTF.ext. It does not include the __u32 rec_size. */ void *info; __u32 rec_size; __u32 len; /* optional (maintained internally by libbpf) mapping between .BTF.ext * section and corresponding ELF section. 
This is used to join * information like CO-RE relocation records with corresponding BPF * programs defined in ELF sections */ __u32 *sec_idxs; int sec_cnt; }; #define for_each_btf_ext_sec(seg, sec) \ for (sec = (seg)->info; \ (void *)sec < (seg)->info + (seg)->len; \ sec = (void *)sec + sizeof(struct btf_ext_info_sec) + \ (seg)->rec_size * sec->num_info) #define for_each_btf_ext_rec(seg, sec, i, rec) \ for (i = 0, rec = (void *)&(sec)->data; \ i < (sec)->num_info; \ i++, rec = (void *)rec + (seg)->rec_size) /* * The .BTF.ext ELF section layout defined as * struct btf_ext_header * func_info subsection * * The func_info subsection layout: * record size for struct bpf_func_info in the func_info subsection * struct btf_ext_info_sec for section #1 * a list of bpf_func_info records for section #1 * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h * but may not be identical * struct btf_ext_info_sec for section #2 * a list of bpf_func_info records for section #2 * ...... * * Note that the bpf_func_info record size in .BTF.ext may not * be the same as the one defined in include/uapi/linux/bpf.h. * The loader should ensure that record_size meets minimum * requirement and pass the record as is to the kernel. The * kernel will handle the func_info properly based on its contents. */ struct btf_ext_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; /* All offsets are in bytes relative to the end of this header */ __u32 func_info_off; __u32 func_info_len; __u32 line_info_off; __u32 line_info_len; /* optional part of .BTF.ext header */ __u32 core_relo_off; __u32 core_relo_len; }; struct btf_ext { union { struct btf_ext_header *hdr; void *data; }; void *data_swapped; bool swapped_endian; struct btf_ext_info func_info; struct btf_ext_info line_info; struct btf_ext_info core_relo_info; __u32 data_size; }; struct btf_ext_info_sec { __u32 sec_name_off; __u32 num_info; /* Followed by num_info * record_size number of bytes */ __u8 data[]; }; /* The minimum bpf_func_info checked by the loader */ struct bpf_func_info_min { __u32 insn_off; __u32 type_id; }; /* The minimum bpf_line_info checked by the loader */ struct bpf_line_info_min { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; /* Functions to byte-swap info records */ typedef void (*info_rec_bswap_fn)(void *); static inline void bpf_func_info_bswap(struct bpf_func_info *i) { i->insn_off = bswap_32(i->insn_off); i->type_id = bswap_32(i->type_id); } static inline void bpf_line_info_bswap(struct bpf_line_info *i) { i->insn_off = bswap_32(i->insn_off); i->file_name_off = bswap_32(i->file_name_off); i->line_off = bswap_32(i->line_off); i->line_col = bswap_32(i->line_col); } static inline void bpf_core_relo_bswap(struct bpf_core_relo *i) { i->insn_off = bswap_32(i->insn_off); i->type_id = bswap_32(i->type_id); i->access_str_off = bswap_32(i->access_str_off); i->kind = bswap_32(i->kind); } enum btf_field_iter_kind { BTF_FIELD_ITER_IDS, BTF_FIELD_ITER_STRS, }; struct btf_field_desc { /* once-per-type offsets */ int t_off_cnt, t_offs[2]; /* member struct size, or zero, if no members */ int m_sz; /* repeated per-member offsets */ int m_off_cnt, m_offs[1]; }; struct btf_field_iter { struct btf_field_desc desc; void *p; int m_idx; int off_idx; int vlen; }; int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind); __u32 *btf_field_iter_next(struct btf_field_iter *it); typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx); typedef int (*str_off_visit_fn)(__u32 
*str_off, void *ctx); int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, __u32 kind); /* handle direct returned errors */ static inline int libbpf_err(int ret) { if (ret < 0) errno = -ret; return ret; } /* handle errno-based (e.g., syscall or libc) errors according to libbpf's * strict mode settings */ static inline int libbpf_err_errno(int ret) { /* errno is already assumed to be set on error */ return ret < 0 ? -errno : ret; } /* handle error for pointer-returning APIs, err is assumed to be < 0 always */ static inline void *libbpf_err_ptr(int err) { /* set errno on error, this doesn't break anything */ errno = -err; return NULL; } /* handle pointer-returning APIs' error handling */ static inline void *libbpf_ptr(void *ret) { /* set errno on error, this doesn't break anything */ if (IS_ERR(ret)) errno = -PTR_ERR(ret); return IS_ERR(ret) ? NULL : ret; } static inline bool str_is_empty(const char *s) { return !s || !s[0]; } static inline bool is_ldimm64_insn(struct bpf_insn *insn) { return insn->code == (BPF_LD | BPF_IMM | BPF_DW); } static inline void bpf_insn_bswap(struct bpf_insn *insn) { __u8 tmp_reg = insn->dst_reg; insn->dst_reg = insn->src_reg; insn->src_reg = tmp_reg; insn->off = bswap_16(insn->off); insn->imm = bswap_32(insn->imm); } /* Unconditionally dup FD, ensuring it doesn't use [0, 2] range. * Original FD is not closed or altered in any other way. * Preserves original FD value, if it's invalid (negative). */ static inline int dup_good_fd(int fd) { if (fd < 0) return fd; return fcntl(fd, F_DUPFD_CLOEXEC, 3); } /* if fd is stdin, stdout, or stderr, dup to a fd greater than 2 * Takes ownership of the fd passed in, and closes it if calling * fcntl(fd, F_DUPFD_CLOEXEC, 3). */ static inline int ensure_good_fd(int fd) { int old_fd = fd, saved_errno; if (fd < 0) return fd; if (fd < 3) { fd = dup_good_fd(fd); saved_errno = errno; close(old_fd); errno = saved_errno; if (fd < 0) { pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno); errno = saved_errno; } } return fd; } static inline int sys_dup3(int oldfd, int newfd, int flags) { return syscall(__NR_dup3, oldfd, newfd, flags); } /* Point *fixed_fd* to the same file that *tmp_fd* points to. * Regardless of success, *tmp_fd* is closed. * Whatever *fixed_fd* pointed to is closed silently. */ static inline int reuse_fd(int fixed_fd, int tmp_fd) { int err; err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC); err = err < 0 ? 
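/* The libbpf_err*() helpers above encode libbpf's 1.0 error convention:
 * a negative error code (or NULL, for pointer-returning APIs) is returned
 * and thread-local errno is set. A sketch of both sides of that contract
 * (my_open_map_pin() is a hypothetical helper; assumes <fcntl.h>,
 * <errno.h>, <stdio.h> and <string.h>):
 *
 *	static int my_open_map_pin(const char *path)
 *	{
 *		int fd = open(path, O_RDONLY);	// -1 + errno on failure
 *
 *		return libbpf_err_errno(fd);	// normalize to -Exx, errno kept
 *	}
 *
 *	// caller: negative return signals failure, errno has the details
 *	if (my_open_map_pin("/sys/fs/bpf/my_map") < 0)
 *		fprintf(stderr, "open failed: %s\n", strerror(errno));
 */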
-errno : 0;
	close(tmp_fd); /* clean up temporary FD */
	return err;
}

/* The following two functions are exposed to bpftool */
int bpf_core_add_cands(struct bpf_core_cand *local_cand,
		       size_t local_essent_len,
		       const struct btf *targ_btf,
		       const char *targ_btf_name,
		       int targ_start_id,
		       struct bpf_core_cand_list *cands);
void bpf_core_free_cands(struct bpf_core_cand_list *cands);

struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
void usdt_manager_free(struct usdt_manager *man);
struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
					   const struct bpf_program *prog,
					   pid_t pid, const char *path,
					   const char *usdt_provider, const char *usdt_name,
					   __u64 usdt_cookie);

static inline bool is_pow_of_2(size_t x)
{
	return x && (x & (x - 1)) == 0;
}

#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);

bool glob_match(const char *str, const char *pat);

long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name);
long elf_find_func_offset_from_file(const char *binary_path, const char *name);

struct elf_fd {
	Elf *elf;
	int fd;
};

int elf_open(const char *binary_path, struct elf_fd *elf_fd);
void elf_close(struct elf_fd *elf_fd);

int elf_resolve_syms_offsets(const char *binary_path, int cnt,
			     const char **syms, unsigned long **poffsets,
			     int st_type);
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
				unsigned long **poffsets, size_t *pcnt);

int probe_fd(int fd);

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
xdp-tools-1.5.4/lib/libbpf/src/libbpf_legacy.h0000644000175100001660000001210714706536574020565 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Libbpf legacy APIs (either discouraged or deprecated, as mentioned in [0])
 *
 * [0] https://docs.google.com/document/d/1UyjTZuPFWiPFyKk1tV5an11_iaRuec6U-ZESZ54nNTY
 *
 * Copyright (C) 2021 Facebook
 */
#ifndef __LIBBPF_LEGACY_BPF_H
#define __LIBBPF_LEGACY_BPF_H

#include <linux/bpf.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "libbpf_common.h"

#ifdef __cplusplus
extern "C" {
#endif

/* As of libbpf 1.0 libbpf_set_strict_mode() and enum libbpf_strict_mode have
 * no effect. But they are left in libbpf_legacy.h so that applications that
 * prepared for libbpf 1.0 before final release by using
 * libbpf_set_strict_mode() still work with libbpf 1.0+ without any changes.
 */
enum libbpf_strict_mode {
	/* Turn on all supported strict features of libbpf to simulate libbpf
	 * v1.0 behavior.
	 * This will be the default behavior in libbpf v1.0.
	 */
	LIBBPF_STRICT_ALL = 0xffffffff,

	/*
	 * Disable any libbpf 1.0 behaviors. This is the default before libbpf
	 * v1.0. It won't be supported anymore in v1.0, please update your
	 * code so that it handles LIBBPF_STRICT_ALL mode before libbpf v1.0.
	 */
	LIBBPF_STRICT_NONE = 0x00,
	/*
	 * Return NULL pointers on error, not ERR_PTR(err).
	 * Additionally, libbpf also always sets errno to corresponding Exx
	 * (positive) error code.
	 */
	LIBBPF_STRICT_CLEAN_PTRS = 0x01,
	/*
	 * Return actual error codes from low-level APIs directly, not just -1.
	 * Additionally, libbpf also always sets errno to corresponding Exx
	 * (positive) error code.
	 */
	LIBBPF_STRICT_DIRECT_ERRS = 0x02,
	/*
	 * Enforce strict BPF program section (SEC()) names.
	 * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
	 * allowed, with LIBBPF_STRICT_SEC_NAME this will become
	 * unrecognized by libbpf and would have to be just SEC("xdp") and
	 * SEC("perf_event").
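	 *
	 * For instance (a sketch; the program body is illustrative only):
	 *
	 *	SEC("xdp")                     // accepted under strict naming
	 *	int drop_all(struct xdp_md *ctx)
	 *	{
	 *		return XDP_DROP;
	 *	}
	 *
	 *	SEC("xdp_drop_all")            // legacy-style name, rejected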
* * Note, in this mode the program pin path will be based on the * function name instead of section name. * * Additionally, routines in the .text section are always considered * sub-programs. Legacy behavior allows for a single routine in .text * to be a program. */ LIBBPF_STRICT_SEC_NAME = 0x04, /* * Disable the global 'bpf_objects_list'. Maintaining this list adds * a race condition to bpf_object__open() and bpf_object__close(). * Clients can maintain it on their own if it is valuable for them. */ LIBBPF_STRICT_NO_OBJECT_LIST = 0x08, /* * Automatically bump RLIMIT_MEMLOCK using setrlimit() before the * first BPF program or map creation operation. This is done only if * kernel is too old to support memcg-based memory accounting for BPF * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY, * but it can be overridden with libbpf_set_memlock_rlim() API. * Note that libbpf_set_memlock_rlim() needs to be called before * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load() * operation. */ LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10, /* * Error out on any SEC("maps") map definition, which are deprecated * in favor of BTF-defined map definitions in SEC(".maps"). */ LIBBPF_STRICT_MAP_DEFINITIONS = 0x20, __LIBBPF_STRICT_LAST, }; LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode); /** * @brief **libbpf_get_error()** extracts the error code from the passed * pointer * @param ptr pointer returned from libbpf API function * @return error code; or 0 if no error occurred * * Note, as of libbpf 1.0 this function is not necessary and not recommended * to be used. Libbpf doesn't return error code embedded into the pointer * itself. Instead, NULL is returned on error and error code is passed through * thread-local errno variable. **libbpf_get_error()** is just returning -errno * value if it receives NULL, which is correct only if errno hasn't been * modified between libbpf API call and corresponding **libbpf_get_error()** * call. Prefer to check return for NULL and use errno directly. * * This API is left in libbpf 1.0 to allow applications that were 1.0-ready * before final libbpf 1.0 without needing to change them. */ LIBBPF_API long libbpf_get_error(const void *ptr); #define DECLARE_LIBBPF_OPTS LIBBPF_OPTS /* "Discouraged" APIs which don't follow consistent libbpf naming patterns. * They are normally a trivial aliases or wrappers for proper APIs and are * left to minimize unnecessary disruption for users of libbpf. But they * shouldn't be used going forward. */ struct bpf_program; struct bpf_map; struct btf; struct btf_ext; LIBBPF_API struct btf *libbpf_find_kernel_btf(void); LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_LEGACY_BPF_H */ xdp-tools-1.5.4/lib/libbpf/src/btf.h0000644000175100001660000005026314706536574016563 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2018 Facebook */ /*! 
\file */ #ifndef __LIBBPF_BTF_H #define __LIBBPF_BTF_H #include #include #include #include #include "libbpf_common.h" #ifdef __cplusplus extern "C" { #endif #define BTF_ELF_SEC ".BTF" #define BTF_EXT_ELF_SEC ".BTF.ext" #define BTF_BASE_ELF_SEC ".BTF.base" #define MAPS_ELF_SEC ".maps" struct btf; struct btf_ext; struct btf_type; struct bpf_object; enum btf_endianness { BTF_LITTLE_ENDIAN = 0, BTF_BIG_ENDIAN = 1, }; /** * @brief **btf__free()** frees all data of a BTF object * @param btf BTF object to free */ LIBBPF_API void btf__free(struct btf *btf); /** * @brief **btf__new()** creates a new instance of a BTF object from the raw * bytes of an ELF's BTF section * @param data raw bytes * @param size number of bytes passed in `data` * @return new BTF object instance which has to be eventually freed with * **btf__free()** * * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract * error code from such a pointer `libbpf_get_error()` should be used. If * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is * returned on error instead. In both cases thread-local `errno` variable is * always set to error code as well. */ LIBBPF_API struct btf *btf__new(const void *data, __u32 size); /** * @brief **btf__new_split()** create a new instance of a BTF object from the * provided raw data bytes. It takes another BTF instance, **base_btf**, which * serves as a base BTF, which is extended by types in a newly created BTF * instance * @param data raw bytes * @param size length of raw bytes * @param base_btf the base BTF object * @return new BTF object instance which has to be eventually freed with * **btf__free()** * * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and * creates non-split BTF. * * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract * error code from such a pointer `libbpf_get_error()` should be used. If * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is * returned on error instead. In both cases thread-local `errno` variable is * always set to error code as well. */ LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf); /** * @brief **btf__new_empty()** creates an empty BTF object. Use * `btf__add_*()` to populate such BTF object. * @return new BTF object instance which has to be eventually freed with * **btf__free()** * * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract * error code from such a pointer `libbpf_get_error()` should be used. If * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is * returned on error instead. In both cases thread-local `errno` variable is * always set to error code as well. */ LIBBPF_API struct btf *btf__new_empty(void); /** * @brief **btf__new_empty_split()** creates an unpopulated BTF object from an * ELF BTF section except with a base BTF on top of which split BTF should be * based * @return new BTF object instance which has to be eventually freed with * **btf__free()** * * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to * `btf__new_empty()` and creates non-split BTF. * * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract * error code from such a pointer `libbpf_get_error()` should be used. If * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is * returned on error instead. In both cases thread-local `errno` variable is * always set to error code as well. 
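 *
 * A minimal construction sketch using the btf__add_*() writer APIs declared
 * below (type names, sizes and the use of the returned IDs are illustrative;
 * error checks elided):
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	int pair_id = btf__add_struct(btf, "pair", 8);
 *
 *	btf__add_field(btf, "a", int_id, 0, 0);		// bit offset 0
 *	btf__add_field(btf, "b", int_id, 32, 0);	// bit offset 32
 *	btf__free(btf);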
*/ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); /** * @brief **btf__distill_base()** creates new versions of the split BTF * *src_btf* and its base BTF. The new base BTF will only contain the types * needed to improve robustness of the split BTF to small changes in base BTF. * When that split BTF is loaded against a (possibly changed) base, this * distilled base BTF will help update references to that (possibly changed) * base BTF. * * Both the new split and its associated new base BTF must be freed by * the caller. * * If successful, 0 is returned and **new_base_btf** and **new_split_btf** * will point at new base/split BTF. Both the new split and its associated * new base BTF must be freed by the caller. * * A negative value is returned on error and the thread-local `errno` variable * is set to the error code as well. */ LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, struct btf **new_split_btf); LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); LIBBPF_API struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf); LIBBPF_API struct btf *btf__parse_raw(const char *path); LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf); LIBBPF_API struct btf *btf__load_vmlinux_btf(void); LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf); LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id); LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf); LIBBPF_API int btf__load_into_kernel(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, const char *type_name); LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, __u32 kind); LIBBPF_API __u32 btf__type_cnt(const struct btf *btf); LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); LIBBPF_API size_t btf__pointer_size(const struct btf *btf); LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz); LIBBPF_API enum btf_endianness btf__endianness(const struct btf *btf); LIBBPF_API int btf__set_endianness(struct btf *btf, enum btf_endianness endian); LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); LIBBPF_API void btf__set_fd(struct btf *btf, int fd); LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size); LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext); LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian); LIBBPF_API int btf__find_str(struct btf *btf, const char *s); LIBBPF_API int btf__add_str(struct btf *btf, const char *s); LIBBPF_API int 
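/* A lookup sketch for the loaders above (error handling elided; the type
 * name searched for is just an example; assumes <stdio.h>):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	__s32 id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *					  BTF_KIND_STRUCT);
 *	const struct btf_type *t = btf__type_by_id(vmlinux_btf, id);
 *
 *	printf("task_struct: id=%d, size=%u bytes\n", id, t->size);
 *	btf__free(vmlinux_btf);
 */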
btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type); /** * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf* * @param btf BTF object which all the BTF types and strings are added to * @param src_btf BTF object which all BTF types and referenced strings are copied from * @return BTF type ID of the first appended BTF type, or negative error code * * **btf__add_btf()** can be used to simply and efficiently append the entire * contents of one BTF object to another one. All the BTF type data is copied * over, all referenced type IDs are adjusted by adding a necessary ID offset. * Only strings referenced from BTF types are copied over and deduplicated, so * if there were some unused strings in *src_btf*, those won't be copied over, * which is consistent with the general string deduplication semantics of BTF * writing APIs. * * If any error is encountered during this process, the contents of *btf* is * left intact, which means that **btf__add_btf()** follows the transactional * semantics and the operation as a whole is all-or-nothing. * * *src_btf* has to be non-split BTF, as of now copying types from split BTF * is not supported and will result in -ENOTSUP error code returned. */ LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf); LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding); LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz); LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems); /* struct/union construction APIs */ LIBBPF_API int btf__add_struct(struct btf *btf, const char *name, __u32 sz); LIBBPF_API int btf__add_union(struct btf *btf, const char *name, __u32 sz); LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_id, __u32 bit_offset, __u32 bit_size); /* enum construction APIs */ LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz); LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value); LIBBPF_API int btf__add_enum64(struct btf *btf, const char *name, __u32 bytes_sz, bool is_signed); LIBBPF_API int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value); enum btf_fwd_kind { BTF_FWD_STRUCT = 0, BTF_FWD_UNION = 1, BTF_FWD_ENUM = 2, }; LIBBPF_API int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind); LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id); LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id); /* func and func_proto construction APIs */ LIBBPF_API int btf__add_func(struct btf *btf, const char *name, enum btf_func_linkage linkage, int proto_type_id); LIBBPF_API int btf__add_func_proto(struct btf *btf, int ret_type_id); LIBBPF_API int btf__add_func_param(struct btf *btf, const char *name, int type_id); /* var & datasec construction APIs */ LIBBPF_API int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id); LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz); LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz); /* tag construction 
API */ LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, int component_idx); struct btf_dedup_opts { size_t sz; /* optional .BTF.ext info to dedup along the main BTF info */ struct btf_ext *btf_ext; /* force hash collisions (used for testing) */ bool force_collisions; size_t :0; }; #define btf_dedup_opts__last_field force_collisions LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); /** * @brief **btf__relocate()** will check the split BTF *btf* for references * to base BTF kinds, and verify those references are compatible with * *base_btf*; if they are, *btf* is adjusted such that is re-parented to * *base_btf* and type ids and strings are adjusted to accommodate this. * * If successful, 0 is returned and **btf** now has **base_btf** as its * base. * * A negative value is returned on error and the thread-local `errno` variable * is set to the error code as well. */ LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf); struct btf_dump; struct btf_dump_opts { size_t sz; }; #define btf_dump_opts__last_field sz typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args); LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf, btf_dump_printf_fn_t printf_fn, void *ctx, const struct btf_dump_opts *opts); LIBBPF_API void btf_dump__free(struct btf_dump *d); LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); struct btf_dump_emit_type_decl_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; /* optional field name for type declaration, e.g.: * - struct my_struct * - void (*)(int) * - char (*)[123] */ const char *field_name; /* extra indentation level (in number of tabs) to emit for multi-line * type declarations (e.g., anonymous struct); applies for lines * starting from the second one (first line is assumed to have * necessary indentation already */ int indent_level; /* strip all the const/volatile/restrict mods */ bool strip_mods; size_t :0; }; #define btf_dump_emit_type_decl_opts__last_field strip_mods LIBBPF_API int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, const struct btf_dump_emit_type_decl_opts *opts); struct btf_dump_type_data_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; const char *indent_str; int indent_level; /* below match "show" flags for bpf_show_snprintf() */ bool compact; /* no newlines/indentation */ bool skip_names; /* skip member/type names */ bool emit_zeroes; /* show 0-valued fields */ size_t :0; }; #define btf_dump_type_data_opts__last_field emit_zeroes LIBBPF_API int btf_dump__dump_type_data(struct btf_dump *d, __u32 id, const void *data, size_t data_sz, const struct btf_dump_type_data_opts *opts); /* * A set of helpers for easier BTF types handling. * * The inline functions below rely on constants from the kernel headers which * may not be available for applications including this header file. To avoid * compilation errors, we define all the constants here that were added after * the initial introduction of the BTF_KIND* constants. 
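 *
 * As a usage sketch for the btf_dump API declared above (the callback
 * signature matches btf_dump_printf_fn_t; btf and type_id are assumed to
 * come from the caller, error checks elided):
 *
 *	static void dump_printf(void *ctx, const char *fmt, va_list args)
 *	{
 *		vprintf(fmt, args);
 *	}
 *
 *	struct btf_dump *d = btf_dump__new(btf, dump_printf, NULL, NULL);
 *	btf_dump__dump_type(d, type_id);	// emit C definition of type_id
 *	btf_dump__free(d);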
*/ #ifndef BTF_KIND_FUNC #define BTF_KIND_FUNC 12 /* Function */ #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ #endif #ifndef BTF_KIND_VAR #define BTF_KIND_VAR 14 /* Variable */ #define BTF_KIND_DATASEC 15 /* Section */ #endif #ifndef BTF_KIND_FLOAT #define BTF_KIND_FLOAT 16 /* Floating point */ #endif /* The kernel header switched to enums, so the following were never #defined */ #define BTF_KIND_DECL_TAG 17 /* Decl Tag */ #define BTF_KIND_TYPE_TAG 18 /* Type Tag */ #define BTF_KIND_ENUM64 19 /* Enum for up-to 64bit values */ static inline __u16 btf_kind(const struct btf_type *t) { return BTF_INFO_KIND(t->info); } static inline __u16 btf_vlen(const struct btf_type *t) { return BTF_INFO_VLEN(t->info); } static inline bool btf_kflag(const struct btf_type *t) { return BTF_INFO_KFLAG(t->info); } static inline bool btf_is_void(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_UNKN; } static inline bool btf_is_int(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_INT; } static inline bool btf_is_ptr(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_PTR; } static inline bool btf_is_array(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_ARRAY; } static inline bool btf_is_struct(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_STRUCT; } static inline bool btf_is_union(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_UNION; } static inline bool btf_is_composite(const struct btf_type *t) { __u16 kind = btf_kind(t); return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; } static inline bool btf_is_enum(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_ENUM; } static inline bool btf_is_enum64(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_ENUM64; } static inline bool btf_is_fwd(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_FWD; } static inline bool btf_is_typedef(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_TYPEDEF; } static inline bool btf_is_volatile(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_VOLATILE; } static inline bool btf_is_const(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_CONST; } static inline bool btf_is_restrict(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_RESTRICT; } static inline bool btf_is_mod(const struct btf_type *t) { __u16 kind = btf_kind(t); return kind == BTF_KIND_VOLATILE || kind == BTF_KIND_CONST || kind == BTF_KIND_RESTRICT || kind == BTF_KIND_TYPE_TAG; } static inline bool btf_is_func(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_FUNC; } static inline bool btf_is_func_proto(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_FUNC_PROTO; } static inline bool btf_is_var(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_VAR; } static inline bool btf_is_datasec(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_DATASEC; } static inline bool btf_is_float(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_FLOAT; } static inline bool btf_is_decl_tag(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_DECL_TAG; } static inline bool btf_is_type_tag(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_TYPE_TAG; } static inline bool btf_is_any_enum(const struct btf_type *t) { return btf_is_enum(t) || btf_is_enum64(t); } static inline bool btf_kind_core_compat(const struct btf_type *t1, const struct btf_type *t2) { return btf_kind(t1) == btf_kind(t2) || (btf_is_any_enum(t1) && btf_is_any_enum(t2)); } static inline __u8 btf_int_encoding(const struct btf_type *t) { return 
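/* The kind predicates above compose into common patterns, e.g. resolving a
 * type ID down to its underlying type (a sketch; resolve_type() is a
 * hypothetical helper, no error handling):
 *
 *	static const struct btf_type *resolve_type(const struct btf *btf, __u32 id)
 *	{
 *		const struct btf_type *t = btf__type_by_id(btf, id);
 *
 *		while (btf_is_mod(t) || btf_is_typedef(t))
 *			t = btf__type_by_id(btf, t->type);
 *		return t;
 *	}
 */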
BTF_INT_ENCODING(*(__u32 *)(t + 1)); } static inline __u8 btf_int_offset(const struct btf_type *t) { return BTF_INT_OFFSET(*(__u32 *)(t + 1)); } static inline __u8 btf_int_bits(const struct btf_type *t) { return BTF_INT_BITS(*(__u32 *)(t + 1)); } static inline struct btf_array *btf_array(const struct btf_type *t) { return (struct btf_array *)(t + 1); } static inline struct btf_enum *btf_enum(const struct btf_type *t) { return (struct btf_enum *)(t + 1); } struct btf_enum64; static inline struct btf_enum64 *btf_enum64(const struct btf_type *t) { return (struct btf_enum64 *)(t + 1); } static inline __u64 btf_enum64_value(const struct btf_enum64 *e) { /* struct btf_enum64 is introduced in Linux 6.0, which is very * bleeding-edge. Here we are avoiding relying on struct btf_enum64 * definition coming from kernel UAPI headers to support wider range * of system-wide kernel headers. * * Given this header can be also included from C++ applications, that * further restricts C tricks we can use (like using compatible * anonymous struct). So just treat struct btf_enum64 as * a three-element array of u32 and access second (lo32) and third * (hi32) elements directly. * * For reference, here is a struct btf_enum64 definition: * * const struct btf_enum64 { * __u32 name_off; * __u32 val_lo32; * __u32 val_hi32; * }; */ const __u32 *e64 = (const __u32 *)e; return ((__u64)e64[2] << 32) | e64[1]; } static inline struct btf_member *btf_members(const struct btf_type *t) { return (struct btf_member *)(t + 1); } /* Get bit offset of a member with specified index. */ static inline __u32 btf_member_bit_offset(const struct btf_type *t, __u32 member_idx) { const struct btf_member *m = btf_members(t) + member_idx; bool kflag = btf_kflag(t); return kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset; } /* * Get bitfield size of a member, assuming t is BTF_KIND_STRUCT or * BTF_KIND_UNION. If member is not a bitfield, zero is returned. */ static inline __u32 btf_member_bitfield_size(const struct btf_type *t, __u32 member_idx) { const struct btf_member *m = btf_members(t) + member_idx; bool kflag = btf_kflag(t); return kflag ? 
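/* A member-walking sketch using the accessors in this header (assumes t is
 * a BTF_KIND_STRUCT or BTF_KIND_UNION type owned by a BTF object btf, and
 * <stdio.h>):
 *
 *	const struct btf_member *m = btf_members(t);
 *	int i;
 *
 *	for (i = 0; i < btf_vlen(t); i++, m++)
 *		printf("%s: bit_offset=%u, bitfield_size=%u\n",
 *		       btf__name_by_offset(btf, m->name_off),
 *		       btf_member_bit_offset(t, i),
 *		       btf_member_bitfield_size(t, i));
 */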
BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0; } static inline struct btf_param *btf_params(const struct btf_type *t) { return (struct btf_param *)(t + 1); } static inline struct btf_var *btf_var(const struct btf_type *t) { return (struct btf_var *)(t + 1); } static inline struct btf_var_secinfo * btf_var_secinfos(const struct btf_type *t) { return (struct btf_var_secinfo *)(t + 1); } struct btf_decl_tag; static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) { return (struct btf_decl_tag *)(t + 1); } #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_BTF_H */ xdp-tools-1.5.4/lib/libbpf/src/strset.h0000644000175100001660000000111214706536574017321 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2021 Facebook */ #ifndef __LIBBPF_STRSET_H #define __LIBBPF_STRSET_H #include #include struct strset; struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz); void strset__free(struct strset *set); const char *strset__data(const struct strset *set); size_t strset__data_size(const struct strset *set); int strset__find_str(struct strset *set, const char *s); int strset__add_str(struct strset *set, const char *s); #endif /* __LIBBPF_STRSET_H */ xdp-tools-1.5.4/lib/libbpf/src/str_error.h0000644000175100001660000000035314706536574020024 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LIBBPF_STR_ERROR_H #define __LIBBPF_STR_ERROR_H #define STRERR_BUFSIZE 128 char *libbpf_strerror_r(int err, char *dst, int len); #endif /* __LIBBPF_STR_ERROR_H */ xdp-tools-1.5.4/lib/libbpf/src/libbpf.c0000644000175100001660000136134414706536574017247 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Common eBPF ELF object loading operations. * * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. * Copyright (C) 2019 Isovalent, Inc. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libbpf.h" #include "bpf.h" #include "btf.h" #include "str_error.h" #include "libbpf_internal.h" #include "hashmap.h" #include "bpf_gen_internal.h" #include "zip.h" #ifndef BPF_FS_MAGIC #define BPF_FS_MAGIC 0xcafe4a11 #endif #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf" #define BPF_INSN_SZ (sizeof(struct bpf_insn)) /* vsprintf() in __base_pr() uses nonliteral format string. It may break * compilation if user enables corresponding warning. Disable it explicitly. 
*/ #pragma GCC diagnostic ignored "-Wformat-nonliteral" #define __printf(a, b) __attribute__((format(printf, a, b))) static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog); static int map_set_def_max_entries(struct bpf_map *map); static const char * const attach_type_name[] = { [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress", [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress", [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create", [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release", [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops", [BPF_CGROUP_DEVICE] = "cgroup_device", [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind", [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind", [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect", [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect", [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect", [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind", [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind", [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername", [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername", [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername", [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname", [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname", [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname", [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg", [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg", [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg", [BPF_CGROUP_SYSCTL] = "cgroup_sysctl", [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg", [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg", [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg", [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt", [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt", [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", [BPF_SK_SKB_VERDICT] = "sk_skb_verdict", [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", [BPF_LIRC_MODE2] = "lirc_mode2", [BPF_FLOW_DISSECTOR] = "flow_dissector", [BPF_TRACE_RAW_TP] = "trace_raw_tp", [BPF_TRACE_FENTRY] = "trace_fentry", [BPF_TRACE_FEXIT] = "trace_fexit", [BPF_MODIFY_RETURN] = "modify_return", [BPF_LSM_MAC] = "lsm_mac", [BPF_LSM_CGROUP] = "lsm_cgroup", [BPF_SK_LOOKUP] = "sk_lookup", [BPF_TRACE_ITER] = "trace_iter", [BPF_XDP_DEVMAP] = "xdp_devmap", [BPF_XDP_CPUMAP] = "xdp_cpumap", [BPF_XDP] = "xdp", [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select", [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate", [BPF_PERF_EVENT] = "perf_event", [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", [BPF_STRUCT_OPS] = "struct_ops", [BPF_NETFILTER] = "netfilter", [BPF_TCX_INGRESS] = "tcx_ingress", [BPF_TCX_EGRESS] = "tcx_egress", [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", [BPF_NETKIT_PRIMARY] = "netkit_primary", [BPF_NETKIT_PEER] = "netkit_peer", [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session", }; static const char * const link_type_name[] = { [BPF_LINK_TYPE_UNSPEC] = "unspec", [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", [BPF_LINK_TYPE_TRACING] = "tracing", [BPF_LINK_TYPE_CGROUP] = "cgroup", [BPF_LINK_TYPE_ITER] = "iter", [BPF_LINK_TYPE_NETNS] = "netns", [BPF_LINK_TYPE_XDP] = "xdp", [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", [BPF_LINK_TYPE_NETFILTER] = "netfilter", [BPF_LINK_TYPE_TCX] = "tcx", [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", 
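/* These name tables (attach_type_name above, plus the link/map/prog tables
 * nearby) back the public libbpf_bpf_attach_type_str(),
 * libbpf_bpf_link_type_str(), libbpf_bpf_map_type_str() and
 * libbpf_bpf_prog_type_str() helpers, e.g. (a sketch, assumes <stdio.h>):
 *
 *	printf("prog: %s, attach: %s\n",
 *	       libbpf_bpf_prog_type_str(BPF_PROG_TYPE_XDP),
 *	       libbpf_bpf_attach_type_str(BPF_XDP));
 */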
[BPF_LINK_TYPE_NETKIT] = "netkit", [BPF_LINK_TYPE_SOCKMAP] = "sockmap", }; static const char * const map_type_name[] = { [BPF_MAP_TYPE_UNSPEC] = "unspec", [BPF_MAP_TYPE_HASH] = "hash", [BPF_MAP_TYPE_ARRAY] = "array", [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array", [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array", [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash", [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array", [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace", [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array", [BPF_MAP_TYPE_LRU_HASH] = "lru_hash", [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash", [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie", [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", [BPF_MAP_TYPE_DEVMAP] = "devmap", [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash", [BPF_MAP_TYPE_SOCKMAP] = "sockmap", [BPF_MAP_TYPE_CPUMAP] = "cpumap", [BPF_MAP_TYPE_XSKMAP] = "xskmap", [BPF_MAP_TYPE_SOCKHASH] = "sockhash", [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", [BPF_MAP_TYPE_QUEUE] = "queue", [BPF_MAP_TYPE_STACK] = "stack", [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", [BPF_MAP_TYPE_RINGBUF] = "ringbuf", [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", [BPF_MAP_TYPE_ARENA] = "arena", }; static const char * const prog_type_name[] = { [BPF_PROG_TYPE_UNSPEC] = "unspec", [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", [BPF_PROG_TYPE_KPROBE] = "kprobe", [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", [BPF_PROG_TYPE_XDP] = "xdp", [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", [BPF_PROG_TYPE_LWT_IN] = "lwt_in", [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", [BPF_PROG_TYPE_SK_SKB] = "sk_skb", [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", [BPF_PROG_TYPE_SK_MSG] = "sk_msg", [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", [BPF_PROG_TYPE_TRACING] = "tracing", [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", [BPF_PROG_TYPE_EXT] = "ext", [BPF_PROG_TYPE_LSM] = "lsm", [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", [BPF_PROG_TYPE_SYSCALL] = "syscall", [BPF_PROG_TYPE_NETFILTER] = "netfilter", }; static int __base_pr(enum libbpf_print_level level, const char *format, va_list args) { const char *env_var = "LIBBPF_LOG_LEVEL"; static enum libbpf_print_level min_level = LIBBPF_INFO; static bool initialized; if (!initialized) { char *verbosity; initialized = true; verbosity = getenv(env_var); if (verbosity) { if (strcasecmp(verbosity, "warn") == 0) min_level = LIBBPF_WARN; else if (strcasecmp(verbosity, "debug") == 0) min_level = LIBBPF_DEBUG; else if (strcasecmp(verbosity, "info") == 0) min_level = LIBBPF_INFO; else fprintf(stderr, 
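/* Applications can replace this default logger; a minimal sketch using the
 * public libbpf_set_print() declared in libbpf.h (my_print() is a
 * hypothetical callback):
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;	// suppress debug output
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */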
"libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", env_var, verbosity); } } /* if too verbose, skip logging */ if (level > min_level) return 0; return vfprintf(stderr, format, args); } static libbpf_print_fn_t __libbpf_pr = __base_pr; libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) { libbpf_print_fn_t old_print_fn; old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED); return old_print_fn; } __printf(2, 3) void libbpf_print(enum libbpf_print_level level, const char *format, ...) { va_list args; int old_errno; libbpf_print_fn_t print_fn; print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED); if (!print_fn) return; old_errno = errno; va_start(args, format); __libbpf_pr(level, format, args); va_end(args); errno = old_errno; } static void pr_perm_msg(int err) { struct rlimit limit; char buf[100]; if (err != -EPERM || geteuid() != 0) return; err = getrlimit(RLIMIT_MEMLOCK, &limit); if (err) return; if (limit.rlim_cur == RLIM_INFINITY) return; if (limit.rlim_cur < 1024) snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); else if (limit.rlim_cur < 1024*1024) snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); else snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", buf); } #define STRERR_BUFSIZE 128 /* Copied from tools/perf/util/util.h */ #ifndef zfree # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) #endif #ifndef zclose # define zclose(fd) ({ \ int ___err = 0; \ if ((fd) >= 0) \ ___err = close((fd)); \ fd = -1; \ ___err; }) #endif static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } int libbpf_set_strict_mode(enum libbpf_strict_mode mode) { /* as of v1.0 libbpf_set_strict_mode() is a no-op */ return 0; } __u32 libbpf_major_version(void) { return LIBBPF_MAJOR_VERSION; } __u32 libbpf_minor_version(void) { return LIBBPF_MINOR_VERSION; } const char *libbpf_version_string(void) { #define __S(X) #X #define _S(X) __S(X) return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION); #undef _S #undef __S } enum reloc_type { RELO_LD64, RELO_CALL, RELO_DATA, RELO_EXTERN_LD64, RELO_EXTERN_CALL, RELO_SUBPROG_ADDR, RELO_CORE, }; struct reloc_desc { enum reloc_type type; int insn_idx; union { const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ struct { int map_idx; int sym_off; int ext_idx; }; }; }; /* stored as sec_def->cookie for all libbpf-supported SEC()s */ enum sec_def_flags { SEC_NONE = 0, /* expected_attach_type is optional, if kernel doesn't support that */ SEC_EXP_ATTACH_OPT = 1, /* legacy, only used by libbpf_get_type_names() and * libbpf_attach_type_by_name(), not used by libbpf itself at all. * This used to be associated with cgroup (and few other) BPF programs * that were attachable through BPF_PROG_ATTACH command. Pretty * meaningless nowadays, though. */ SEC_ATTACHABLE = 2, SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT, /* attachment target is specified through BTF ID in either kernel or * other BPF program's BTF object */ SEC_ATTACH_BTF = 4, /* BPF program type allows sleeping/blocking in kernel */ SEC_SLEEPABLE = 8, /* BPF program support non-linear XDP buffer */ SEC_XDP_FRAGS = 16, /* Setup proper attach type for usdt probes. 
*/ SEC_USDT = 32, }; struct bpf_sec_def { char *sec; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; long cookie; int handler_id; libbpf_prog_setup_fn_t prog_setup_fn; libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; libbpf_prog_attach_fn_t prog_attach_fn; }; /* * bpf_prog should be a better name but it has been used in * linux/filter.h. */ struct bpf_program { char *name; char *sec_name; size_t sec_idx; const struct bpf_sec_def *sec_def; /* this program's instruction offset (in number of instructions) * within its containing ELF section */ size_t sec_insn_off; /* number of original instructions in ELF section belonging to this * program, not taking into account subprogram instructions possible * appended later during relocation */ size_t sec_insn_cnt; /* Offset (in number of instructions) of the start of instruction * belonging to this BPF program within its containing main BPF * program. For the entry-point (main) BPF program, this is always * zero. For a sub-program, this gets reset before each of main BPF * programs are processed and relocated and is used to determined * whether sub-program was already appended to the main program, and * if yes, at which instruction offset. */ size_t sub_insn_off; /* instructions that belong to BPF program; insns[0] is located at * sec_insn_off instruction within its ELF section in ELF file, so * when mapping ELF file instruction index to the local instruction, * one needs to subtract sec_insn_off; and vice versa. */ struct bpf_insn *insns; /* actual number of instruction in this BPF program's image; for * entry-point BPF programs this includes the size of main program * itself plus all the used sub-programs, appended at the end */ size_t insns_cnt; struct reloc_desc *reloc_desc; int nr_reloc; /* BPF verifier log settings */ char *log_buf; size_t log_size; __u32 log_level; struct bpf_object *obj; int fd; bool autoload; bool autoattach; bool sym_global; bool mark_btf_static; enum bpf_prog_type type; enum bpf_attach_type expected_attach_type; int exception_cb_idx; int prog_ifindex; __u32 attach_btf_obj_fd; __u32 attach_btf_id; __u32 attach_prog_fd; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; void *line_info; __u32 line_info_rec_size; __u32 line_info_cnt; __u32 prog_flags; }; struct bpf_struct_ops { struct bpf_program **progs; __u32 *kern_func_off; /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ void *data; /* e.g. struct bpf_struct_ops_tcp_congestion_ops in * btf_vmlinux's format. * struct bpf_struct_ops_tcp_congestion_ops { * [... some other kernel fields ...] * struct tcp_congestion_ops data; * } * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" * from "data". */ void *kern_vdata; __u32 type_id; }; #define DATA_SEC ".data" #define BSS_SEC ".bss" #define RODATA_SEC ".rodata" #define KCONFIG_SEC ".kconfig" #define KSYMS_SEC ".ksyms" #define STRUCT_OPS_SEC ".struct_ops" #define STRUCT_OPS_LINK_SEC ".struct_ops.link" #define ARENA_SEC ".addr_space.1" enum libbpf_map_type { LIBBPF_MAP_UNSPEC, LIBBPF_MAP_DATA, LIBBPF_MAP_BSS, LIBBPF_MAP_RODATA, LIBBPF_MAP_KCONFIG, }; struct bpf_map_def { unsigned int type; unsigned int key_size; unsigned int value_size; unsigned int max_entries; unsigned int map_flags; }; struct bpf_map { struct bpf_object *obj; char *name; /* real_name is defined for special internal maps (.rodata*, * .data*, .bss, .kconfig) and preserves their original ELF section * name. 
This is important to be able to find corresponding BTF * DATASEC information. */ char *real_name; int fd; int sec_idx; size_t sec_offset; int map_ifindex; int inner_map_fd; struct bpf_map_def def; __u32 numa_node; __u32 btf_var_idx; int mod_btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; enum libbpf_map_type libbpf_type; void *mmaped; struct bpf_struct_ops *st_ops; struct bpf_map *inner_map; void **init_slots; int init_slots_sz; char *pin_path; bool pinned; bool reused; bool autocreate; bool autoattach; __u64 map_extra; }; enum extern_type { EXT_UNKNOWN, EXT_KCFG, EXT_KSYM, }; enum kcfg_type { KCFG_UNKNOWN, KCFG_CHAR, KCFG_BOOL, KCFG_INT, KCFG_TRISTATE, KCFG_CHAR_ARR, }; struct extern_desc { enum extern_type type; int sym_idx; int btf_id; int sec_btf_id; const char *name; char *essent_name; bool is_set; bool is_weak; union { struct { enum kcfg_type type; int sz; int align; int data_off; bool is_signed; } kcfg; struct { unsigned long long addr; /* target btf_id of the corresponding kernel var. */ int kernel_btf_obj_fd; int kernel_btf_id; /* local btf_id of the ksym extern's type. */ __u32 type_id; /* BTF fd index to be patched in for insn->off, this is * 0 for vmlinux BTF, index in obj->fd_array for module * BTF */ __s16 btf_fd_idx; } ksym; }; }; struct module_btf { struct btf *btf; char *name; __u32 id; int fd; int fd_array_idx; }; enum sec_type { SEC_UNUSED = 0, SEC_RELO, SEC_BSS, SEC_DATA, SEC_RODATA, SEC_ST_OPS, }; struct elf_sec_desc { enum sec_type sec_type; Elf64_Shdr *shdr; Elf_Data *data; }; struct elf_state { int fd; const void *obj_buf; size_t obj_buf_sz; Elf *elf; Elf64_Ehdr *ehdr; Elf_Data *symbols; Elf_Data *arena_data; size_t shstrndx; /* section index for section name strings */ size_t strtabidx; struct elf_sec_desc *secs; size_t sec_cnt; int btf_maps_shndx; __u32 btf_maps_sec_btf_id; int text_shndx; int symbols_shndx; bool has_st_ops; int arena_data_shndx; }; struct usdt_manager; struct bpf_object { char name[BPF_OBJ_NAME_LEN]; char license[64]; __u32 kern_version; struct bpf_program *programs; size_t nr_programs; struct bpf_map *maps; size_t nr_maps; size_t maps_cap; char *kconfig; struct extern_desc *externs; int nr_extern; int kconfig_map_idx; bool loaded; bool has_subcalls; bool has_rodata; struct bpf_gen *gen_loader; /* Information when doing ELF related work. Only valid if efile.elf is not NULL */ struct elf_state efile; unsigned char byteorder; struct btf *btf; struct btf_ext *btf_ext; /* Parse and load BTF vmlinux if any of the programs in the object need * it at load time. */ struct btf *btf_vmlinux; /* Path to the custom BTF to be used for BPF CO-RE relocations as an * override for vmlinux BTF. 
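 * It is set via bpf_object_open_opts at open time; a sketch (the BTF file
 * path and object file name are examples):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		    .btf_custom_path = "/srv/btf/vmlinux-5.15.btf");
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);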
*/ char *btf_custom_path; /* vmlinux BTF override for CO-RE relocations */ struct btf *btf_vmlinux_override; /* Lazily initialized kernel module BTFs */ struct module_btf *btf_modules; bool btf_modules_loaded; size_t btf_module_cnt; size_t btf_module_cap; /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ char *log_buf; size_t log_size; __u32 log_level; int *fd_array; size_t fd_array_cap; size_t fd_array_cnt; struct usdt_manager *usdt_man; struct bpf_map *arena_map; void *arena_data; size_t arena_data_sz; struct kern_feature_cache *feat_cache; char *token_path; int token_fd; char path[]; }; static const char *elf_sym_str(const struct bpf_object *obj, size_t off); static const char *elf_sec_str(const struct bpf_object *obj, size_t off); static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); void bpf_program__unload(struct bpf_program *prog) { if (!prog) return; zclose(prog->fd); zfree(&prog->func_info); zfree(&prog->line_info); } static void bpf_program__exit(struct bpf_program *prog) { if (!prog) return; bpf_program__unload(prog); zfree(&prog->name); zfree(&prog->sec_name); zfree(&prog->insns); zfree(&prog->reloc_desc); prog->nr_reloc = 0; prog->insns_cnt = 0; prog->sec_idx = -1; } static bool insn_is_subprog_call(const struct bpf_insn *insn) { return BPF_CLASS(insn->code) == BPF_JMP && BPF_OP(insn->code) == BPF_CALL && BPF_SRC(insn->code) == BPF_K && insn->src_reg == BPF_PSEUDO_CALL && insn->dst_reg == 0 && insn->off == 0; } static bool is_call_insn(const struct bpf_insn *insn) { return insn->code == (BPF_JMP | BPF_CALL); } static bool insn_is_pseudo_func(struct bpf_insn *insn) { return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC; } static int bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, const char *name, size_t sec_idx, const char *sec_name, size_t sec_off, void *insn_data, size_t insn_data_sz) { if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", sec_name, name, sec_off, insn_data_sz); return -EINVAL; } memset(prog, 0, sizeof(*prog)); prog->obj = obj; prog->sec_idx = sec_idx; prog->sec_insn_off = sec_off / BPF_INSN_SZ; prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; /* insns_cnt can later be increased by appending used subprograms */ prog->insns_cnt = prog->sec_insn_cnt; prog->type = BPF_PROG_TYPE_UNSPEC; prog->fd = -1; prog->exception_cb_idx = -1; /* libbpf's convention for SEC("?abc...") is that it's just like * SEC("abc...") but the corresponding bpf_program starts out with * autoload set to false. */ if (sec_name[0] == '?') { prog->autoload = false; /* from now on forget there was ? 
in section name */ sec_name++; } else { prog->autoload = true; } prog->autoattach = true; /* inherit object's log_level */ prog->log_level = obj->log_level; prog->sec_name = strdup(sec_name); if (!prog->sec_name) goto errout; prog->name = strdup(name); if (!prog->name) goto errout; prog->insns = malloc(insn_data_sz); if (!prog->insns) goto errout; memcpy(prog->insns, insn_data, insn_data_sz); return 0; errout: pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); bpf_program__exit(prog); return -ENOMEM; } static int bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, const char *sec_name, int sec_idx) { Elf_Data *symbols = obj->efile.symbols; struct bpf_program *prog, *progs; void *data = sec_data->d_buf; size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms; int nr_progs, err, i; const char *name; Elf64_Sym *sym; progs = obj->programs; nr_progs = obj->nr_programs; nr_syms = symbols->d_size / sizeof(Elf64_Sym); for (i = 0; i < nr_syms; i++) { sym = elf_sym_by_idx(obj, i); if (sym->st_shndx != sec_idx) continue; if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) continue; prog_sz = sym->st_size; sec_off = sym->st_value; name = elf_sym_str(obj, sym->st_name); if (!name) { pr_warn("sec '%s': failed to get symbol name for offset %zu\n", sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT; } if (sec_off + prog_sz > sec_sz) { pr_warn("sec '%s': program at offset %zu crosses section boundary\n", sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT; } if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); return -ENOTSUP; } pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); if (!progs) { /* * In this case the original obj->programs * is still valid, so don't need special treat for * bpf_close_object(). 
*/ pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", sec_name, name); return -ENOMEM; } obj->programs = progs; prog = &progs[nr_progs]; err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, sec_off, data + sec_off, prog_sz); if (err) return err; if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL) prog->sym_global = true; /* if function is a global/weak symbol, but has restricted * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC * as static to enable more permissive BPF verification mode * with more outside context available to BPF verifier */ if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)) prog->mark_btf_static = true; nr_progs++; obj->nr_programs = nr_progs; } return 0; } static void bpf_object_bswap_progs(struct bpf_object *obj) { struct bpf_program *prog = obj->programs; struct bpf_insn *insn; int p, i; for (p = 0; p < obj->nr_programs; p++, prog++) { insn = prog->insns; for (i = 0; i < prog->insns_cnt; i++, insn++) bpf_insn_bswap(insn); } pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs); } static const struct btf_member * find_member_by_offset(const struct btf_type *t, __u32 bit_offset) { struct btf_member *m; int i; for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { if (btf_member_bit_offset(t, i) == bit_offset) return m; } return NULL; } static const struct btf_member * find_member_by_name(const struct btf *btf, const struct btf_type *t, const char *name) { struct btf_member *m; int i; for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) return m; } return NULL; } static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, __u16 kind, struct btf **res_btf, struct module_btf **res_mod_btf); #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, const char *name, __u32 kind); static int find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, struct module_btf **mod_btf, const struct btf_type **type, __u32 *type_id, const struct btf_type **vtype, __u32 *vtype_id, const struct btf_member **data_member) { const struct btf_type *kern_type, *kern_vtype; const struct btf_member *kern_data_member; struct btf *btf = NULL; __s32 kern_vtype_id, kern_type_id; char tname[256]; __u32 i; snprintf(tname, sizeof(tname), "%.*s", (int)bpf_core_essential_name_len(tname_raw), tname_raw); kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, &btf, mod_btf); if (kern_type_id < 0) { pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname); return kern_type_id; } kern_type = btf__type_by_id(btf, kern_type_id); /* Find the corresponding "map_value" type that will be used * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, * find "struct bpf_struct_ops_tcp_congestion_ops" from the * btf_vmlinux. */ kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, tname, BTF_KIND_STRUCT); if (kern_vtype_id < 0) { pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", STRUCT_OPS_VALUE_PREFIX, tname); return kern_vtype_id; } kern_vtype = btf__type_by_id(btf, kern_vtype_id); /* Find "struct tcp_congestion_ops" from * struct bpf_struct_ops_tcp_congestion_ops { * [ ... 
] * struct tcp_congestion_ops data; * } */ kern_data_member = btf_members(kern_vtype); for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { if (kern_data_member->type == kern_type_id) break; } if (i == btf_vlen(kern_vtype)) { pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", tname, STRUCT_OPS_VALUE_PREFIX, tname); return -EINVAL; } *type = kern_type; *type_id = kern_type_id; *vtype = kern_vtype; *vtype_id = kern_vtype_id; *data_member = kern_data_member; return 0; } static bool bpf_map__is_struct_ops(const struct bpf_map *map) { return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; } static bool is_valid_st_ops_program(struct bpf_object *obj, const struct bpf_program *prog) { int i; for (i = 0; i < obj->nr_programs; i++) { if (&obj->programs[i] == prog) return prog->type == BPF_PROG_TYPE_STRUCT_OPS; } return false; } /* For each struct_ops program P, referenced from some struct_ops map M, * enable P.autoload if there are Ms for which M.autocreate is true, * disable P.autoload if for all Ms M.autocreate is false. * Don't change P.autoload for programs that are not referenced from any maps. */ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj) { struct bpf_program *prog, *slot_prog; struct bpf_map *map; int i, j, k, vlen; for (i = 0; i < obj->nr_programs; ++i) { int should_load = false; int use_cnt = 0; prog = &obj->programs[i]; if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) continue; for (j = 0; j < obj->nr_maps; ++j) { const struct btf_type *type; map = &obj->maps[j]; if (!bpf_map__is_struct_ops(map)) continue; type = btf__type_by_id(obj->btf, map->st_ops->type_id); vlen = btf_vlen(type); for (k = 0; k < vlen; ++k) { slot_prog = map->st_ops->progs[k]; if (prog != slot_prog) continue; use_cnt++; if (map->autocreate) should_load = true; } } if (use_cnt) prog->autoload = should_load; } return 0; } /* Init the map's fields that depend on kern_btf */ static int bpf_map__init_kern_struct_ops(struct bpf_map *map) { const struct btf_member *member, *kern_member, *kern_data_member; const struct btf_type *type, *kern_type, *kern_vtype; __u32 i, kern_type_id, kern_vtype_id, kern_data_off; struct bpf_object *obj = map->obj; const struct btf *btf = obj->btf; struct bpf_struct_ops *st_ops; const struct btf *kern_btf; struct module_btf *mod_btf = NULL; void *data, *kern_data; const char *tname; int err; st_ops = map->st_ops; type = btf__type_by_id(btf, st_ops->type_id); tname = btf__name_by_offset(btf, type->name_off); err = find_struct_ops_kern_types(obj, tname, &mod_btf, &kern_type, &kern_type_id, &kern_vtype, &kern_vtype_id, &kern_data_member); if (err) return err; kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux; pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", map->name, st_ops->type_id, kern_type_id, kern_vtype_id); map->mod_btf_fd = mod_btf ? 
mod_btf->fd : -1; map->def.value_size = kern_vtype->size; map->btf_vmlinux_value_type_id = kern_vtype_id; st_ops->kern_vdata = calloc(1, kern_vtype->size); if (!st_ops->kern_vdata) return -ENOMEM; data = st_ops->data; kern_data_off = kern_data_member->offset / 8; kern_data = st_ops->kern_vdata + kern_data_off; member = btf_members(type); for (i = 0; i < btf_vlen(type); i++, member++) { const struct btf_type *mtype, *kern_mtype; __u32 mtype_id, kern_mtype_id; void *mdata, *kern_mdata; struct bpf_program *prog; __s64 msize, kern_msize; __u32 moff, kern_moff; __u32 kern_member_idx; const char *mname; mname = btf__name_by_offset(btf, member->name_off); moff = member->offset / 8; mdata = data + moff; msize = btf__resolve_size(btf, member->type); if (msize < 0) { pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n", map->name, mname); return msize; } kern_member = find_member_by_name(kern_btf, kern_type, mname); if (!kern_member) { if (!libbpf_is_mem_zeroed(mdata, msize)) { pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", map->name, mname); return -ENOTSUP; } if (st_ops->progs[i]) { /* If we had a declaratively set struct_ops callback, we need to * force its autoload to false, because it doesn't have * a chance of succeeding from POV of the current struct_ops map. * If this program is still referenced somewhere else, though, * then bpf_object_adjust_struct_ops_autoload() will update its * autoload accordingly. */ st_ops->progs[i]->autoload = false; st_ops->progs[i] = NULL; } /* Skip all-zero/NULL fields if they are not present in the kernel BTF */ pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n", map->name, mname); continue; } kern_member_idx = kern_member - btf_members(kern_type); if (btf_member_bitfield_size(type, i) || btf_member_bitfield_size(kern_type, kern_member_idx)) { pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", map->name, mname); return -ENOTSUP; } kern_moff = kern_member->offset / 8; kern_mdata = kern_data + kern_moff; mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, &kern_mtype_id); if (BTF_INFO_KIND(mtype->info) != BTF_INFO_KIND(kern_mtype->info)) { pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", map->name, mname, BTF_INFO_KIND(mtype->info), BTF_INFO_KIND(kern_mtype->info)); return -ENOTSUP; } if (btf_is_ptr(mtype)) { prog = *(void **)mdata; /* just like in the !kern_member case above, reset declaratively * set (at compile time) program's autoload to false, * if the user replaced it with another program or NULL */ if (st_ops->progs[i] && st_ops->progs[i] != prog) st_ops->progs[i]->autoload = false; /* Update the value from the shadow type */ st_ops->progs[i] = prog; if (!prog) continue; if (!is_valid_st_ops_program(obj, prog)) { pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n", map->name, mname); return -ENOTSUP; } kern_mtype = skip_mods_and_typedefs(kern_btf, kern_mtype->type, &kern_mtype_id); /* mtype->type must be a func_proto which was * guaranteed in bpf_object__collect_st_ops_relos(), * so only check kern_mtype for func_proto here.
*/ if (!btf_is_func_proto(kern_mtype)) { pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n", map->name, mname); return -ENOTSUP; } if (mod_btf) prog->attach_btf_obj_fd = mod_btf->fd; /* if we haven't yet processed this BPF program, record proper * attach_btf_id and member_idx */ if (!prog->attach_btf_id) { prog->attach_btf_id = kern_type_id; prog->expected_attach_type = kern_member_idx; } /* struct_ops BPF prog can be re-used between multiple * .struct_ops & .struct_ops.link as long as it's the * same struct_ops struct definition and the same * function pointer field */ if (prog->attach_btf_id != kern_type_id) { pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n", map->name, mname, prog->name, prog->sec_name, prog->type, prog->attach_btf_id, kern_type_id); return -EINVAL; } if (prog->expected_attach_type != kern_member_idx) { pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n", map->name, mname, prog->name, prog->sec_name, prog->type, prog->expected_attach_type, kern_member_idx); return -EINVAL; } st_ops->kern_func_off[i] = kern_data_off + kern_moff; pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", map->name, mname, prog->name, moff, kern_moff); continue; } kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); if (kern_msize < 0 || msize != kern_msize) { pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", map->name, mname, (ssize_t)msize, (ssize_t)kern_msize); return -ENOTSUP; } pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", map->name, mname, (unsigned int)msize, moff, kern_moff); memcpy(kern_mdata, mdata, msize); } return 0; } static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) { struct bpf_map *map; size_t i; int err; for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; if (!bpf_map__is_struct_ops(map)) continue; if (!map->autocreate) continue; err = bpf_map__init_kern_struct_ops(map); if (err) return err; } return 0; } static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, int shndx, Elf_Data *data) { const struct btf_type *type, *datasec; const struct btf_var_secinfo *vsi; struct bpf_struct_ops *st_ops; const char *tname, *var_name; __s32 type_id, datasec_id; const struct btf *btf; struct bpf_map *map; __u32 i; if (shndx == -1) return 0; btf = obj->btf; datasec_id = btf__find_by_name_kind(btf, sec_name, BTF_KIND_DATASEC); if (datasec_id < 0) { pr_warn("struct_ops init: DATASEC %s not found\n", sec_name); return -EINVAL; } datasec = btf__type_by_id(btf, datasec_id); vsi = btf_var_secinfos(datasec); for (i = 0; i < btf_vlen(datasec); i++, vsi++) { type = btf__type_by_id(obj->btf, vsi->type); var_name = btf__name_by_offset(obj->btf, type->name_off); type_id = btf__resolve_type(obj->btf, vsi->type); if (type_id < 0) { pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", vsi->type, sec_name); return -EINVAL; } type = btf__type_by_id(obj->btf, type_id); tname = btf__name_by_offset(obj->btf, type->name_off); if (!tname[0]) { pr_warn("struct_ops init: anonymous type is not supported\n"); return -ENOTSUP; } if (!btf_is_struct(type)) { pr_warn("struct_ops init: %s is not a struct\n", tname); return -EINVAL; } map = bpf_object__add_map(obj); if (IS_ERR(map)) return PTR_ERR(map); map->sec_idx = shndx; 
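/* (Editorial sketch, not part of libbpf: on the BPF side, each variable in
 * a ".struct_ops" DATASEC comes from a global definition along the lines of
 *
 *   SEC(".struct_ops")
 *   struct tcp_congestion_ops dctcp = {
 *           .init       = (void *)dctcp_init,
 *           .cong_avoid = (void *)dctcp_cong_avoid,
 *           .name       = "bpf_dctcp",
 *   };
 *
 * where dctcp_init and dctcp_cong_avoid are hypothetical struct_ops BPF
 * programs, cast to function pointers. Each such variable becomes one
 * BPF_MAP_TYPE_STRUCT_OPS map in this loop.)
 */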
map->sec_offset = vsi->offset; map->name = strdup(var_name); if (!map->name) return -ENOMEM; map->btf_value_type_id = type_id; /* Follow same convention as for programs autoload: * SEC("?.struct_ops") means map is not created by default. */ if (sec_name[0] == '?') { map->autocreate = false; /* from now on forget there was ? in section name */ sec_name++; } map->def.type = BPF_MAP_TYPE_STRUCT_OPS; map->def.key_size = sizeof(int); map->def.value_size = type->size; map->def.max_entries = 1; map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0; map->autoattach = true; map->st_ops = calloc(1, sizeof(*map->st_ops)); if (!map->st_ops) return -ENOMEM; st_ops = map->st_ops; st_ops->data = malloc(type->size); st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); st_ops->kern_func_off = malloc(btf_vlen(type) * sizeof(*st_ops->kern_func_off)); if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) return -ENOMEM; if (vsi->offset + type->size > data->d_size) { pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", var_name, sec_name); return -EINVAL; } memcpy(st_ops->data, data->d_buf + vsi->offset, type->size); st_ops->type_id = type_id; pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", tname, type_id, var_name, vsi->offset); } return 0; } static int bpf_object_init_struct_ops(struct bpf_object *obj) { const char *sec_name; int sec_idx, err; for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) { struct elf_sec_desc *desc = &obj->efile.secs[sec_idx]; if (desc->sec_type != SEC_ST_OPS) continue; sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); if (!sec_name) return -LIBBPF_ERRNO__FORMAT; err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data); if (err) return err; } return 0; } static struct bpf_object *bpf_object__new(const char *path, const void *obj_buf, size_t obj_buf_sz, const char *obj_name) { struct bpf_object *obj; char *end; obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); if (!obj) { pr_warn("alloc memory failed for %s\n", path); return ERR_PTR(-ENOMEM); } strcpy(obj->path, path); if (obj_name) { libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name)); } else { /* Using basename() GNU version which doesn't modify arg. */ libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name)); end = strchr(obj->name, '.'); if (end) *end = 0; } obj->efile.fd = -1; /* * Caller of this function should also call * bpf_object__elf_finish() after data collection to return * obj_buf to user. If not, we should duplicate the buffer to * avoid user freeing them before elf finish. */ obj->efile.obj_buf = obj_buf; obj->efile.obj_buf_sz = obj_buf_sz; obj->efile.btf_maps_shndx = -1; obj->kconfig_map_idx = -1; obj->kern_version = get_kernel_version(); obj->loaded = false; return obj; } static void bpf_object__elf_finish(struct bpf_object *obj) { if (!obj->efile.elf) return; elf_end(obj->efile.elf); obj->efile.elf = NULL; obj->efile.ehdr = NULL; obj->efile.symbols = NULL; obj->efile.arena_data = NULL; zfree(&obj->efile.secs); obj->efile.sec_cnt = 0; zclose(obj->efile.fd); obj->efile.obj_buf = NULL; obj->efile.obj_buf_sz = 0; } static int bpf_object__elf_init(struct bpf_object *obj) { Elf64_Ehdr *ehdr; int err = 0; Elf *elf; if (obj->efile.elf) { pr_warn("elf: init internal error\n"); return -LIBBPF_ERRNO__LIBELF; } if (obj->efile.obj_buf_sz > 0) { /* obj_buf should have been validated by bpf_object__open_mem(). 
*/ elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); } else { obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE], *cp; err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("elf: failed to open %s: %s\n", obj->path, cp); return err; } elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); } if (!elf) { pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__LIBELF; goto errout; } obj->efile.elf = elf; if (elf_kind(elf) != ELF_K_ELF) { err = -LIBBPF_ERRNO__FORMAT; pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); goto errout; } if (gelf_getclass(elf) != ELFCLASS64) { err = -LIBBPF_ERRNO__FORMAT; pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); goto errout; } obj->efile.ehdr = ehdr = elf64_getehdr(elf); if (!obj->efile.ehdr) { pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } /* Validate ELF object endianness... */ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { err = -LIBBPF_ERRNO__ENDIAN; pr_warn("elf: '%s' has unknown byte order\n", obj->path); goto errout; } /* and save after bpf_object_open() frees ELF data */ obj->byteorder = ehdr->e_ident[EI_DATA]; if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { pr_warn("elf: failed to get section names section index for %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } /* ELF is corrupted/truncated, avoid calling elf_strptr. */ if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) { pr_warn("elf: failed to get section names strings from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } /* Old LLVM set e_machine to EM_NONE */ if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) { pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } return 0; errout: bpf_object__elf_finish(obj); return err; } static bool is_native_endianness(struct bpf_object *obj) { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return obj->byteorder == ELFDATA2LSB; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return obj->byteorder == ELFDATA2MSB; #else # error "Unrecognized __BYTE_ORDER__" #endif } static int bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) { if (!data) { pr_warn("invalid license section in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't * go over allowed ELF data section buffer */ libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license))); pr_debug("license of %s is %s\n", obj->path, obj->license); return 0; } static int bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) { __u32 kver; if (!data || size != sizeof(kver)) { pr_warn("invalid kver section in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } memcpy(&kver, data, sizeof(kver)); obj->kern_version = kver; pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); return 0; } static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) { if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || type == BPF_MAP_TYPE_HASH_OF_MAPS) return true; return false; } static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) { Elf_Data *data; Elf_Scn *scn; if (!name) return -EINVAL; scn = elf_sec_by_name(obj, name); data = 
elf_sec_data(obj, scn); if (data) { *size = data->d_size; return 0; /* found it */ } return -ENOENT; } static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name) { Elf_Data *symbols = obj->efile.symbols; const char *sname; size_t si; for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) { Elf64_Sym *sym = elf_sym_by_idx(obj, si); if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) continue; if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL && ELF64_ST_BIND(sym->st_info) != STB_WEAK) continue; sname = elf_sym_str(obj, sym->st_name); if (!sname) { pr_warn("failed to get sym name string for var %s\n", name); return ERR_PTR(-EIO); } if (strcmp(name, sname) == 0) return sym; } return ERR_PTR(-ENOENT); } /* Some versions of Android don't provide memfd_create() in their libc * implementation, so avoid complications and just go straight to Linux * syscall. */ static int sys_memfd_create(const char *name, unsigned flags) { return syscall(__NR_memfd_create, name, flags); } #ifndef MFD_CLOEXEC #define MFD_CLOEXEC 0x0001U #endif static int create_placeholder_fd(void) { int fd; fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC)); if (fd < 0) return -errno; return fd; } static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) { struct bpf_map *map; int err; err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap, sizeof(*obj->maps), obj->nr_maps + 1); if (err) return ERR_PTR(err); map = &obj->maps[obj->nr_maps++]; map->obj = obj; /* Preallocate map FD without actually creating BPF map just yet. * These map FD "placeholders" will be reused later without changing * FD value when map is actually created in the kernel. * * This is useful to be able to perform BPF program relocations * without having to create BPF maps before that step. This allows us * to finalize and load BTF very late in BPF object's loading phase, * right before BPF maps have to be created and BPF programs have to * be loaded. By having these map FD placeholders we can perform all * the sanitizations, relocations, and any other adjustments before we * start creating actual BPF kernel objects (BTF, maps, progs). 
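 *
 * (Editorial sketch of the reuse step, not a verbatim excerpt: once the
 * real map exists, its FD is conceptually moved onto the reserved number,
 * e.g.
 *
 *   int fd = create_placeholder_fd();   // memfd placeholder, see above
 *   ...
 *   int map_fd = bpf_map_create(...);   // actual kernel map
 *   dup3(map_fd, fd, O_CLOEXEC);        // 'fd' now refers to the map
 *   close(map_fd);
 *
 * so an FD cached from bpf_map__fd() before bpf_object__load() stays
 * valid afterwards.)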
*/ map->fd = create_placeholder_fd(); if (map->fd < 0) return ERR_PTR(map->fd); map->inner_map_fd = -1; map->autocreate = true; return map; } static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) { const long page_sz = sysconf(_SC_PAGE_SIZE); size_t map_sz; map_sz = (size_t)roundup(value_sz, 8) * max_entries; map_sz = roundup(map_sz, page_sz); return map_sz; } static size_t bpf_map_mmap_sz(const struct bpf_map *map) { const long page_sz = sysconf(_SC_PAGE_SIZE); switch (map->def.type) { case BPF_MAP_TYPE_ARRAY: return array_map_mmap_sz(map->def.value_size, map->def.max_entries); case BPF_MAP_TYPE_ARENA: return page_sz * map->def.max_entries; default: return 0; /* not supported */ } } static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz) { void *mmaped; if (!map->mmaped) return -EINVAL; if (old_sz == new_sz) return 0; mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (mmaped == MAP_FAILED) return -errno; memcpy(mmaped, map->mmaped, min(old_sz, new_sz)); munmap(map->mmaped, old_sz); map->mmaped = mmaped; return 0; } static char *internal_map_name(struct bpf_object *obj, const char *real_name) { char map_name[BPF_OBJ_NAME_LEN], *p; int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); /* This is one of the more confusing parts of libbpf for various * reasons, some of which are historical. The original idea for naming * internal names was to include as much of BPF object name prefix as * possible, so that it can be distinguished from similar internal * maps of a different BPF object. * As an example, let's say we have bpf_object named 'my_object_name' * and internal map corresponding to '.rodata' ELF section. The final * map name advertised to user and to the kernel will be * 'my_objec.rodata', taking first 8 characters of object name and * entire 7 characters of '.rodata'. * Somewhat confusingly, if internal map ELF section name is shorter * than 7 characters, e.g., '.bss', we still reserve 7 characters * for the suffix, even though we only have 4 actual characters, and * resulting map will be called 'my_objec.bss', not even using all 15 * characters allowed by the kernel. Oh well, at least the truncated * object name is somewhat consistent in this case. But if the map * name is '.kconfig', we'll still have entirety of '.kconfig' added * (8 chars) and thus will be left with only first 7 characters of the * object name ('my_obje'). Happy guessing, user, that the final map * name will be "my_obje.kconfig". * Now, with libbpf starting to support arbitrarily named .rodata.* * and .data.* data sections, it's possible that ELF section name is * longer than allowed 15 chars, so we now need to be careful to take * only up to 15 first characters of ELF name, taking no BPF object * name characters at all. So '.rodata.abracadabra' will result in * '.rodata.abracad' kernel and user-visible name. * We need to keep this convoluted logic intact for .data, .bss and * .rodata maps, but for new custom .data.custom and .rodata.custom * maps we use their ELF names as is, not prepending bpf_object name * in front. We still need to truncate them to 15 characters for the * kernel. Full name can be recovered for such maps by using DATASEC * BTF type associated with such map's value type, though. 
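 *
 * (Recap of the examples above, for a bpf_object named 'my_object_name':
 *   '.rodata'             -> 'my_objec.rodata'
 *   '.bss'                -> 'my_objec.bss'
 *   '.kconfig'            -> 'my_obje.kconfig'
 *   '.rodata.abracadabra' -> '.rodata.abracad')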
*/ if (sfx_len >= BPF_OBJ_NAME_LEN) sfx_len = BPF_OBJ_NAME_LEN - 1; /* if there are two or more dots in the map name, it's a custom dot map */ if (strchr(real_name + 1, '.') != NULL) pfx_len = 0; else pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name)); snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, sfx_len, real_name); /* sanitize map name to characters allowed by kernel */ for (p = map_name; *p && p < map_name + sizeof(map_name); p++) if (!isalnum(*p) && *p != '_' && *p != '.') *p = '_'; return strdup(map_name); } static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map); /* Internal BPF map is mmap()'able only if at least one of the corresponding * DATASEC's VARs is to be exposed through BPF skeleton. I.e., it's a GLOBAL * variable and it's not marked as __hidden (which turns it into, effectively, * a STATIC variable). */ static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map) { const struct btf_type *t, *vt; struct btf_var_secinfo *vsi; int i, n; if (!map->btf_value_type_id) return false; t = btf__type_by_id(obj->btf, map->btf_value_type_id); if (!btf_is_datasec(t)) return false; vsi = btf_var_secinfos(t); for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) { vt = btf__type_by_id(obj->btf, vsi->type); if (!btf_is_var(vt)) continue; if (btf_var(vt)->linkage != BTF_VAR_STATIC) return true; } return false; } static int bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, const char *real_name, int sec_idx, void *data, size_t data_sz) { struct bpf_map_def *def; struct bpf_map *map; size_t mmap_sz; int err; map = bpf_object__add_map(obj); if (IS_ERR(map)) return PTR_ERR(map); map->libbpf_type = type; map->sec_idx = sec_idx; map->sec_offset = 0; map->real_name = strdup(real_name); map->name = internal_map_name(obj, real_name); if (!map->real_name || !map->name) { zfree(&map->real_name); zfree(&map->name); return -ENOMEM; } def = &map->def; def->type = BPF_MAP_TYPE_ARRAY; def->key_size = sizeof(int); def->value_size = data_sz; def->max_entries = 1; def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG ? BPF_F_RDONLY_PROG : 0; /* failures are fine because of maps like .rodata.str1.1 */ (void) map_fill_btf_type_info(obj, map); if (map_is_mmapable(obj, map)) def->map_flags |= BPF_F_MMAPABLE; pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", map->name, map->sec_idx, map->sec_offset, def->map_flags); mmap_sz = bpf_map_mmap_sz(map); map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (map->mmaped == MAP_FAILED) { err = -errno; map->mmaped = NULL; pr_warn("failed to alloc map '%s' content buffer: %d\n", map->name, err); zfree(&map->real_name); zfree(&map->name); return err; } if (data) memcpy(map->mmaped, data, data_sz); pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); return 0; } static int bpf_object__init_global_data_maps(struct bpf_object *obj) { struct elf_sec_desc *sec_desc; const char *sec_name; int err = 0, sec_idx; /* * Populate obj->maps with libbpf internal maps. */ for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { sec_desc = &obj->efile.secs[sec_idx]; /* Skip recognized sections with size 0.
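 * (Editorial aside: the non-empty sections handled below originate from
 * ordinary global variables in BPF C code, e.g.
 *   int counter = 1;       -> .data
 *   const int limit = 10;  -> .rodata
 *   int scratch;           -> .bss
 * each becoming one internal ARRAY map.)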
*/ if (!sec_desc->data || sec_desc->data->d_size == 0) continue; switch (sec_desc->sec_type) { case SEC_DATA: sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, sec_name, sec_idx, sec_desc->data->d_buf, sec_desc->data->d_size); break; case SEC_RODATA: obj->has_rodata = true; sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, sec_name, sec_idx, sec_desc->data->d_buf, sec_desc->data->d_size); break; case SEC_BSS: sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, sec_name, sec_idx, NULL, sec_desc->data->d_size); break; default: /* skip */ break; } if (err) return err; } return 0; } static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, const void *name) { int i; for (i = 0; i < obj->nr_extern; i++) { if (strcmp(obj->externs[i].name, name) == 0) return &obj->externs[i]; } return NULL; } static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj, const void *name, int len) { const char *ext_name; int i; for (i = 0; i < obj->nr_extern; i++) { ext_name = obj->externs[i].name; if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0) return &obj->externs[i]; } return NULL; } static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, char value) { switch (ext->kcfg.type) { case KCFG_BOOL: if (value == 'm') { pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n", ext->name, value); return -EINVAL; } *(bool *)ext_val = value == 'y' ? true : false; break; case KCFG_TRISTATE: if (value == 'y') *(enum libbpf_tristate *)ext_val = TRI_YES; else if (value == 'm') *(enum libbpf_tristate *)ext_val = TRI_MODULE; else /* value == 'n' */ *(enum libbpf_tristate *)ext_val = TRI_NO; break; case KCFG_CHAR: *(char *)ext_val = value; break; case KCFG_UNKNOWN: case KCFG_INT: case KCFG_CHAR_ARR: default: pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n", ext->name, value); return -EINVAL; } ext->is_set = true; return 0; } static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, const char *value) { size_t len; if (ext->kcfg.type != KCFG_CHAR_ARR) { pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n", ext->name, value); return -EINVAL; } len = strlen(value); if (value[len - 1] != '"') { pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; } /* strip quotes */ len -= 2; if (len >= ext->kcfg.sz) { pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n", ext->name, value, len, ext->kcfg.sz - 1); len = ext->kcfg.sz - 1; } memcpy(ext_val, value + 1, len); ext_val[len] = '\0'; ext->is_set = true; return 0; } static int parse_u64(const char *value, __u64 *res) { char *value_end; int err; errno = 0; *res = strtoull(value, &value_end, 0); if (errno) { err = -errno; pr_warn("failed to parse '%s' as integer: %d\n", value, err); return err; } if (*value_end) { pr_warn("failed to parse '%s' as integer completely\n", value); return -EINVAL; } return 0; } static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) { int bit_sz = ext->kcfg.sz * 8; if (ext->kcfg.sz == 8) return true; /* Validate that value stored in u64 fits in integer of `ext->sz` * bytes size without any loss of information. 
If the target integer * is signed, we rely on the following limits of integer type of * Y bits and subsequent transformation: * * -2^(Y-1) <= X <= 2^(Y-1) - 1 * 0 <= X + 2^(Y-1) <= 2^Y - 1 * 0 <= X + 2^(Y-1) < 2^Y * * For unsigned target integer, check that all the (64 - Y) bits are * zero. */ if (ext->kcfg.is_signed) return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); else return (v >> bit_sz) == 0; } static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, __u64 value) { if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR && ext->kcfg.type != KCFG_BOOL) { pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n", ext->name, (unsigned long long)value); return -EINVAL; } if (ext->kcfg.type == KCFG_BOOL && value > 1) { pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n", ext->name, (unsigned long long)value); return -EINVAL; } if (!is_kcfg_value_in_range(ext, value)) { pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n", ext->name, (unsigned long long)value, ext->kcfg.sz); return -ERANGE; } switch (ext->kcfg.sz) { case 1: *(__u8 *)ext_val = value; break; case 2: *(__u16 *)ext_val = value; break; case 4: *(__u32 *)ext_val = value; break; case 8: *(__u64 *)ext_val = value; break; default: return -EINVAL; } ext->is_set = true; return 0; } static int bpf_object__process_kconfig_line(struct bpf_object *obj, char *buf, void *data) { struct extern_desc *ext; char *sep, *value; int len, err = 0; void *ext_val; __u64 num; if (!str_has_pfx(buf, "CONFIG_")) return 0; sep = strchr(buf, '='); if (!sep) { pr_warn("failed to parse '%s': no separator\n", buf); return -EINVAL; } /* Trim ending '\n' */ len = strlen(buf); if (buf[len - 1] == '\n') buf[len - 1] = '\0'; /* Split on '=' and ensure that a value is present. */ *sep = '\0'; if (!sep[1]) { *sep = '='; pr_warn("failed to parse '%s': no value\n", buf); return -EINVAL; } ext = find_extern_by_name(obj, buf); if (!ext || ext->is_set) return 0; ext_val = data + ext->kcfg.data_off; value = sep + 1; switch (*value) { case 'y': case 'n': case 'm': err = set_kcfg_value_tri(ext, ext_val, *value); break; case '"': err = set_kcfg_value_str(ext, ext_val, value); break; default: /* assume integer */ err = parse_u64(value, &num); if (err) { pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value); return err; } if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value); return -EINVAL; } err = set_kcfg_value_num(ext, ext_val, num); break; } if (err) return err; pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value); return 0; } static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) { char buf[PATH_MAX]; struct utsname uts; int len, err = 0; gzFile file; uname(&uts); len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); if (len < 0) return -EINVAL; else if (len >= PATH_MAX) return -ENAMETOOLONG; /* gzopen also accepts uncompressed files. 
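 *
 * (Editorial example: each line read below, e.g. "CONFIG_BPF_JIT=y", is
 * handed to bpf_object__process_kconfig_line() above, which matches it
 * against externs declared in BPF C code along the lines of
 *
 *   extern bool CONFIG_BPF_JIT __kconfig;
 *
 * where __kconfig, from bpf_helpers.h, places the variable in the
 * ".kconfig" section.)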
*/ file = gzopen(buf, "re"); if (!file) file = gzopen("/proc/config.gz", "re"); if (!file) { pr_warn("failed to open system Kconfig\n"); return -ENOENT; } while (gzgets(file, buf, sizeof(buf))) { err = bpf_object__process_kconfig_line(obj, buf, data); if (err) { pr_warn("error parsing system Kconfig line '%s': %d\n", buf, err); goto out; } } out: gzclose(file); return err; } static int bpf_object__read_kconfig_mem(struct bpf_object *obj, const char *config, void *data) { char buf[PATH_MAX]; int err = 0; FILE *file; file = fmemopen((void *)config, strlen(config), "r"); if (!file) { err = -errno; pr_warn("failed to open in-memory Kconfig: %d\n", err); return err; } while (fgets(buf, sizeof(buf), file)) { err = bpf_object__process_kconfig_line(obj, buf, data); if (err) { pr_warn("error parsing in-memory Kconfig line '%s': %d\n", buf, err); break; } } fclose(file); return err; } static int bpf_object__init_kconfig_map(struct bpf_object *obj) { struct extern_desc *last_ext = NULL, *ext; size_t map_sz; int i, err; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type == EXT_KCFG) last_ext = ext; } if (!last_ext) return 0; map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, ".kconfig", obj->efile.symbols_shndx, NULL, map_sz); if (err) return err; obj->kconfig_map_idx = obj->nr_maps - 1; return 0; } const struct btf_type * skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) { const struct btf_type *t = btf__type_by_id(btf, id); if (res_id) *res_id = id; while (btf_is_mod(t) || btf_is_typedef(t)) { if (res_id) *res_id = t->type; t = btf__type_by_id(btf, t->type); } return t; } static const struct btf_type * resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) { const struct btf_type *t; t = skip_mods_and_typedefs(btf, id, NULL); if (!btf_is_ptr(t)) return NULL; t = skip_mods_and_typedefs(btf, t->type, res_id); return btf_is_func_proto(t) ? t : NULL; } static const char *__btf_kind_str(__u16 kind) { switch (kind) { case BTF_KIND_UNKN: return "void"; case BTF_KIND_INT: return "int"; case BTF_KIND_PTR: return "ptr"; case BTF_KIND_ARRAY: return "array"; case BTF_KIND_STRUCT: return "struct"; case BTF_KIND_UNION: return "union"; case BTF_KIND_ENUM: return "enum"; case BTF_KIND_FWD: return "fwd"; case BTF_KIND_TYPEDEF: return "typedef"; case BTF_KIND_VOLATILE: return "volatile"; case BTF_KIND_CONST: return "const"; case BTF_KIND_RESTRICT: return "restrict"; case BTF_KIND_FUNC: return "func"; case BTF_KIND_FUNC_PROTO: return "func_proto"; case BTF_KIND_VAR: return "var"; case BTF_KIND_DATASEC: return "datasec"; case BTF_KIND_FLOAT: return "float"; case BTF_KIND_DECL_TAG: return "decl_tag"; case BTF_KIND_TYPE_TAG: return "type_tag"; case BTF_KIND_ENUM64: return "enum64"; default: return "unknown"; } } const char *btf_kind_str(const struct btf_type *t) { return __btf_kind_str(btf_kind(t)); } /* * Fetch integer attribute of BTF map definition. Such attributes are * represented using a pointer to an array, in which dimensionality of array * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF * type definition, while using only sizeof(void *) space in ELF data section. 
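 *
 * (For illustration: the bpf_helpers.h macro __uint() emits exactly this
 * pointer-to-array shape, while __type() uses a plain pointer for the
 * "key"/"value" branches handled further down:
 *   #define __uint(name, val)  int (*name)[val]
 *   #define __type(name, val)  typeof(val) *name
 * so a BTF-defined map such as
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(max_entries, 64);
 *           __type(key, int);
 *           __type(value, long);
 *   } m SEC(".maps");
 * carries all of these attributes purely as BTF type info.)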
*/ static bool get_map_field_int(const char *map_name, const struct btf *btf, const struct btf_member *m, __u32 *res) { const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); const char *name = btf__name_by_offset(btf, m->name_off); const struct btf_array *arr_info; const struct btf_type *arr_t; if (!btf_is_ptr(t)) { pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", map_name, name, btf_kind_str(t)); return false; } arr_t = btf__type_by_id(btf, t->type); if (!arr_t) { pr_warn("map '%s': attr '%s': type [%u] not found.\n", map_name, name, t->type); return false; } if (!btf_is_array(arr_t)) { pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", map_name, name, btf_kind_str(arr_t)); return false; } arr_info = btf_array(arr_t); *res = arr_info->nelems; return true; } static bool get_map_field_long(const char *map_name, const struct btf *btf, const struct btf_member *m, __u64 *res) { const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); const char *name = btf__name_by_offset(btf, m->name_off); if (btf_is_ptr(t)) { __u32 res32; bool ret; ret = get_map_field_int(map_name, btf, m, &res32); if (ret) *res = (__u64)res32; return ret; } if (!btf_is_enum(t) && !btf_is_enum64(t)) { pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n", map_name, name, btf_kind_str(t)); return false; } if (btf_vlen(t) != 1) { pr_warn("map '%s': attr '%s': invalid __ulong\n", map_name, name); return false; } if (btf_is_enum(t)) { const struct btf_enum *e = btf_enum(t); *res = e->val; } else { const struct btf_enum64 *e = btf_enum64(t); *res = btf_enum64_value(e); } return true; } static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) { int len; len = snprintf(buf, buf_sz, "%s/%s", path, name); if (len < 0) return -EINVAL; if (len >= buf_sz) return -ENAMETOOLONG; return 0; } static int build_map_pin_path(struct bpf_map *map, const char *path) { char buf[PATH_MAX]; int err; if (!path) path = BPF_FS_DEFAULT_PATH; err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); if (err) return err; return bpf_map__set_pin_path(map, buf); } /* should match definition in bpf_helpers.h */ enum libbpf_pin_type { LIBBPF_PIN_NONE, /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ LIBBPF_PIN_BY_NAME, }; int parse_btf_map_def(const char *map_name, struct btf *btf, const struct btf_type *def_t, bool strict, struct btf_map_def *map_def, struct btf_map_def *inner_def) { const struct btf_type *t; const struct btf_member *m; bool is_inner = inner_def == NULL; int vlen, i; vlen = btf_vlen(def_t); m = btf_members(def_t); for (i = 0; i < vlen; i++, m++) { const char *name = btf__name_by_offset(btf, m->name_off); if (!name) { pr_warn("map '%s': invalid field #%d.\n", map_name, i); return -EINVAL; } if (strcmp(name, "type") == 0) { if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) return -EINVAL; map_def->parts |= MAP_DEF_MAP_TYPE; } else if (strcmp(name, "max_entries") == 0) { if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) return -EINVAL; map_def->parts |= MAP_DEF_MAX_ENTRIES; } else if (strcmp(name, "map_flags") == 0) { if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) return -EINVAL; map_def->parts |= MAP_DEF_MAP_FLAGS; } else if (strcmp(name, "numa_node") == 0) { if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) return -EINVAL; map_def->parts |= MAP_DEF_NUMA_NODE; } else if (strcmp(name, "key_size") == 0) { __u32 sz; if (!get_map_field_int(map_name, btf, m, &sz)) return -EINVAL; if 
(map_def->key_size && map_def->key_size != sz) { pr_warn("map '%s': conflicting key size %u != %u.\n", map_name, map_def->key_size, sz); return -EINVAL; } map_def->key_size = sz; map_def->parts |= MAP_DEF_KEY_SIZE; } else if (strcmp(name, "key") == 0) { __s64 sz; t = btf__type_by_id(btf, m->type); if (!t) { pr_warn("map '%s': key type [%d] not found.\n", map_name, m->type); return -EINVAL; } if (!btf_is_ptr(t)) { pr_warn("map '%s': key spec is not PTR: %s.\n", map_name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(btf, t->type); if (sz < 0) { pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", map_name, t->type, (ssize_t)sz); return sz; } if (map_def->key_size && map_def->key_size != sz) { pr_warn("map '%s': conflicting key size %u != %zd.\n", map_name, map_def->key_size, (ssize_t)sz); return -EINVAL; } map_def->key_size = sz; map_def->key_type_id = t->type; map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; } else if (strcmp(name, "value_size") == 0) { __u32 sz; if (!get_map_field_int(map_name, btf, m, &sz)) return -EINVAL; if (map_def->value_size && map_def->value_size != sz) { pr_warn("map '%s': conflicting value size %u != %u.\n", map_name, map_def->value_size, sz); return -EINVAL; } map_def->value_size = sz; map_def->parts |= MAP_DEF_VALUE_SIZE; } else if (strcmp(name, "value") == 0) { __s64 sz; t = btf__type_by_id(btf, m->type); if (!t) { pr_warn("map '%s': value type [%d] not found.\n", map_name, m->type); return -EINVAL; } if (!btf_is_ptr(t)) { pr_warn("map '%s': value spec is not PTR: %s.\n", map_name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(btf, t->type); if (sz < 0) { pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", map_name, t->type, (ssize_t)sz); return sz; } if (map_def->value_size && map_def->value_size != sz) { pr_warn("map '%s': conflicting value size %u != %zd.\n", map_name, map_def->value_size, (ssize_t)sz); return -EINVAL; } map_def->value_size = sz; map_def->value_type_id = t->type; map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; } else if (strcmp(name, "values") == 0) { bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; const char *desc = is_map_in_map ?
"map-in-map inner" : "prog-array value"; char inner_map_name[128]; int err; if (is_inner) { pr_warn("map '%s': multi-level inner maps not supported.\n", map_name); return -ENOTSUP; } if (i != vlen - 1) { pr_warn("map '%s': '%s' member should be last.\n", map_name, name); return -EINVAL; } if (!is_map_in_map && !is_prog_array) { pr_warn("map '%s': should be map-in-map or prog-array.\n", map_name); return -ENOTSUP; } if (map_def->value_size && map_def->value_size != 4) { pr_warn("map '%s': conflicting value size %u != 4.\n", map_name, map_def->value_size); return -EINVAL; } map_def->value_size = 4; t = btf__type_by_id(btf, m->type); if (!t) { pr_warn("map '%s': %s type [%d] not found.\n", map_name, desc, m->type); return -EINVAL; } if (!btf_is_array(t) || btf_array(t)->nelems) { pr_warn("map '%s': %s spec is not a zero-sized array.\n", map_name, desc); return -EINVAL; } t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); if (!btf_is_ptr(t)) { pr_warn("map '%s': %s def is of unexpected kind %s.\n", map_name, desc, btf_kind_str(t)); return -EINVAL; } t = skip_mods_and_typedefs(btf, t->type, NULL); if (is_prog_array) { if (!btf_is_func_proto(t)) { pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", map_name, btf_kind_str(t)); return -EINVAL; } continue; } if (!btf_is_struct(t)) { pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", map_name, btf_kind_str(t)); return -EINVAL; } snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); if (err) return err; map_def->parts |= MAP_DEF_INNER_MAP; } else if (strcmp(name, "pinning") == 0) { __u32 val; if (is_inner) { pr_warn("map '%s': inner def can't be pinned.\n", map_name); return -EINVAL; } if (!get_map_field_int(map_name, btf, m, &val)) return -EINVAL; if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { pr_warn("map '%s': invalid pinning value %u.\n", map_name, val); return -EINVAL; } map_def->pinning = val; map_def->parts |= MAP_DEF_PINNING; } else if (strcmp(name, "map_extra") == 0) { __u64 map_extra; if (!get_map_field_long(map_name, btf, m, &map_extra)) return -EINVAL; map_def->map_extra = map_extra; map_def->parts |= MAP_DEF_MAP_EXTRA; } else { if (strict) { pr_warn("map '%s': unknown field '%s'.\n", map_name, name); return -ENOTSUP; } pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); } } if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { pr_warn("map '%s': map type isn't specified.\n", map_name); return -EINVAL; } return 0; } static size_t adjust_ringbuf_sz(size_t sz) { __u32 page_sz = sysconf(_SC_PAGE_SIZE); __u32 mul; /* if the user forgot to set any size, make sure they see an error */ if (sz == 0) return 0; /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be * a power-of-2 multiple of kernel's page size. If the user diligently * satisfied these conditions, pass the size through. */ if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz)) return sz; /* Otherwise find closest (page_sz * power_of_2) product bigger than * user-set size to satisfy both user size request and kernel * requirements and substitute correct max_entries for map creation. * For example, with 4096-byte pages a requested size of 10000 becomes * 16384 (4096 * 4), the smallest such product that is larger.
*/ for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) { if (mul * page_sz > sz) return mul * page_sz; } /* if it's impossible to satisfy the conditions (i.e., user size is * very close to UINT_MAX but is not a power-of-2 multiple of * page_size) then just return original size and let kernel reject it */ return sz; } static bool map_is_ringbuf(const struct bpf_map *map) { return map->def.type == BPF_MAP_TYPE_RINGBUF || map->def.type == BPF_MAP_TYPE_USER_RINGBUF; } static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) { map->def.type = def->map_type; map->def.key_size = def->key_size; map->def.value_size = def->value_size; map->def.max_entries = def->max_entries; map->def.map_flags = def->map_flags; map->map_extra = def->map_extra; map->numa_node = def->numa_node; map->btf_key_type_id = def->key_type_id; map->btf_value_type_id = def->value_type_id; /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ if (map_is_ringbuf(map)) map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); if (def->parts & MAP_DEF_MAP_TYPE) pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); if (def->parts & MAP_DEF_KEY_TYPE) pr_debug("map '%s': found key [%u], sz = %u.\n", map->name, def->key_type_id, def->key_size); else if (def->parts & MAP_DEF_KEY_SIZE) pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size); if (def->parts & MAP_DEF_VALUE_TYPE) pr_debug("map '%s': found value [%u], sz = %u.\n", map->name, def->value_type_id, def->value_size); else if (def->parts & MAP_DEF_VALUE_SIZE) pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size); if (def->parts & MAP_DEF_MAX_ENTRIES) pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); if (def->parts & MAP_DEF_MAP_FLAGS) pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); if (def->parts & MAP_DEF_MAP_EXTRA) pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, (unsigned long long)def->map_extra); if (def->parts & MAP_DEF_PINNING) pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); if (def->parts & MAP_DEF_NUMA_NODE) pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); if (def->parts & MAP_DEF_INNER_MAP) pr_debug("map '%s': found inner map definition.\n", map->name); } static const char *btf_var_linkage_str(__u32 linkage) { switch (linkage) { case BTF_VAR_STATIC: return "static"; case BTF_VAR_GLOBAL_ALLOCATED: return "global"; case BTF_VAR_GLOBAL_EXTERN: return "extern"; default: return "unknown"; } } static int bpf_object__init_user_btf_map(struct bpf_object *obj, const struct btf_type *sec, int var_idx, int sec_idx, const Elf_Data *data, bool strict, const char *pin_root_path) { struct btf_map_def map_def = {}, inner_def = {}; const struct btf_type *var, *def; const struct btf_var_secinfo *vi; const struct btf_var *var_extra; const char *map_name; struct bpf_map *map; int err; vi = btf_var_secinfos(sec) + var_idx; var = btf__type_by_id(obj->btf, vi->type); var_extra = btf_var(var); map_name = btf__name_by_offset(obj->btf, var->name_off); if (map_name == NULL || map_name[0] == '\0') { pr_warn("map #%d: empty name.\n", var_idx); return -EINVAL; } if ((__u64)vi->offset + vi->size > data->d_size) { pr_warn("map '%s' BTF data is corrupted.\n", map_name); return -EINVAL; } if (!btf_is_var(var)) { pr_warn("map '%s': unexpected var kind %s.\n", map_name, btf_kind_str(var)); return -EINVAL; } if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { pr_warn("map '%s': unsupported 
map linkage %s.\n", map_name, btf_var_linkage_str(var_extra->linkage)); return -EOPNOTSUPP; } def = skip_mods_and_typedefs(obj->btf, var->type, NULL); if (!btf_is_struct(def)) { pr_warn("map '%s': unexpected def kind %s.\n", map_name, btf_kind_str(var)); return -EINVAL; } if (def->size > vi->size) { pr_warn("map '%s': invalid def size.\n", map_name); return -EINVAL; } map = bpf_object__add_map(obj); if (IS_ERR(map)) return PTR_ERR(map); map->name = strdup(map_name); if (!map->name) { pr_warn("map '%s': failed to alloc map name.\n", map_name); return -ENOMEM; } map->libbpf_type = LIBBPF_MAP_UNSPEC; map->def.type = BPF_MAP_TYPE_UNSPEC; map->sec_idx = sec_idx; map->sec_offset = vi->offset; map->btf_var_idx = var_idx; pr_debug("map '%s': at sec_idx %d, offset %zu.\n", map_name, map->sec_idx, map->sec_offset); err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); if (err) return err; fill_map_from_def(map, &map_def); if (map_def.pinning == LIBBPF_PIN_BY_NAME) { err = build_map_pin_path(map, pin_root_path); if (err) { pr_warn("map '%s': couldn't build pin path.\n", map->name); return err; } } if (map_def.parts & MAP_DEF_INNER_MAP) { map->inner_map = calloc(1, sizeof(*map->inner_map)); if (!map->inner_map) return -ENOMEM; map->inner_map->fd = create_placeholder_fd(); if (map->inner_map->fd < 0) return map->inner_map->fd; map->inner_map->sec_idx = sec_idx; map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); if (!map->inner_map->name) return -ENOMEM; sprintf(map->inner_map->name, "%s.inner", map_name); fill_map_from_def(map->inner_map, &inner_def); } err = map_fill_btf_type_info(obj, map); if (err) return err; return 0; } static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map, const char *sec_name, int sec_idx, void *data, size_t data_sz) { const long page_sz = sysconf(_SC_PAGE_SIZE); size_t mmap_sz; mmap_sz = bpf_map_mmap_sz(obj->arena_map); if (roundup(data_sz, page_sz) > mmap_sz) { pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n", sec_name, mmap_sz, data_sz); return -E2BIG; } obj->arena_data = malloc(data_sz); if (!obj->arena_data) return -ENOMEM; memcpy(obj->arena_data, data, data_sz); obj->arena_data_sz = data_sz; /* make bpf_map__init_value() work for ARENA maps */ map->mmaped = obj->arena_data; return 0; } static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, const char *pin_root_path) { const struct btf_type *sec = NULL; int nr_types, i, vlen, err; const struct btf_type *t; const char *name; Elf_Data *data; Elf_Scn *scn; if (obj->efile.btf_maps_shndx < 0) return 0; scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); data = elf_sec_data(obj, scn); if (!scn || !data) { pr_warn("elf: failed to get %s map definitions for %s\n", MAPS_ELF_SEC, obj->path); return -EINVAL; } nr_types = btf__type_cnt(obj->btf); for (i = 1; i < nr_types; i++) { t = btf__type_by_id(obj->btf, i); if (!btf_is_datasec(t)) continue; name = btf__name_by_offset(obj->btf, t->name_off); if (strcmp(name, MAPS_ELF_SEC) == 0) { sec = t; obj->efile.btf_maps_sec_btf_id = i; break; } } if (!sec) { pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); return -ENOENT; } vlen = btf_vlen(sec); for (i = 0; i < vlen; i++) { err = bpf_object__init_user_btf_map(obj, sec, i, obj->efile.btf_maps_shndx, data, strict, pin_root_path); if (err) return err; } for (i = 0; i < obj->nr_maps; i++) { struct bpf_map *map = &obj->maps[i]; if (map->def.type != BPF_MAP_TYPE_ARENA) continue; if 
(obj->arena_map) { pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n", map->name, obj->arena_map->name); return -EINVAL; } obj->arena_map = map; if (obj->efile.arena_data) { err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx, obj->efile.arena_data->d_buf, obj->efile.arena_data->d_size); if (err) return err; } } if (obj->efile.arena_data && !obj->arena_map) { pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n", ARENA_SEC); return -ENOENT; } return 0; } static int bpf_object__init_maps(struct bpf_object *obj, const struct bpf_object_open_opts *opts) { const char *pin_root_path; bool strict; int err = 0; strict = !OPTS_GET(opts, relaxed_maps, false); pin_root_path = OPTS_GET(opts, pin_root_path, NULL); err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); err = err ?: bpf_object__init_global_data_maps(obj); err = err ?: bpf_object__init_kconfig_map(obj); err = err ?: bpf_object_init_struct_ops(obj); return err; } static bool section_have_execinstr(struct bpf_object *obj, int idx) { Elf64_Shdr *sh; sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); if (!sh) return false; return sh->sh_flags & SHF_EXECINSTR; } static bool starts_with_qmark(const char *s) { return s && s[0] == '?'; } static bool btf_needs_sanitization(struct bpf_object *obj) { bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec; } static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) { bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC); int enum64_placeholder_id = 0; struct btf_type *t; int i, j, vlen; for (i = 1; i < btf__type_cnt(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { /* replace VAR/DECL_TAG with INT */ t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); /* * using size = 1 is the safest choice, 4 will be too * big and cause kernel BTF validation failure if * original variable took less than 4 bytes */ t->size = 1; *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); } else if (!has_datasec && btf_is_datasec(t)) { /* replace DATASEC with STRUCT */ const struct btf_var_secinfo *v = btf_var_secinfos(t); struct btf_member *m = btf_members(t); struct btf_type *vt; char *name; name = (char *)btf__name_by_offset(btf, t->name_off); while (*name) { if (*name == '.' 
|| *name == '?') *name = '_'; name++; } vlen = btf_vlen(t); t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); for (j = 0; j < vlen; j++, v++, m++) { /* order of field assignments is important */ m->offset = v->offset * 8; m->type = v->type; /* preserve variable name as member name */ vt = (void *)btf__type_by_id(btf, v->type); m->name_off = vt->name_off; } } else if (!has_qmark_datasec && btf_is_datasec(t) && starts_with_qmark(btf__name_by_offset(btf, t->name_off))) { /* replace '?' prefix with '_' for DATASEC names */ char *name; name = (char *)btf__name_by_offset(btf, t->name_off); if (name[0] == '?') name[0] = '_'; } else if (!has_func && btf_is_func_proto(t)) { /* replace FUNC_PROTO with ENUM */ vlen = btf_vlen(t); t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); t->size = sizeof(__u32); /* kernel enforced */ } else if (!has_func && btf_is_func(t)) { /* replace FUNC with TYPEDEF */ t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); } else if (!has_func_global && btf_is_func(t)) { /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); } else if (!has_float && btf_is_float(t)) { /* replace FLOAT with an equally-sized empty STRUCT; * since C compilers do not accept e.g. "float" as a * valid struct name, make it anonymous */ t->name_off = 0; t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); } else if (!has_type_tag && btf_is_type_tag(t)) { /* replace TYPE_TAG with a CONST */ t->name_off = 0; t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); } else if (!has_enum64 && btf_is_enum(t)) { /* clear the kflag */ t->info = btf_type_info(btf_kind(t), btf_vlen(t), false); } else if (!has_enum64 && btf_is_enum64(t)) { /* replace ENUM64 with a union */ struct btf_member *m; if (enum64_placeholder_id == 0) { enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0); if (enum64_placeholder_id < 0) return enum64_placeholder_id; t = (struct btf_type *)btf__type_by_id(btf, i); } m = btf_members(t); vlen = btf_vlen(t); t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen); for (j = 0; j < vlen; j++, m++) { m->type = enum64_placeholder_id; m->offset = 0; } } } return 0; } static bool libbpf_needs_btf(const struct bpf_object *obj) { return obj->efile.btf_maps_shndx >= 0 || obj->efile.has_st_ops || obj->nr_extern > 0; } static bool kernel_needs_btf(const struct bpf_object *obj) { return obj->efile.has_st_ops; } static int bpf_object__init_btf(struct bpf_object *obj, Elf_Data *btf_data, Elf_Data *btf_ext_data) { int err = -ENOENT; if (btf_data) { obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); err = libbpf_get_error(obj->btf); if (err) { obj->btf = NULL; pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); goto out; } /* enforce 8-byte pointers for BPF-targeted BTFs */ btf__set_pointer_size(obj->btf, 8); } if (btf_ext_data) { struct btf_ext_info *ext_segs[3]; int seg_num, sec_num; if (!obj->btf) { pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", BTF_EXT_ELF_SEC, BTF_ELF_SEC); goto out; } obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); err = libbpf_get_error(obj->btf_ext); if (err) { pr_warn("Error loading ELF section %s: %d. 
Ignored and continue.\n", BTF_EXT_ELF_SEC, err); obj->btf_ext = NULL; goto out; } /* setup .BTF.ext to ELF section mapping */ ext_segs[0] = &obj->btf_ext->func_info; ext_segs[1] = &obj->btf_ext->line_info; ext_segs[2] = &obj->btf_ext->core_relo_info; for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { struct btf_ext_info *seg = ext_segs[seg_num]; const struct btf_ext_info_sec *sec; const char *sec_name; Elf_Scn *scn; if (seg->sec_cnt == 0) continue; seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); if (!seg->sec_idxs) { err = -ENOMEM; goto out; } sec_num = 0; for_each_btf_ext_sec(seg, sec) { /* preventively increment index to avoid doing * this before every continue below */ sec_num++; sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); if (str_is_empty(sec_name)) continue; scn = elf_sec_by_name(obj, sec_name); if (!scn) continue; seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); } } } out: if (err && libbpf_needs_btf(obj)) { pr_warn("BTF is required, but is missing or corrupted.\n"); return err; } return 0; } static int compare_vsi_off(const void *_a, const void *_b) { const struct btf_var_secinfo *a = _a; const struct btf_var_secinfo *b = _b; return a->offset - b->offset; } static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, struct btf_type *t) { __u32 size = 0, i, vars = btf_vlen(t); const char *sec_name = btf__name_by_offset(btf, t->name_off); struct btf_var_secinfo *vsi; bool fixup_offsets = false; int err; if (!sec_name) { pr_debug("No name found in string section for DATASEC kind.\n"); return -ENOENT; } /* Extern-backing datasecs (.ksyms, .kconfig) have their size and * variable offsets set at the previous step. Further, not every * extern BTF VAR has corresponding ELF symbol preserved, so we skip * all fixups altogether for such sections and go straight to sorting * VARs within their DATASEC. */ if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0) goto sort_vars; /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to * fix this up. But BPF static linker already fixes this up and fills * all the sizes and offsets during static linking. So this step has * to be optional. But the STV_HIDDEN handling is non-optional for any * non-extern DATASEC, so the variable fixup loop below handles both * functions at the same time, paying the cost of BTF VAR <-> ELF * symbol matching just once. 
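*
* Illustration (hypothetical global, not from this file): for a
* variable declared as
*	int my_var SEC(".data");
* Clang may emit DATASEC '.data' with size 0 and my_var's offset 0;
* the loop below recovers the section size from the ELF section
* header and the variable offset from the ELF symbol's st_value.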
*/ if (t->size == 0) { err = find_elf_sec_sz(obj, sec_name, &size); if (err || !size) { pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n", sec_name, size, err); return -ENOENT; } t->size = size; fixup_offsets = true; } for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { const struct btf_type *t_var; struct btf_var *var; const char *var_name; Elf64_Sym *sym; t_var = btf__type_by_id(btf, vsi->type); if (!t_var || !btf_is_var(t_var)) { pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name); return -EINVAL; } var = btf_var(t_var); if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN) continue; var_name = btf__name_by_offset(btf, t_var->name_off); if (!var_name) { pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n", sec_name, i); return -ENOENT; } sym = find_elf_var_sym(obj, var_name); if (IS_ERR(sym)) { pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n", sec_name, var_name); return -ENOENT; } if (fixup_offsets) vsi->offset = sym->st_value; /* if variable is a global/weak symbol, but has restricted * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR * as static. This follows similar logic for functions (BPF * subprogs) and influences libbpf's further decisions about * whether to make global data BPF array maps as * BPF_F_MMAPABLE. */ if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL) var->linkage = BTF_VAR_STATIC; } sort_vars: qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); return 0; } static int bpf_object_fixup_btf(struct bpf_object *obj) { int i, n, err = 0; if (!obj->btf) return 0; n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { struct btf_type *t = btf_type_by_id(obj->btf, i); /* Loader needs to fix up some of the things compiler * couldn't get its hands on while emitting BTF. This * is section size and global variable offset. We use * the info from the ELF itself for this purpose. 
*/ if (btf_is_datasec(t)) { err = btf_fixup_datasec(obj, obj->btf, t); if (err) return err; } } return 0; } static bool prog_needs_vmlinux_btf(struct bpf_program *prog) { if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || prog->type == BPF_PROG_TYPE_LSM) return true; /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs * also need vmlinux BTF */ if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) return true; return false; } static bool map_needs_vmlinux_btf(struct bpf_map *map) { return bpf_map__is_struct_ops(map); } static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) { struct bpf_program *prog; struct bpf_map *map; int i; /* CO-RE relocations need kernel BTF, only when btf_custom_path * is not specified */ if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) return true; /* Support for typed ksyms needs kernel BTF */ for (i = 0; i < obj->nr_extern; i++) { const struct extern_desc *ext; ext = &obj->externs[i]; if (ext->type == EXT_KSYM && ext->ksym.type_id) return true; } bpf_object__for_each_program(prog, obj) { if (!prog->autoload) continue; if (prog_needs_vmlinux_btf(prog)) return true; } bpf_object__for_each_map(map, obj) { if (map_needs_vmlinux_btf(map)) return true; } return false; } static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) { int err; /* btf_vmlinux could be loaded earlier */ if (obj->btf_vmlinux || obj->gen_loader) return 0; if (!force && !obj_needs_vmlinux_btf(obj)) return 0; obj->btf_vmlinux = btf__load_vmlinux_btf(); err = libbpf_get_error(obj->btf_vmlinux); if (err) { pr_warn("Error loading vmlinux BTF: %d\n", err); obj->btf_vmlinux = NULL; return err; } return 0; } static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) { struct btf *kern_btf = obj->btf; bool btf_mandatory, sanitize; int i, err = 0; if (!obj->btf) return 0; if (!kernel_supports(obj, FEAT_BTF)) { if (kernel_needs_btf(obj)) { err = -EOPNOTSUPP; goto report; } pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); return 0; } /* Even though some subprogs are global/weak, user might prefer more * permissive BPF verification process that BPF verifier performs for * static functions, taking into account more context from the caller * functions. In such case, they need to mark such subprogs with * __attribute__((visibility("hidden"))) and libbpf will adjust * corresponding FUNC BTF type to be marked as static and trigger more * involved BPF verification process. 
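*
* User-side sketch (hypothetical subprog, for illustration only):
*	__attribute__((visibility("hidden"))) int my_subprog(void *ctx)
*	{ ... }
* The loop below finds the BTF FUNC entry whose name matches such a
* program and rewrites its linkage from BTF_FUNC_GLOBAL to
* BTF_FUNC_STATIC.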
*/ for (i = 0; i < obj->nr_programs; i++) { struct bpf_program *prog = &obj->programs[i]; struct btf_type *t; const char *name; int j, n; if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) continue; n = btf__type_cnt(obj->btf); for (j = 1; j < n; j++) { t = btf_type_by_id(obj->btf, j); if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) continue; name = btf__str_by_offset(obj->btf, t->name_off); if (strcmp(name, prog->name) != 0) continue; t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); break; } } sanitize = btf_needs_sanitization(obj); if (sanitize) { const void *raw_data; __u32 sz; /* clone BTF to sanitize a copy and leave the original intact */ raw_data = btf__raw_data(obj->btf, &sz); kern_btf = btf__new(raw_data, sz); err = libbpf_get_error(kern_btf); if (err) return err; /* enforce 8-byte pointers for BPF-targeted BTFs */ btf__set_pointer_size(obj->btf, 8); err = bpf_object__sanitize_btf(obj, kern_btf); if (err) return err; } if (obj->gen_loader) { __u32 raw_size = 0; const void *raw_data = btf__raw_data(kern_btf, &raw_size); if (!raw_data) return -ENOMEM; bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); /* Pretend to have valid FD to pass various fd >= 0 checks. * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. */ btf__set_fd(kern_btf, 0); } else { /* currently BPF_BTF_LOAD only supports log_level 1 */ err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, obj->log_level ? 1 : 0, obj->token_fd); } if (sanitize) { if (!err) { /* move fd to libbpf's BTF */ btf__set_fd(obj->btf, btf__fd(kern_btf)); btf__set_fd(kern_btf, -1); } btf__free(kern_btf); } report: if (err) { btf_mandatory = kernel_needs_btf(obj); if (btf_mandatory) { pr_warn("Error loading .BTF into kernel: %d. BTF is mandatory, can't proceed.\n", err); } else { pr_info("Error loading .BTF into kernel: %d. 
BTF is optional, ignoring.\n", err); err = 0; } } return err; } static const char *elf_sym_str(const struct bpf_object *obj, size_t off) { const char *name; name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); if (!name) { pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", off, obj->path, elf_errmsg(-1)); return NULL; } return name; } static const char *elf_sec_str(const struct bpf_object *obj, size_t off) { const char *name; name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); if (!name) { pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", off, obj->path, elf_errmsg(-1)); return NULL; } return name; } static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) { Elf_Scn *scn; scn = elf_getscn(obj->efile.elf, idx); if (!scn) { pr_warn("elf: failed to get section(%zu) from %s: %s\n", idx, obj->path, elf_errmsg(-1)); return NULL; } return scn; } static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) { Elf_Scn *scn = NULL; Elf *elf = obj->efile.elf; const char *sec_name; while ((scn = elf_nextscn(elf, scn)) != NULL) { sec_name = elf_sec_name(obj, scn); if (!sec_name) return NULL; if (strcmp(sec_name, name) != 0) continue; return scn; } return NULL; } static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) { Elf64_Shdr *shdr; if (!scn) return NULL; shdr = elf64_getshdr(scn); if (!shdr) { pr_warn("elf: failed to get section(%zu) header from %s: %s\n", elf_ndxscn(scn), obj->path, elf_errmsg(-1)); return NULL; } return shdr; } static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) { const char *name; Elf64_Shdr *sh; if (!scn) return NULL; sh = elf_sec_hdr(obj, scn); if (!sh) return NULL; name = elf_sec_str(obj, sh->sh_name); if (!name) { pr_warn("elf: failed to get section(%zu) name from %s: %s\n", elf_ndxscn(scn), obj->path, elf_errmsg(-1)); return NULL; } return name; } static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) { Elf_Data *data; if (!scn) return NULL; data = elf_getdata(scn, 0); if (!data) { pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", obj->path, elf_errmsg(-1)); return NULL; } return data; } static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) { if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) return NULL; return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; } static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) { if (idx >= data->d_size / sizeof(Elf64_Rel)) return NULL; return (Elf64_Rel *)data->d_buf + idx; } static bool is_sec_name_dwarf(const char *name) { /* approximation, but the actual list is too long */ return str_has_pfx(name, ".debug_"); } static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) { /* no special handling of .strtab */ if (hdr->sh_type == SHT_STRTAB) return true; /* ignore .llvm_addrsig section as well */ if (hdr->sh_type == SHT_LLVM_ADDRSIG) return true; /* no subprograms will lead to an empty .text section, ignore it */ if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && strcmp(name, ".text") == 0) return true; /* DWARF sections */ if (is_sec_name_dwarf(name)) return true; if (str_has_pfx(name, ".rel")) { name += sizeof(".rel") - 1; /* DWARF section relocations */ if (is_sec_name_dwarf(name)) return true; /* .BTF and .BTF.ext don't need relocations */ if (strcmp(name, BTF_ELF_SEC) == 0 || strcmp(name, BTF_EXT_ELF_SEC) == 0) return true; } return false; } static int 
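/* Comparator for qsort(): orders bpf_programs by (sec_idx,
 * sec_insn_off); find_prog_by_sec_insn() below relies on this
 * ordering for its binary search. */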
cmp_progs(const void *_a, const void *_b) { const struct bpf_program *a = _a; const struct bpf_program *b = _b; if (a->sec_idx != b->sec_idx) return a->sec_idx < b->sec_idx ? -1 : 1; /* sec_insn_off can't be the same within the section */ return a->sec_insn_off < b->sec_insn_off ? -1 : 1; } static int bpf_object__elf_collect(struct bpf_object *obj) { struct elf_sec_desc *sec_desc; Elf *elf = obj->efile.elf; Elf_Data *btf_ext_data = NULL; Elf_Data *btf_data = NULL; int idx = 0, err = 0; const char *name; Elf_Data *data; Elf_Scn *scn; Elf64_Shdr *sh; /* ELF section indices are 0-based, but sec #0 is special "invalid" * section. Since section count retrieved by elf_getshdrnum() does * include sec #0, it is already the necessary size of an array to keep * all the sections. */ if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { pr_warn("elf: failed to get the number of sections for %s: %s\n", obj->path, elf_errmsg(-1)); return -LIBBPF_ERRNO__FORMAT; } obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); if (!obj->efile.secs) return -ENOMEM; /* a bunch of ELF parsing functionality depends on processing symbols, * so do the first pass and find the symbol table */ scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { sh = elf_sec_hdr(obj, scn); if (!sh) return -LIBBPF_ERRNO__FORMAT; if (sh->sh_type == SHT_SYMTAB) { if (obj->efile.symbols) { pr_warn("elf: multiple symbol tables in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } data = elf_sec_data(obj, scn); if (!data) return -LIBBPF_ERRNO__FORMAT; idx = elf_ndxscn(scn); obj->efile.symbols = data; obj->efile.symbols_shndx = idx; obj->efile.strtabidx = sh->sh_link; } } if (!obj->efile.symbols) { pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", obj->path); return -ENOENT; } scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { idx = elf_ndxscn(scn); sec_desc = &obj->efile.secs[idx]; sh = elf_sec_hdr(obj, scn); if (!sh) return -LIBBPF_ERRNO__FORMAT; name = elf_sec_str(obj, sh->sh_name); if (!name) return -LIBBPF_ERRNO__FORMAT; if (ignore_elf_section(sh, name)) continue; data = elf_sec_data(obj, scn); if (!data) return -LIBBPF_ERRNO__FORMAT; pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", idx, name, (unsigned long)data->d_size, (int)sh->sh_link, (unsigned long)sh->sh_flags, (int)sh->sh_type); if (strcmp(name, "license") == 0) { err = bpf_object__init_license(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "version") == 0) { err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "maps") == 0) { pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n"); return -ENOTSUP; } else if (strcmp(name, MAPS_ELF_SEC) == 0) { obj->efile.btf_maps_shndx = idx; } else if (strcmp(name, BTF_ELF_SEC) == 0) { if (sh->sh_type != SHT_PROGBITS) return -LIBBPF_ERRNO__FORMAT; btf_data = data; } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { if (sh->sh_type != SHT_PROGBITS) return -LIBBPF_ERRNO__FORMAT; btf_ext_data = data; } else if (sh->sh_type == SHT_SYMTAB) { /* already processed during the first pass above */ } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { if (sh->sh_flags & SHF_EXECINSTR) { if (strcmp(name, ".text") == 0) obj->efile.text_shndx = idx; err = bpf_object__add_programs(obj, data, name, idx); if (err) return err; } else if (strcmp(name, DATA_SEC) == 0 || str_has_pfx(name, DATA_SEC ".")) { sec_desc->sec_type = SEC_DATA; sec_desc->shdr = sh; 
sec_desc->data = data; } else if (strcmp(name, RODATA_SEC) == 0 || str_has_pfx(name, RODATA_SEC ".")) { sec_desc->sec_type = SEC_RODATA; sec_desc->shdr = sh; sec_desc->data = data; } else if (strcmp(name, STRUCT_OPS_SEC) == 0 || strcmp(name, STRUCT_OPS_LINK_SEC) == 0 || strcmp(name, "?" STRUCT_OPS_SEC) == 0 || strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) { sec_desc->sec_type = SEC_ST_OPS; sec_desc->shdr = sh; sec_desc->data = data; obj->efile.has_st_ops = true; } else if (strcmp(name, ARENA_SEC) == 0) { obj->efile.arena_data = data; obj->efile.arena_data_shndx = idx; } else { pr_info("elf: skipping unrecognized data section(%d) %s\n", idx, name); } } else if (sh->sh_type == SHT_REL) { int targ_sec_idx = sh->sh_info; /* points to other section */ if (sh->sh_entsize != sizeof(Elf64_Rel) || targ_sec_idx >= obj->efile.sec_cnt) return -LIBBPF_ERRNO__FORMAT; /* Only do relo for section with exec instructions */ if (!section_have_execinstr(obj, targ_sec_idx) && strcmp(name, ".rel" STRUCT_OPS_SEC) && strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && strcmp(name, ".rel?" STRUCT_OPS_SEC) && strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) && strcmp(name, ".rel" MAPS_ELF_SEC)) { pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", idx, name, targ_sec_idx, elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>"); continue; } sec_desc->sec_type = SEC_RELO; sec_desc->shdr = sh; sec_desc->data = data; } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 || str_has_pfx(name, BSS_SEC "."))) { sec_desc->sec_type = SEC_BSS; sec_desc->shdr = sh; sec_desc->data = data; } else { pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, (size_t)sh->sh_size); } } if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } /* change BPF program insns to native endianness for introspection */ if (!is_native_endianness(obj)) bpf_object_bswap_progs(obj); /* sort BPF programs by section name and in-section instruction offset * for faster search */ if (obj->nr_programs) qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); return bpf_object__init_btf(obj, btf_data, btf_ext_data); } static bool sym_is_extern(const Elf64_Sym *sym) { int bind = ELF64_ST_BIND(sym->st_info); /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ return sym->st_shndx == SHN_UNDEF && (bind == STB_GLOBAL || bind == STB_WEAK) && ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; } static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) { int bind = ELF64_ST_BIND(sym->st_info); int type = ELF64_ST_TYPE(sym->st_info); /* in .text section */ if (sym->st_shndx != text_shndx) return false; /* local function */ if (bind == STB_LOCAL && type == STT_SECTION) return true; /* global function */ return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; } static int find_extern_btf_id(const struct btf *btf, const char *ext_name) { const struct btf_type *t; const char *tname; int i, n; if (!btf) return -ESRCH; n = btf__type_cnt(btf); for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (!btf_is_var(t) && !btf_is_func(t)) continue; tname = btf__name_by_offset(btf, t->name_off); if (strcmp(tname, ext_name)) continue; if (btf_is_var(t) && btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) return -EINVAL; if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) return -EINVAL; return i; } return -ENOENT; } static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { 
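/* Given the BTF id of an extern VAR or FUNC, scan all DATASEC types
 * and return the id of the DATASEC (e.g. ".kconfig" or ".ksyms")
 * whose var_secinfos list references it. */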
const struct btf_var_secinfo *vs; const struct btf_type *t; int i, j, n; if (!btf) return -ESRCH; n = btf__type_cnt(btf); for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (!btf_is_datasec(t)) continue; vs = btf_var_secinfos(t); for (j = 0; j < btf_vlen(t); j++, vs++) { if (vs->type == ext_btf_id) return i; } } return -ENOENT; } static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, bool *is_signed) { const struct btf_type *t; const char *name; t = skip_mods_and_typedefs(btf, id, NULL); name = btf__name_by_offset(btf, t->name_off); if (is_signed) *is_signed = false; switch (btf_kind(t)) { case BTF_KIND_INT: { int enc = btf_int_encoding(t); if (enc & BTF_INT_BOOL) return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; if (is_signed) *is_signed = enc & BTF_INT_SIGNED; if (t->size == 1) return KCFG_CHAR; if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) return KCFG_UNKNOWN; return KCFG_INT; } case BTF_KIND_ENUM: if (t->size != 4) return KCFG_UNKNOWN; if (strcmp(name, "libbpf_tristate")) return KCFG_UNKNOWN; return KCFG_TRISTATE; case BTF_KIND_ENUM64: if (strcmp(name, "libbpf_tristate")) return KCFG_UNKNOWN; return KCFG_TRISTATE; case BTF_KIND_ARRAY: if (btf_array(t)->nelems == 0) return KCFG_UNKNOWN; if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) return KCFG_UNKNOWN; return KCFG_CHAR_ARR; default: return KCFG_UNKNOWN; } } static int cmp_externs(const void *_a, const void *_b) { const struct extern_desc *a = _a; const struct extern_desc *b = _b; if (a->type != b->type) return a->type < b->type ? -1 : 1; if (a->type == EXT_KCFG) { /* descending order by alignment requirements */ if (a->kcfg.align != b->kcfg.align) return a->kcfg.align > b->kcfg.align ? -1 : 1; /* ascending order by size, within same alignment class */ if (a->kcfg.sz != b->kcfg.sz) return a->kcfg.sz < b->kcfg.sz ? -1 : 1; } /* resolve ties by name */ return strcmp(a->name, b->name); } static int find_int_btf_id(const struct btf *btf) { const struct btf_type *t; int i, n; n = btf__type_cnt(btf); for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (btf_is_int(t) && btf_int_bits(t) == 32) return i; } return 0; } static int add_dummy_ksym_var(struct btf *btf) { int i, int_btf_id, sec_btf_id, dummy_var_btf_id; const struct btf_var_secinfo *vs; const struct btf_type *sec; if (!btf) return 0; sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, BTF_KIND_DATASEC); if (sec_btf_id < 0) return 0; sec = btf__type_by_id(btf, sec_btf_id); vs = btf_var_secinfos(sec); for (i = 0; i < btf_vlen(sec); i++, vs++) { const struct btf_type *vt; vt = btf__type_by_id(btf, vs->type); if (btf_is_func(vt)) break; } /* No func in ksyms sec. No need to add dummy var. 
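* (When the dummy VAR is added, bpf_object__collect_externs() below
* reuses its BTF id to replace each extern function's var_secinfo
* type, and its name string to fill in missing parameter names.)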
*/ if (i == btf_vlen(sec)) return 0; int_btf_id = find_int_btf_id(btf); dummy_var_btf_id = btf__add_var(btf, "dummy_ksym", BTF_VAR_GLOBAL_ALLOCATED, int_btf_id); if (dummy_var_btf_id < 0) pr_warn("cannot create a dummy_ksym var\n"); return dummy_var_btf_id; } static int bpf_object__collect_externs(struct bpf_object *obj) { struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; const struct btf_type *t; struct extern_desc *ext; int i, n, off, dummy_var_btf_id; const char *ext_name, *sec_name; size_t ext_essent_len; Elf_Scn *scn; Elf64_Shdr *sh; if (!obj->efile.symbols) return 0; scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); sh = elf_sec_hdr(obj, scn); if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) return -LIBBPF_ERRNO__FORMAT; dummy_var_btf_id = add_dummy_ksym_var(obj->btf); if (dummy_var_btf_id < 0) return dummy_var_btf_id; n = sh->sh_size / sh->sh_entsize; pr_debug("looking for externs among %d symbols...\n", n); for (i = 0; i < n; i++) { Elf64_Sym *sym = elf_sym_by_idx(obj, i); if (!sym) return -LIBBPF_ERRNO__FORMAT; if (!sym_is_extern(sym)) continue; ext_name = elf_sym_str(obj, sym->st_name); if (!ext_name || !ext_name[0]) continue; ext = obj->externs; ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); if (!ext) return -ENOMEM; obj->externs = ext; ext = &ext[obj->nr_extern]; memset(ext, 0, sizeof(*ext)); obj->nr_extern++; ext->btf_id = find_extern_btf_id(obj->btf, ext_name); if (ext->btf_id <= 0) { pr_warn("failed to find BTF for extern '%s': %d\n", ext_name, ext->btf_id); return ext->btf_id; } t = btf__type_by_id(obj->btf, ext->btf_id); ext->name = btf__name_by_offset(obj->btf, t->name_off); ext->sym_idx = i; ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; ext_essent_len = bpf_core_essential_name_len(ext->name); ext->essent_name = NULL; if (ext_essent_len != strlen(ext->name)) { ext->essent_name = strndup(ext->name, ext_essent_len); if (!ext->essent_name) return -ENOMEM; } ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); if (ext->sec_btf_id <= 0) { pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", ext_name, ext->btf_id, ext->sec_btf_id); return ext->sec_btf_id; } sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); sec_name = btf__name_by_offset(obj->btf, sec->name_off); if (strcmp(sec_name, KCONFIG_SEC) == 0) { if (btf_is_func(t)) { pr_warn("extern function %s is unsupported under %s section\n", ext->name, KCONFIG_SEC); return -ENOTSUP; } kcfg_sec = sec; ext->type = EXT_KCFG; ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); if (ext->kcfg.sz <= 0) { pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", ext_name, ext->kcfg.sz); return ext->kcfg.sz; } ext->kcfg.align = btf__align_of(obj->btf, t->type); if (ext->kcfg.align <= 0) { pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", ext_name, ext->kcfg.align); return -EINVAL; } ext->kcfg.type = find_kcfg_type(obj->btf, t->type, &ext->kcfg.is_signed); if (ext->kcfg.type == KCFG_UNKNOWN) { pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name); return -ENOTSUP; } } else if (strcmp(sec_name, KSYMS_SEC) == 0) { ksym_sec = sec; ext->type = EXT_KSYM; skip_mods_and_typedefs(obj->btf, t->type, &ext->ksym.type_id); } else { pr_warn("unrecognized extern section '%s'\n", sec_name); return -ENOTSUP; } } pr_debug("collected %d externs total\n", obj->nr_extern); if (!obj->nr_extern) return 0; /* sort externs by type, for kcfg ones also by (align, size, name) */ qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); /* for .ksyms section, 
we need to turn all externs into allocated * variables in BTF to pass kernel verification; we do this by * pretending that each extern is a 8-byte variable */ if (ksym_sec) { /* find existing 4-byte integer type in BTF to use for fake * extern variables in DATASEC */ int int_btf_id = find_int_btf_id(obj->btf); /* For extern function, a dummy_var added earlier * will be used to replace the vs->type and * its name string will be used to refill * the missing param's name. */ const struct btf_type *dummy_var; dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type != EXT_KSYM) continue; pr_debug("extern (ksym) #%d: symbol %d, name %s\n", i, ext->sym_idx, ext->name); } sec = ksym_sec; n = btf_vlen(sec); for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; struct btf_type *vt; vt = (void *)btf__type_by_id(obj->btf, vs->type); ext_name = btf__name_by_offset(obj->btf, vt->name_off); ext = find_extern_by_name(obj, ext_name); if (!ext) { pr_warn("failed to find extern definition for BTF %s '%s'\n", btf_kind_str(vt), ext_name); return -ESRCH; } if (btf_is_func(vt)) { const struct btf_type *func_proto; struct btf_param *param; int j; func_proto = btf__type_by_id(obj->btf, vt->type); param = btf_params(func_proto); /* Reuse the dummy_var string if the * func proto does not have param name. */ for (j = 0; j < btf_vlen(func_proto); j++) if (param[j].type && !param[j].name_off) param[j].name_off = dummy_var->name_off; vs->type = dummy_var_btf_id; vt->info &= ~0xffff; vt->info |= BTF_FUNC_GLOBAL; } else { btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; vt->type = int_btf_id; } vs->offset = off; vs->size = sizeof(int); } sec->size = off; } if (kcfg_sec) { sec = kcfg_sec; /* for kcfg externs calculate their offsets within a .kconfig map */ off = 0; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type != EXT_KCFG) continue; ext->kcfg.data_off = roundup(off, ext->kcfg.align); off = ext->kcfg.data_off + ext->kcfg.sz; pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", i, ext->sym_idx, ext->kcfg.data_off, ext->name); } sec->size = off; n = btf_vlen(sec); for (i = 0; i < n; i++) { struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; t = btf__type_by_id(obj->btf, vs->type); ext_name = btf__name_by_offset(obj->btf, t->name_off); ext = find_extern_by_name(obj, ext_name); if (!ext) { pr_warn("failed to find extern definition for BTF var '%s'\n", ext_name); return -ESRCH; } btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; vs->offset = ext->kcfg.data_off; } } return 0; } static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) { return prog->sec_idx == obj->efile.text_shndx; } struct bpf_program * bpf_object__find_program_by_name(const struct bpf_object *obj, const char *name) { struct bpf_program *prog; bpf_object__for_each_program(prog, obj) { if (prog_is_subprog(obj, prog)) continue; if (!strcmp(prog->name, name)) return prog; } return errno = ENOENT, NULL; } static bool bpf_object__shndx_is_data(const struct bpf_object *obj, int shndx) { switch (obj->efile.secs[shndx].sec_type) { case SEC_BSS: case SEC_DATA: case SEC_RODATA: return true; default: return false; } } static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, int shndx) { return shndx == obj->efile.btf_maps_shndx; } static enum libbpf_map_type bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) { if (shndx == 
obj->efile.symbols_shndx) return LIBBPF_MAP_KCONFIG; switch (obj->efile.secs[shndx].sec_type) { case SEC_BSS: return LIBBPF_MAP_BSS; case SEC_DATA: return LIBBPF_MAP_DATA; case SEC_RODATA: return LIBBPF_MAP_RODATA; default: return LIBBPF_MAP_UNSPEC; } } static int bpf_program__record_reloc(struct bpf_program *prog, struct reloc_desc *reloc_desc, __u32 insn_idx, const char *sym_name, const Elf64_Sym *sym, const Elf64_Rel *rel) { struct bpf_insn *insn = &prog->insns[insn_idx]; size_t map_idx, nr_maps = prog->obj->nr_maps; struct bpf_object *obj = prog->obj; __u32 shdr_idx = sym->st_shndx; enum libbpf_map_type type; const char *sym_sec_name; struct bpf_map *map; if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", prog->name, sym_name, insn_idx, insn->code); return -LIBBPF_ERRNO__RELOC; } if (sym_is_extern(sym)) { int sym_idx = ELF64_R_SYM(rel->r_info); int i, n = obj->nr_extern; struct extern_desc *ext; for (i = 0; i < n; i++) { ext = &obj->externs[i]; if (ext->sym_idx == sym_idx) break; } if (i >= n) { pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", prog->name, sym_name, sym_idx); return -LIBBPF_ERRNO__RELOC; } pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", prog->name, i, ext->name, ext->sym_idx, insn_idx); if (insn->code == (BPF_JMP | BPF_CALL)) reloc_desc->type = RELO_EXTERN_CALL; else reloc_desc->type = RELO_EXTERN_LD64; reloc_desc->insn_idx = insn_idx; reloc_desc->ext_idx = i; return 0; } /* sub-program call relocation */ if (is_call_insn(insn)) { if (insn->src_reg != BPF_PSEUDO_CALL) { pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); return -LIBBPF_ERRNO__RELOC; } /* text_shndx can be 0, if no default "main" program exists */ if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", prog->name, sym_name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } if (sym->st_value % BPF_INSN_SZ) { pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", prog->name, sym_name, (size_t)sym->st_value); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_CALL; reloc_desc->insn_idx = insn_idx; reloc_desc->sym_off = sym->st_value; return 0; } if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", prog->name, sym_name, shdr_idx); return -LIBBPF_ERRNO__RELOC; } /* loading subprog addresses */ if (sym_is_subprog(sym, obj->efile.text_shndx)) { /* global_func: sym->st_value = offset in the section, insn->imm = 0. * local_func: sym->st_value = 0, insn->imm = offset in the section. 
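*
* In either encoding the in-section offset is expressed in bytes, so
* both components must be multiples of BPF_INSN_SZ (8 bytes); the
* check below rejects misaligned ones.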
*/ if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", prog->name, sym_name, (size_t)sym->st_value, insn->imm); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_SUBPROG_ADDR; reloc_desc->insn_idx = insn_idx; reloc_desc->sym_off = sym->st_value; return 0; } type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); /* arena data relocation */ if (shdr_idx == obj->efile.arena_data_shndx) { reloc_desc->type = RELO_DATA; reloc_desc->insn_idx = insn_idx; reloc_desc->map_idx = obj->arena_map - obj->maps; reloc_desc->sym_off = sym->st_value; return 0; } /* generic map reference relocation */ if (type == LIBBPF_MAP_UNSPEC) { if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", prog->name, sym_name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } for (map_idx = 0; map_idx < nr_maps; map_idx++) { map = &obj->maps[map_idx]; if (map->libbpf_type != type || map->sec_idx != sym->st_shndx || map->sec_offset != sym->st_value) continue; pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", prog->name, map_idx, map->name, map->sec_idx, map->sec_offset, insn_idx); break; } if (map_idx >= nr_maps) { pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", prog->name, sym_sec_name, (size_t)sym->st_value); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_LD64; reloc_desc->insn_idx = insn_idx; reloc_desc->map_idx = map_idx; reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ return 0; } /* global data map relocation */ if (!bpf_object__shndx_is_data(obj, shdr_idx)) { pr_warn("prog '%s': bad data relo against section '%s'\n", prog->name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } for (map_idx = 0; map_idx < nr_maps; map_idx++) { map = &obj->maps[map_idx]; if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) continue; pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", prog->name, map_idx, map->name, map->sec_idx, map->sec_offset, insn_idx); break; } if (map_idx >= nr_maps) { pr_warn("prog '%s': data relo failed to find map for section '%s'\n", prog->name, sym_sec_name); return -LIBBPF_ERRNO__RELOC; } reloc_desc->type = RELO_DATA; reloc_desc->insn_idx = insn_idx; reloc_desc->map_idx = map_idx; reloc_desc->sym_off = sym->st_value; return 0; } static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) { return insn_idx >= prog->sec_insn_off && insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; } static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, size_t sec_idx, size_t insn_idx) { int l = 0, r = obj->nr_programs - 1, m; struct bpf_program *prog; if (!obj->nr_programs) return NULL; while (l < r) { m = l + (r - l + 1) / 2; prog = &obj->programs[m]; if (prog->sec_idx < sec_idx || (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) l = m; else r = m - 1; } /* matching program could be at index l, but it still might be the * wrong one, so we need to double check conditions for the last time */ prog = &obj->programs[l]; if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) return prog; return NULL; } static int bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { const char *relo_sec_name, *sec_name; size_t sec_idx = shdr->sh_info, sym_idx; struct bpf_program *prog; struct reloc_desc *relos; int 
err, i, nrels; const char *sym_name; __u32 insn_idx; Elf_Scn *scn; Elf_Data *scn_data; Elf64_Sym *sym; Elf64_Rel *rel; if (sec_idx >= obj->efile.sec_cnt) return -EINVAL; scn = elf_sec_by_idx(obj, sec_idx); scn_data = elf_sec_data(obj, scn); if (!scn_data) return -LIBBPF_ERRNO__FORMAT; relo_sec_name = elf_sec_str(obj, shdr->sh_name); sec_name = elf_sec_name(obj, scn); if (!relo_sec_name || !sec_name) return -EINVAL; pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", relo_sec_name, sec_idx, sec_name); nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { rel = elf_rel_by_idx(data, i); if (!rel) { pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); return -LIBBPF_ERRNO__FORMAT; } sym_idx = ELF64_R_SYM(rel->r_info); sym = elf_sym_by_idx(obj, sym_idx); if (!sym) { pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", relo_sec_name, sym_idx, i); return -LIBBPF_ERRNO__FORMAT; } if (sym->st_shndx >= obj->efile.sec_cnt) { pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); return -LIBBPF_ERRNO__FORMAT; } if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", relo_sec_name, (size_t)rel->r_offset, i); return -LIBBPF_ERRNO__FORMAT; } insn_idx = rel->r_offset / BPF_INSN_SZ; /* relocations against static functions are recorded as * relocations against the section that contains a function; * in such case, symbol will be STT_SECTION and sym.st_name * will point to empty string (0), so fetch section name * instead */ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); else sym_name = elf_sym_str(obj, sym->st_name); sym_name = sym_name ?: "<?"; pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", relo_sec_name, i, insn_idx, sym_name); prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); if (!prog) { pr_warn("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", relo_sec_name, i, sec_name, insn_idx); continue; } relos = libbpf_reallocarray(prog->reloc_desc, prog->nr_reloc + 1, sizeof(*relos)); if (!relos) return -ENOMEM; prog->reloc_desc = relos; /* adjust insn_idx to local BPF program frame of reference */ insn_idx -= prog->sec_insn_off; err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], insn_idx, sym_name, sym, rel); if (err) return err; prog->nr_reloc++; } return 0; } static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map) { int id; if (!obj->btf) return -ENOENT; /* if it's BTF-defined map, we don't need to search for type IDs. * For struct_ops map, it does not need btf_key_type_id and * btf_value_type_id. */ if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map)) return 0; /* * LLVM annotates global data differently in BTF, that is, * only as '.data', '.bss' or '.rodata'. */ if (!bpf_map__is_internal(map)) return -ENOENT; id = btf__find_by_name(obj->btf, map->real_name); if (id < 0) return id; map->btf_key_type_id = 0; map->btf_value_type_id = id; return 0; } static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) { char file[PATH_MAX], buff[4096]; FILE *fp; __u32 val; int err; snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); memset(info, 0, sizeof(*info)); fp = fopen(file, "re"); if (!fp) { err = -errno; pr_warn("failed to open %s: %d. 
No procfs support?\n", file, err); return err; } while (fgets(buff, sizeof(buff), fp)) { if (sscanf(buff, "map_type:\t%u", &val) == 1) info->type = val; else if (sscanf(buff, "key_size:\t%u", &val) == 1) info->key_size = val; else if (sscanf(buff, "value_size:\t%u", &val) == 1) info->value_size = val; else if (sscanf(buff, "max_entries:\t%u", &val) == 1) info->max_entries = val; else if (sscanf(buff, "map_flags:\t%i", &val) == 1) info->map_flags = val; } fclose(fp); return 0; } bool bpf_map__autocreate(const struct bpf_map *map) { return map->autocreate; } int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) { if (map->obj->loaded) return libbpf_err(-EBUSY); map->autocreate = autocreate; return 0; } int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) { if (!bpf_map__is_struct_ops(map)) return libbpf_err(-EINVAL); map->autoattach = autoattach; return 0; } bool bpf_map__autoattach(const struct bpf_map *map) { return map->autoattach; } int bpf_map__reuse_fd(struct bpf_map *map, int fd) { struct bpf_map_info info; __u32 len = sizeof(info), name_len; int new_fd, err; char *new_name; memset(&info, 0, len); err = bpf_map_get_info_by_fd(fd, &info, &len); if (err && errno == EINVAL) err = bpf_get_map_info_from_fdinfo(fd, &info); if (err) return libbpf_err(err); name_len = strlen(info.name); if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) new_name = strdup(map->name); else new_name = strdup(info.name); if (!new_name) return libbpf_err(-errno); /* * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set. * This is similar to what we do in ensure_good_fd(), but without * closing original FD. */ new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); if (new_fd < 0) { err = -errno; goto err_free_new_name; } err = reuse_fd(map->fd, new_fd); if (err) goto err_free_new_name; free(map->name); map->name = new_name; map->def.type = info.type; map->def.key_size = info.key_size; map->def.value_size = info.value_size; map->def.max_entries = info.max_entries; map->def.map_flags = info.map_flags; map->btf_key_type_id = info.btf_key_type_id; map->btf_value_type_id = info.btf_value_type_id; map->reused = true; map->map_extra = info.map_extra; return 0; err_free_new_name: free(new_name); return libbpf_err(err); } __u32 bpf_map__max_entries(const struct bpf_map *map) { return map->def.max_entries; } struct bpf_map *bpf_map__inner_map(struct bpf_map *map) { if (!bpf_map_type__is_map_in_map(map->def.type)) return errno = EINVAL, NULL; return map->inner_map; } int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) { if (map->obj->loaded) return libbpf_err(-EBUSY); map->def.max_entries = max_entries; /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ if (map_is_ringbuf(map)) map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); return 0; } static int bpf_object_prepare_token(struct bpf_object *obj) { const char *bpffs_path; int bpffs_fd = -1, token_fd, err; bool mandatory; enum libbpf_print_level level; /* token is explicitly prevented */ if (obj->token_path && obj->token_path[0] == '\0') { pr_debug("object '%s': token is prevented, skipping...\n", obj->name); return 0; } mandatory = obj->token_path != NULL; level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG; bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH; bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR); if (bpffs_fd < 0) { err = -errno; __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n", obj->name, err, bpffs_path, mandatory ? 
"" : ", skipping optional step..."); return mandatory ? err : 0; } token_fd = bpf_token_create(bpffs_fd, 0); close(bpffs_fd); if (token_fd < 0) { if (!mandatory && token_fd == -ENOENT) { pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n", obj->name, bpffs_path); return 0; } __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n", obj->name, token_fd, bpffs_path, mandatory ? "" : ", skipping optional step..."); return mandatory ? token_fd : 0; } obj->feat_cache = calloc(1, sizeof(*obj->feat_cache)); if (!obj->feat_cache) { close(token_fd); return -ENOMEM; } obj->token_fd = token_fd; obj->feat_cache->token_fd = token_fd; return 0; } static int bpf_object__probe_loading(struct bpf_object *obj) { char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; int ret, insn_cnt = ARRAY_SIZE(insns); LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = obj->token_fd, .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0, ); if (obj->gen_loader) return 0; ret = bump_rlimit_memlock(); if (ret) pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret); /* make sure basic loading works */ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts); if (ret < 0) ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); if (ret < 0) { ret = errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " "program. Make sure your kernel supports BPF " "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " "set to big enough value.\n", __func__, cp, ret); return -ret; } close(ret); return 0; } bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) { if (obj->gen_loader) /* To generate loader program assume the latest kernel * to avoid doing extra prog_load, map_create syscalls. 
*/ return true; if (obj->token_fd) return feat_supported(obj->feat_cache, feat_id); return feat_supported(NULL, feat_id); } static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) { struct bpf_map_info map_info; char msg[STRERR_BUFSIZE]; __u32 map_info_len = sizeof(map_info); int err; memset(&map_info, 0, map_info_len); err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); if (err && errno == EINVAL) err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); if (err) { pr_warn("failed to get map info for map FD %d: %s\n", map_fd, libbpf_strerror_r(errno, msg, sizeof(msg))); return false; } return (map_info.type == map->def.type && map_info.key_size == map->def.key_size && map_info.value_size == map->def.value_size && map_info.max_entries == map->def.max_entries && map_info.map_flags == map->def.map_flags && map_info.map_extra == map->map_extra); } static int bpf_object__reuse_map(struct bpf_map *map) { char *cp, errmsg[STRERR_BUFSIZE]; int err, pin_fd; pin_fd = bpf_obj_get(map->pin_path); if (pin_fd < 0) { err = -errno; if (err == -ENOENT) { pr_debug("found no pinned map to reuse at '%s'\n", map->pin_path); return 0; } cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warn("couldn't retrieve pinned map '%s': %s\n", map->pin_path, cp); return err; } if (!map_is_reuse_compat(map, pin_fd)) { pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", map->pin_path); close(pin_fd); return -EINVAL; } err = bpf_map__reuse_fd(map, pin_fd); close(pin_fd); if (err) return err; map->pinned = true; pr_debug("reused pinned map at '%s'\n", map->pin_path); return 0; } static int bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) { enum libbpf_map_type map_type = map->libbpf_type; char *cp, errmsg[STRERR_BUFSIZE]; int err, zero = 0; size_t mmap_sz; if (obj->gen_loader) { bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, map->mmaped, map->def.value_size); if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); return 0; } err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); if (err) { err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("map '%s': failed to set initial contents: %s\n", bpf_map__name(map), cp); return err; } /* Freeze .rodata and .kconfig map as read-only from syscall side. */ if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { err = bpf_map_freeze(map->fd); if (err) { err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("map '%s': failed to freeze as read-only: %s\n", bpf_map__name(map), cp); return err; } } /* Remap anonymous mmap()-ed "map initialization image" as * a BPF map-backed mmap()-ed memory, but preserving the same * memory address. This will cause kernel to change process' * page table to point to a different piece of kernel memory, * but from userspace point of view memory address (and its * contents, being identical at this point) will stay the * same. This mapping will be released by bpf_object__close() * as per normal clean up procedure. 
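*
* Rough sketch of the effect (addresses purely illustrative):
*	before: 0x7f0000000000 ... anonymous rw pages (init image)
*	after:  0x7f0000000000 ... BPF map-backed pages, same bytes
* MAP_FIXED is what allows the new mapping to replace the old one
* at the same address.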
*/ mmap_sz = bpf_map_mmap_sz(map); if (map->def.map_flags & BPF_F_MMAPABLE) { void *mmaped; int prot; if (map->def.map_flags & BPF_F_RDONLY_PROG) prot = PROT_READ; else prot = PROT_READ | PROT_WRITE; mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0); if (mmaped == MAP_FAILED) { err = -errno; pr_warn("map '%s': failed to re-mmap() contents: %d\n", bpf_map__name(map), err); return err; } map->mmaped = mmaped; } else if (map->mmaped) { munmap(map->mmaped, mmap_sz); map->mmaped = NULL; } return 0; } static void bpf_map__destroy(struct bpf_map *map); static bool map_is_created(const struct bpf_map *map) { return map->obj->loaded || map->reused; } static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { LIBBPF_OPTS(bpf_map_create_opts, create_attr); struct bpf_map_def *def = &map->def; const char *map_name = NULL; int err = 0, map_fd; if (kernel_supports(obj, FEAT_PROG_NAME)) map_name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_flags = def->map_flags; create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; create_attr.token_fd = obj->token_fd; if (obj->token_fd) create_attr.map_flags |= BPF_F_TOKEN_FD; if (bpf_map__is_struct_ops(map)) { create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; if (map->mod_btf_fd >= 0) { create_attr.value_type_btf_obj_fd = map->mod_btf_fd; create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; } } if (obj->btf && btf__fd(obj->btf) >= 0) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; } if (bpf_map_type__is_map_in_map(def->type)) { if (map->inner_map) { err = map_set_def_max_entries(map->inner_map); if (err) return err; err = bpf_object__create_map(obj, map->inner_map, true); if (err) { pr_warn("map '%s': failed to create inner map: %d\n", map->name, err); return err; } map->inner_map_fd = map->inner_map->fd; } if (map->inner_map_fd >= 0) create_attr.inner_map_fd = map->inner_map_fd; } switch (def->type) { case BPF_MAP_TYPE_PERF_EVENT_ARRAY: case BPF_MAP_TYPE_CGROUP_ARRAY: case BPF_MAP_TYPE_STACK_TRACE: case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP_HASH: case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_SOCKMAP: case BPF_MAP_TYPE_SOCKHASH: case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: case BPF_MAP_TYPE_ARENA: create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; break; case BPF_MAP_TYPE_STRUCT_OPS: create_attr.btf_value_type_id = 0; break; default: break; } if (obj->gen_loader) { bpf_gen__map_create(obj->gen_loader, def->type, map_name, def->key_size, def->value_size, def->max_entries, &create_attr, is_inner ? -1 : map - obj->maps); /* We keep pretenting we have valid FD to pass various fd >= 0 * checks by just keeping original placeholder FDs in place. * See bpf_object__add_map() comment. * This placeholder fd will not be used with any syscall and * will be reset to -1 eventually. */ map_fd = map->fd; } else { map_fd = bpf_map_create(def->type, map_name, def->key_size, def->value_size, def->max_entries, &create_attr); } if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { char *cp, errmsg[STRERR_BUFSIZE]; err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). 
Retrying without BTF.\n", map->name, cp, err); create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; map_fd = bpf_map_create(def->type, map_name, def->key_size, def->value_size, def->max_entries, &create_attr); } if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { if (obj->gen_loader) map->inner_map->fd = -1; bpf_map__destroy(map->inner_map); zfree(&map->inner_map); } if (map_fd < 0) return map_fd; /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */ if (map->fd == map_fd) return 0; /* Keep placeholder FD value but now point it to the BPF map object. * This way everything that relied on this map's FD (e.g., relocated * ldimm64 instructions) will stay valid and won't need adjustments. * map->fd stays valid but now point to what map_fd points to. */ return reuse_fd(map->fd, map_fd); } static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) { const struct bpf_map *targ_map; unsigned int i; int fd, err = 0; for (i = 0; i < map->init_slots_sz; i++) { if (!map->init_slots[i]) continue; targ_map = map->init_slots[i]; fd = targ_map->fd; if (obj->gen_loader) { bpf_gen__populate_outer_map(obj->gen_loader, map - obj->maps, i, targ_map - obj->maps); } else { err = bpf_map_update_elem(map->fd, &i, &fd, 0); } if (err) { err = -errno; pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", map->name, i, targ_map->name, fd, err); return err; } pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", map->name, i, targ_map->name, fd); } zfree(&map->init_slots); map->init_slots_sz = 0; return 0; } static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) { const struct bpf_program *targ_prog; unsigned int i; int fd, err; if (obj->gen_loader) return -ENOTSUP; for (i = 0; i < map->init_slots_sz; i++) { if (!map->init_slots[i]) continue; targ_prog = map->init_slots[i]; fd = bpf_program__fd(targ_prog); err = bpf_map_update_elem(map->fd, &i, &fd, 0); if (err) { err = -errno; pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", map->name, i, targ_prog->name, fd, err); return err; } pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", map->name, i, targ_prog->name, fd); } zfree(&map->init_slots); map->init_slots_sz = 0; return 0; } static int bpf_object_init_prog_arrays(struct bpf_object *obj) { struct bpf_map *map; int i, err; for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) continue; err = init_prog_array_slots(obj, map); if (err < 0) return err; } return 0; } static int map_set_def_max_entries(struct bpf_map *map) { if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { int nr_cpus; nr_cpus = libbpf_num_possible_cpus(); if (nr_cpus < 0) { pr_warn("map '%s': failed to determine number of system CPUs: %d\n", map->name, nr_cpus); return nr_cpus; } pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); map->def.max_entries = nr_cpus; } return 0; } static int bpf_object__create_maps(struct bpf_object *obj) { struct bpf_map *map; char *cp, errmsg[STRERR_BUFSIZE]; unsigned int i, j; int err; bool retried; for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; /* To support old kernels, we skip creating global data maps * (.rodata, .data, .kconfig, etc); later on, during program * loading, if we detect that at least one of the to-be-loaded * programs is referencing any global data map, we'll error * out 
with program name and relocation index logged. * This approach allows to accommodate Clang emitting * unnecessary .rodata.str1.1 sections for string literals, * but also it allows to have CO-RE applications that use * global variables in some of BPF programs, but not others. * If those global variable-using programs are not loaded at * runtime due to bpf_program__set_autoload(prog, false), * bpf_object loading will succeed just fine even on old * kernels. */ if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA)) map->autocreate = false; if (!map->autocreate) { pr_debug("map '%s': skipped auto-creating...\n", map->name); continue; } err = map_set_def_max_entries(map); if (err) goto err_out; retried = false; retry: if (map->pin_path) { err = bpf_object__reuse_map(map); if (err) { pr_warn("map '%s': error reusing pinned map\n", map->name); goto err_out; } if (retried && map->fd < 0) { pr_warn("map '%s': cannot find pinned map\n", map->name); err = -ENOENT; goto err_out; } } if (map->reused) { pr_debug("map '%s': skipping creation (preset fd=%d)\n", map->name, map->fd); } else { err = bpf_object__create_map(obj, map, false); if (err) goto err_out; pr_debug("map '%s': created successfully, fd=%d\n", map->name, map->fd); if (bpf_map__is_internal(map)) { err = bpf_object__populate_internal_map(obj, map); if (err < 0) goto err_out; } else if (map->def.type == BPF_MAP_TYPE_ARENA) { map->mmaped = mmap((void *)(long)map->map_extra, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED, map->fd, 0); if (map->mmaped == MAP_FAILED) { err = -errno; map->mmaped = NULL; pr_warn("map '%s': failed to mmap arena: %d\n", map->name, err); return err; } if (obj->arena_data) { memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz); zfree(&obj->arena_data); } } if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { err = init_map_in_map_slots(obj, map); if (err < 0) goto err_out; } } if (map->pin_path && !map->pinned) { err = bpf_map__pin(map, NULL); if (err) { if (!retried && err == -EEXIST) { retried = true; goto retry; } pr_warn("map '%s': failed to auto-pin at '%s': %d\n", map->name, map->pin_path, err); goto err_out; } } } return 0; err_out: cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); pr_perm_msg(err); for (j = 0; j < i; j++) zclose(obj->maps[j].fd); return err; } static bool bpf_core_is_flavor_sep(const char *s) { /* check X___Y name pattern, where X and Y are not underscores */ return s[0] != '_' && /* X */ s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ s[4] != '_'; /* Y */ } /* Given 'some_struct_name___with_flavor' return the length of a name prefix * before last triple underscore. Struct name part after last triple * underscore is ignored by BPF CO-RE relocation during relocation matching. 
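*
* E.g. bpf_core_essential_name_len("task_struct___v518") returns 11,
* the length of "task_struct", so "task_struct___v518" matches
* kernel type "task_struct" during candidate search.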
*/ size_t bpf_core_essential_name_len(const char *name) { size_t n = strlen(name); int i; for (i = n - 5; i >= 0; i--) { if (bpf_core_is_flavor_sep(name + i)) return i + 1; } return n; } void bpf_core_free_cands(struct bpf_core_cand_list *cands) { if (!cands) return; free(cands->cands); free(cands); } int bpf_core_add_cands(struct bpf_core_cand *local_cand, size_t local_essent_len, const struct btf *targ_btf, const char *targ_btf_name, int targ_start_id, struct bpf_core_cand_list *cands) { struct bpf_core_cand *new_cands, *cand; const struct btf_type *t, *local_t; const char *targ_name, *local_name; size_t targ_essent_len; int n, i; local_t = btf__type_by_id(local_cand->btf, local_cand->id); local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); n = btf__type_cnt(targ_btf); for (i = targ_start_id; i < n; i++) { t = btf__type_by_id(targ_btf, i); if (!btf_kind_core_compat(t, local_t)) continue; targ_name = btf__name_by_offset(targ_btf, t->name_off); if (str_is_empty(targ_name)) continue; targ_essent_len = bpf_core_essential_name_len(targ_name); if (targ_essent_len != local_essent_len) continue; if (strncmp(local_name, targ_name, local_essent_len) != 0) continue; pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", local_cand->id, btf_kind_str(local_t), local_name, i, btf_kind_str(t), targ_name, targ_btf_name); new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, sizeof(*cands->cands)); if (!new_cands) return -ENOMEM; cand = &new_cands[cands->len]; cand->btf = targ_btf; cand->id = i; cands->cands = new_cands; cands->len++; } return 0; } static int load_module_btfs(struct bpf_object *obj) { struct bpf_btf_info info; struct module_btf *mod_btf; struct btf *btf; char name[64]; __u32 id = 0, len; int err, fd; if (obj->btf_modules_loaded) return 0; if (obj->gen_loader) return 0; /* don't do this again, even if we find no module BTFs */ obj->btf_modules_loaded = true; /* kernel too old to support module BTFs */ if (!kernel_supports(obj, FEAT_MODULE_BTF)) return 0; while (true) { err = bpf_btf_get_next_id(id, &id); if (err && errno == ENOENT) return 0; if (err && errno == EPERM) { pr_debug("skipping module BTFs loading, missing privileges\n"); return 0; } if (err) { err = -errno; pr_warn("failed to iterate BTF objects: %d\n", err); return err; } fd = bpf_btf_get_fd_by_id(id); if (fd < 0) { if (errno == ENOENT) continue; /* expected race: BTF was unloaded */ err = -errno; pr_warn("failed to get BTF object #%d FD: %d\n", id, err); return err; } len = sizeof(info); memset(&info, 0, sizeof(info)); info.name = ptr_to_u64(name); info.name_len = sizeof(name); err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { err = -errno; pr_warn("failed to get BTF object #%d info: %d\n", id, err); goto err_out; } /* ignore non-module BTFs */ if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { close(fd); continue; } btf = btf_get_from_fd(fd, obj->btf_vmlinux); err = libbpf_get_error(btf); if (err) { pr_warn("failed to load module [%s]'s BTF object #%d: %d\n", name, id, err); goto err_out; } err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); if (err) goto err_out; mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; mod_btf->btf = btf; mod_btf->id = id; mod_btf->fd = fd; mod_btf->name = strdup(name); if (!mod_btf->name) { err = -ENOMEM; goto err_out; } continue; err_out: close(fd); return err; } return 0; } static struct bpf_core_cand_list * bpf_core_find_cands(struct bpf_object *obj, 
const struct btf *local_btf, __u32 local_type_id) { struct bpf_core_cand local_cand = {}; struct bpf_core_cand_list *cands; const struct btf *main_btf; const struct btf_type *local_t; const char *local_name; size_t local_essent_len; int err, i; local_cand.btf = local_btf; local_cand.id = local_type_id; local_t = btf__type_by_id(local_btf, local_type_id); if (!local_t) return ERR_PTR(-EINVAL); local_name = btf__name_by_offset(local_btf, local_t->name_off); if (str_is_empty(local_name)) return ERR_PTR(-EINVAL); local_essent_len = bpf_core_essential_name_len(local_name); cands = calloc(1, sizeof(*cands)); if (!cands) return ERR_PTR(-ENOMEM); /* Attempt to find target candidates in vmlinux BTF first */ main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); if (err) goto err_out; /* if vmlinux BTF has any candidate, don't go for module BTFs */ if (cands->len) return cands; /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ if (obj->btf_vmlinux_override) return cands; /* now look through module BTFs, trying to still find candidates */ err = load_module_btfs(obj); if (err) goto err_out; for (i = 0; i < obj->btf_module_cnt; i++) { err = bpf_core_add_cands(&local_cand, local_essent_len, obj->btf_modules[i].btf, obj->btf_modules[i].name, btf__type_cnt(obj->btf_vmlinux), cands); if (err) goto err_out; } return cands; err_out: bpf_core_free_cands(cands); return ERR_PTR(err); } /* Check local and target types for compatibility. This check is used for * type-based CO-RE relocations and follows slightly different rules from * field-based relocations. This function assumes that root types were already * checked for name match. Beyond that initial root-level name check, names * are completely ignored. Compatibility rules are as follows: * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but * kind should match for local and target types (i.e., STRUCT is not * compatible with UNION); * - for ENUMs, the size is ignored; * - for INT, size and signedness are ignored; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * - CONST/VOLATILE/RESTRICT modifiers are ignored; * - TYPEDEFs/PTRs are compatible if the types they point to are compatible; * - FUNC_PROTOs are compatible if they have compatible signature: same * number of input args and compatible return and argument types. * These rules are not set in stone and probably will be adjusted as we get * more experience with using BPF CO-RE relocations.
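* A short illustration of these rules (hypothetical types, not taken from * kernel BTF): a local 'struct sk_buff' matches a target STRUCT 'sk_buff' no * matter how their members differ; a local 'int' matches a target * 'unsigned long' (both are INTs, so size and signedness differences are * ignored); but a local STRUCT never matches a target UNION, even when the * names are identical.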
*/ int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id) { return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32); } int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id) { return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32); } static size_t bpf_core_hash_fn(const long key, void *ctx) { return key; } static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx) { return k1 == k2; } static int record_relo_core(struct bpf_program *prog, const struct bpf_core_relo *core_relo, int insn_idx) { struct reloc_desc *relos, *relo; relos = libbpf_reallocarray(prog->reloc_desc, prog->nr_reloc + 1, sizeof(*relos)); if (!relos) return -ENOMEM; relo = &relos[prog->nr_reloc]; relo->type = RELO_CORE; relo->insn_idx = insn_idx; relo->core_relo = core_relo; prog->reloc_desc = relos; prog->nr_reloc++; return 0; } static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx) { struct reloc_desc *relo; int i; for (i = 0; i < prog->nr_reloc; i++) { relo = &prog->reloc_desc[i]; if (relo->type != RELO_CORE || relo->insn_idx != insn_idx) continue; return relo->core_relo; } return NULL; } static int bpf_core_resolve_relo(struct bpf_program *prog, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, struct hashmap *cand_cache, struct bpf_core_relo_res *targ_res) { struct bpf_core_spec specs_scratch[3] = {}; struct bpf_core_cand_list *cands = NULL; const char *prog_name = prog->name; const struct btf_type *local_type; const char *local_name; __u32 local_id = relo->type_id; int err; local_type = btf__type_by_id(local_btf, local_id); if (!local_type) return -EINVAL; local_name = btf__name_by_offset(local_btf, local_type->name_off); if (!local_name) return -EINVAL; if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && !hashmap__find(cand_cache, local_id, &cands)) { cands = bpf_core_find_cands(prog->obj, local_btf, local_id); if (IS_ERR(cands)) { pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", prog_name, relo_idx, local_id, btf_kind_str(local_type), local_name, PTR_ERR(cands)); return PTR_ERR(cands); } err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); if (err) { bpf_core_free_cands(cands); return err; } } return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, targ_res); } static int bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) { const struct btf_ext_info_sec *sec; struct bpf_core_relo_res targ_res; const struct bpf_core_relo *rec; const struct btf_ext_info *seg; struct hashmap_entry *entry; struct hashmap *cand_cache = NULL; struct bpf_program *prog; struct bpf_insn *insn; const char *sec_name; int i, err = 0, insn_idx, sec_idx, sec_num; if (obj->btf_ext->core_relo_info.len == 0) return 0; if (targ_btf_path) { obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); err = libbpf_get_error(obj->btf_vmlinux_override); if (err) { pr_warn("failed to parse target BTF: %d\n", err); return err; } } cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); if (IS_ERR(cand_cache)) { err = PTR_ERR(cand_cache); goto out; } seg = &obj->btf_ext->core_relo_info; sec_num = 0; for_each_btf_ext_sec(seg, sec) { sec_idx = seg->sec_idxs[sec_num]; sec_num++; sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); if (str_is_empty(sec_name)) { err = -EINVAL; goto out; } pr_debug("sec 
'%s': found %d CO-RE relocations\n", sec_name, sec->num_info); for_each_btf_ext_rec(seg, sec, i, rec) { if (rec->insn_off % BPF_INSN_SZ) return -EINVAL; insn_idx = rec->insn_off / BPF_INSN_SZ; prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); if (!prog) { /* When __weak subprog is "overridden" by another instance * of the subprog from a different object file, the linker still * appends all the .BTF.ext info that used to belong to that * eliminated subprogram. * This is similar to what the x86-64 linker does for relocations. * So just ignore such relocations, the same way we ignore * subprog instructions when discovering subprograms. */ pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", sec_name, i, insn_idx); continue; } /* no need to apply CO-RE relocation if the program is * not going to be loaded */ if (!prog->autoload) continue; /* adjust insn_idx from section frame of reference to the local * program's frame of reference; (sub-)program code is not yet * relocated, so it's enough to just subtract in-section offset */ insn_idx = insn_idx - prog->sec_insn_off; if (insn_idx >= prog->insns_cnt) return -EINVAL; insn = &prog->insns[insn_idx]; err = record_relo_core(prog, rec, insn_idx); if (err) { pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", prog->name, i, err); goto out; } if (prog->obj->gen_loader) continue; err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); if (err) { pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", prog->name, i, err); goto out; } err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); if (err) { pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", prog->name, i, insn_idx, err); goto out; } } } out: /* obj->btf_vmlinux and module BTFs are freed after object load */ btf__free(obj->btf_vmlinux_override); obj->btf_vmlinux_override = NULL; if (!IS_ERR_OR_NULL(cand_cache)) { hashmap__for_each_entry(cand_cache, entry, i) { bpf_core_free_cands(entry->pvalue); } hashmap__free(cand_cache); } return err; } /* base map load ldimm64 special constant, used also for log fixup logic */ #define POISON_LDIMM64_MAP_BASE 2001000000 #define POISON_LDIMM64_MAP_PFX "200100" static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, int insn_idx, struct bpf_insn *insn, int map_idx, const struct bpf_map *map) { int i; pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n", prog->name, relo_idx, insn_idx, map_idx, map->name); /* we turn a single ldimm64 into two identical invalid calls */ for (i = 0; i < 2; i++) { insn->code = BPF_JMP | BPF_CALL; insn->dst_reg = 0; insn->src_reg = 0; insn->off = 0; /* if this instruction is reachable (not dead code), * the verifier will complain with something like: * invalid func unknown#2001000123 * where the lower 123 is the map index into the obj->maps[] array */ insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; insn++; } } /* unresolved kfunc call special constant, used also for log fixup logic */ #define POISON_CALL_KFUNC_BASE 2002000000 #define POISON_CALL_KFUNC_PFX "2002" static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, int insn_idx, struct bpf_insn *insn, int ext_idx, const struct extern_desc *ext) { pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", prog->name, relo_idx, insn_idx, ext->name); /* we turn the kfunc call into an invalid helper call with an identifiable constant */ insn->code = BPF_JMP | BPF_CALL; insn->dst_reg = 0; insn->src_reg = 0; insn->off = 0; /* if this
instruction is reachable (not dead code), * the verifier will complain with something like: * invalid func unknown#2002000123 * where the lower 123 is the extern index into the obj->externs[] array */ insn->imm = POISON_CALL_KFUNC_BASE + ext_idx; } /* Relocate data references within program code: * - map references; * - global variable references; * - extern references. */ static int bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) { int i; for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; struct bpf_insn *insn = &prog->insns[relo->insn_idx]; const struct bpf_map *map; struct extern_desc *ext; switch (relo->type) { case RELO_LD64: map = &obj->maps[relo->map_idx]; if (obj->gen_loader) { insn[0].src_reg = BPF_PSEUDO_MAP_IDX; insn[0].imm = relo->map_idx; } else if (map->autocreate) { insn[0].src_reg = BPF_PSEUDO_MAP_FD; insn[0].imm = map->fd; } else { poison_map_ldimm64(prog, i, relo->insn_idx, insn, relo->map_idx, map); } break; case RELO_DATA: map = &obj->maps[relo->map_idx]; insn[1].imm = insn[0].imm + relo->sym_off; if (obj->gen_loader) { insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; insn[0].imm = relo->map_idx; } else if (map->autocreate) { insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = map->fd; } else { poison_map_ldimm64(prog, i, relo->insn_idx, insn, relo->map_idx, map); } break; case RELO_EXTERN_LD64: ext = &obj->externs[relo->ext_idx]; if (ext->type == EXT_KCFG) { if (obj->gen_loader) { insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; insn[0].imm = obj->kconfig_map_idx; } else { insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; } insn[1].imm = ext->kcfg.data_off; } else /* EXT_KSYM */ { if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */ insn[0].src_reg = BPF_PSEUDO_BTF_ID; insn[0].imm = ext->ksym.kernel_btf_id; insn[1].imm = ext->ksym.kernel_btf_obj_fd; } else { /* typeless ksyms or unresolved typed ksyms */ insn[0].imm = (__u32)ext->ksym.addr; insn[1].imm = ext->ksym.addr >> 32; } } break; case RELO_EXTERN_CALL: ext = &obj->externs[relo->ext_idx]; insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; if (ext->is_set) { insn[0].imm = ext->ksym.kernel_btf_id; insn[0].off = ext->ksym.btf_fd_idx; } else { /* unresolved weak kfunc call */ poison_kfunc_call(prog, i, relo->insn_idx, insn, relo->ext_idx, ext); } break; case RELO_SUBPROG_ADDR: if (insn[0].src_reg != BPF_PSEUDO_FUNC) { pr_warn("prog '%s': relo #%d: bad insn\n", prog->name, i); return -EINVAL; } /* handled already */ break; case RELO_CALL: /* handled already */ break; case RELO_CORE: /* will be handled by bpf_program_record_relos() */ break; default: pr_warn("prog '%s': relo #%d: bad relo type %d\n", prog->name, i, relo->type); return -EINVAL; } } return 0; } static int adjust_prog_btf_ext_info(const struct bpf_object *obj, const struct bpf_program *prog, const struct btf_ext_info *ext_info, void **prog_info, __u32 *prog_rec_cnt, __u32 *prog_rec_sz) { void *copy_start = NULL, *copy_end = NULL; void *rec, *rec_end, *new_prog_info; const struct btf_ext_info_sec *sec; size_t old_sz, new_sz; int i, sec_num, sec_idx, off_adj; sec_num = 0; for_each_btf_ext_sec(ext_info, sec) { sec_idx = ext_info->sec_idxs[sec_num]; sec_num++; if (prog->sec_idx != sec_idx) continue; for_each_btf_ext_rec(ext_info, sec, i, rec) { __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; if (insn_off < prog->sec_insn_off) continue; if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) break; if (!copy_start) copy_start = rec; copy_end = rec + ext_info->rec_size; } if
(!copy_start) return -ENOENT; /* append func/line info of a given (sub-)program to the main * program func/line info */ old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; new_sz = old_sz + (copy_end - copy_start); new_prog_info = realloc(*prog_info, new_sz); if (!new_prog_info) return -ENOMEM; *prog_info = new_prog_info; *prog_rec_cnt = new_sz / ext_info->rec_size; memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); /* Kernel instruction offsets are in units of 8-byte * instructions, while .BTF.ext instruction offsets generated * by Clang are in units of bytes. So convert Clang offsets * into kernel offsets and adjust the offset according to the * program's relocated position. */ off_adj = prog->sub_insn_off - prog->sec_insn_off; rec = new_prog_info + old_sz; rec_end = new_prog_info + new_sz; for (; rec < rec_end; rec += ext_info->rec_size) { __u32 *insn_off = rec; *insn_off = *insn_off / BPF_INSN_SZ + off_adj; } *prog_rec_sz = ext_info->rec_size; return 0; } return -ENOENT; } static int reloc_prog_func_and_line_info(const struct bpf_object *obj, struct bpf_program *main_prog, const struct bpf_program *prog) { int err; /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't * support func/line info */ if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC)) return 0; /* only attempt func info relocation if main program's func_info * relocation was successful */ if (main_prog != prog && !main_prog->func_info) goto line_info; err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, &main_prog->func_info, &main_prog->func_info_cnt, &main_prog->func_info_rec_size); if (err) { if (err != -ENOENT) { pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", prog->name, err); return err; } if (main_prog->func_info) { /* * Some info has already been found, but there was a problem * with the last btf_ext reloc. We have to error out. */ pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); return err; } /* We failed to load the very first info; ignore the rest. */ pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", prog->name); } line_info: /* don't relocate line info if main program's relocation failed */ if (main_prog != prog && !main_prog->line_info) return 0; err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, &main_prog->line_info, &main_prog->line_info_cnt, &main_prog->line_info_rec_size); if (err) { if (err != -ENOENT) { pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", prog->name, err); return err; } if (main_prog->line_info) { /* * Some info has already been found, but there was a problem * with the last btf_ext reloc. We have to error out. */ pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); return err; } /* We failed to load the very first info; ignore the rest. */ pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", prog->name); } return 0; } static int cmp_relo_by_insn_idx(const void *key, const void *elem) { size_t insn_idx = *(const size_t *)key; const struct reloc_desc *relo = elem; if (insn_idx == relo->insn_idx) return 0; return insn_idx < relo->insn_idx ?
-1 : 1; } static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) { if (!prog->nr_reloc) return NULL; return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); } static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) { int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; struct reloc_desc *relos; int i; if (main_prog == subprog) return 0; relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); /* if new count is zero, reallocarray can return a valid NULL result; * in this case the previous pointer will be freed, so we *have to* * reassign old pointer to the new value (even if it's NULL) */ if (!relos && new_cnt) return -ENOMEM; if (subprog->nr_reloc) memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, sizeof(*relos) * subprog->nr_reloc); for (i = main_prog->nr_reloc; i < new_cnt; i++) relos[i].insn_idx += subprog->sub_insn_off; /* After insn_idx adjustment the 'relos' array is still sorted * by insn_idx and doesn't break bsearch. */ main_prog->reloc_desc = relos; main_prog->nr_reloc = new_cnt; return 0; } static int bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog, struct bpf_program *subprog) { struct bpf_insn *insns; size_t new_cnt; int err; subprog->sub_insn_off = main_prog->insns_cnt; new_cnt = main_prog->insns_cnt + subprog->insns_cnt; insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); if (!insns) { pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); return -ENOMEM; } main_prog->insns = insns; main_prog->insns_cnt = new_cnt; memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, subprog->insns_cnt * sizeof(*insns)); pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", main_prog->name, subprog->insns_cnt, subprog->name); /* The subprog insns are now appended. Append its relos too. */ err = append_subprog_relos(main_prog, subprog); if (err) return err; return 0; } static int bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, struct bpf_program *prog) { size_t sub_insn_idx, insn_idx; struct bpf_program *subprog; struct reloc_desc *relo; struct bpf_insn *insn; int err; err = reloc_prog_func_and_line_info(obj, main_prog, prog); if (err) return err; for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) continue; relo = find_prog_insn_relo(prog, insn_idx); if (relo && relo->type == RELO_EXTERN_CALL) /* kfunc relocations will be handled later * in bpf_object__relocate_data() */ continue; if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", prog->name, insn_idx, relo->type); return -LIBBPF_ERRNO__RELOC; } if (relo) { /* sub-program instruction index is a combination of * an offset of a symbol pointed to by relocation and * call instruction's imm field; for global functions, * call always has imm = -1, but for static functions * relocation is against STT_SECTION and insn->imm * points to a start of a static function * * for subprog addr relocation, the relo->sym_off + insn->imm is * the byte offset in the corresponding section. 
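* Worked example with made-up numbers (BPF_INSN_SZ is 8 bytes): a RELO_CALL * with relo->sym_off == 16 and insn->imm == 2 resolves to sub_insn_idx == * 16 / 8 + 2 + 1 == 5, while a subprog addr relocation with relo->sym_off == * 16 and insn->imm == 24 resolves to (16 + 24) / 8 == 5.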
*/ if (relo->type == RELO_CALL) sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; else sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; } else if (insn_is_pseudo_func(insn)) { /* * RELO_SUBPROG_ADDR relo is always emitted even if both * functions are in the same section, so it shouldn't reach here. */ pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", prog->name, insn_idx); return -LIBBPF_ERRNO__RELOC; } else { /* if the subprogram call is to a static function within * the same ELF section, there won't be any relocation * emitted, but it also means there is no additional * offset necessary, insns->imm is relative to the * instruction's original position within the section */ sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; } /* we enforce that sub-programs should be in .text section */ subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); if (!subprog) { pr_warn("prog '%s': no .text section found yet sub-program call exists\n", prog->name); return -LIBBPF_ERRNO__RELOC; } /* if it's the first call instruction calling into this * subprogram (meaning this subprog hasn't been processed * yet) within the context of current main program: * - append it at the end of the main program's instruction block; * - process it recursively, while the current program is put on hold; * - if that subprogram calls some other not-yet-processed * subprogram, the same thing happens recursively until * there are no more unprocessed subprograms left to append * and relocate. */ if (subprog->sub_insn_off == 0) { err = bpf_object__append_subprog_code(obj, main_prog, subprog); if (err) return err; err = bpf_object__reloc_code(obj, main_prog, subprog); if (err) return err; } /* main_prog->insns memory could have been re-allocated, so * calculate pointer again */ insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; /* calculate correct instruction position within current main * prog; each main prog can have a different set of * subprograms appended (potentially in different order as * well), so position of any subprog can be different for * different main programs */ insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); } return 0; } /* * Relocate sub-program calls. * * The algorithm operates as follows. Each entry-point BPF program (referred * to as main prog) is processed separately. Each subprog (a non-entry * function that can be called from either entry progs or other subprogs) gets * its sub_insn_off reset to zero. This serves as an indicator that this * subprogram hasn't yet been appended and relocated within the current main * prog. Once it's relocated, sub_insn_off will point at the position within * the current main prog where the given subprog was appended. This will * further be used to relocate all the call instructions jumping into this * subprog. * * We start with the main program and process all call instructions. If the * call is into a subprog that hasn't been processed (i.e., * subprog->sub_insn_off is zero), subprog instructions are appended at the * end of the main program's instruction array. Then the main program is "put * on hold" while we recursively process the newly appended subprogram.
If that subprogram calls into another * subprogram that hasn't been appended, the new subprogram is appended again * to the *main* prog's instructions (subprog's instructions are always left * untouched, as they need to stay in an unmodified state for subsequent main * progs, and subprog instructions are only ever used as part of a main prog) * and the process continues recursively. Once all the subprogs called from a * main prog or any of its subprogs are appended (and relocated), all their * positions within the finalized instructions array are known, so it's easy * to rewrite call instructions with correct relative offsets, corresponding * to the desired target subprog. * * It's important to realize that some subprogs might not be called from a * given main prog or any of its called/used subprogs. Those will keep their * subprog->sub_insn_off as zero at all times and won't be appended to the * current main prog and won't be relocated within the context of the current * main prog. They might still be used from other main progs later. * * Visually this process can be shown as below. Suppose we have two main * programs mainA and mainB, and the BPF object contains three subprogs: subA, * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and * subC both call subB: * * +--------+ +-------+ * | v v | * +--+---+ +--+-+-+ +---+--+ * | subA | | subB | | subC | * +--+---+ +------+ +---+--+ * ^ ^ * | | * +---+-------+ +------+----+ * | mainA | | mainB | * +-----------+ +-----------+ * * We'll start relocating mainA, will find subA, append it and start * processing subA recursively: * * +-----------+------+ * | mainA | subA | * +-----------+------+ * * At this point we notice that subB is used from subA, so we append it and * relocate (there are no further subcalls from subB): * * +-----------+------+------+ * | mainA | subA | subB | * +-----------+------+------+ * * At this point, we relocate subA calls, then go one level up and finish with * relocating mainA calls. mainA is done. * * For mainB the process is similar but results in a different order. We start * with mainB and skip subA and subB, as mainB never calls them (at least * directly), but we see subC is needed, so we append and start processing it: * * +-----------+------+ * | mainB | subC | * +-----------+------+ * Now we see subC needs subB, so we go back to it, append and relocate it: * * +-----------+------+------+ * | mainB | subC | subB | * +-----------+------+------+ * * At this point we unwind recursion, relocate calls in subC, then in mainB. */ static int bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) { struct bpf_program *subprog; int i, err; /* mark all subprogs as not relocated (yet) within the context of * current main program */ for (i = 0; i < obj->nr_programs; i++) { subprog = &obj->programs[i]; if (!prog_is_subprog(obj, subprog)) continue; subprog->sub_insn_off = 0; } err = bpf_object__reloc_code(obj, prog, prog); if (err) return err; return 0; } static void bpf_object__free_relocs(struct bpf_object *obj) { struct bpf_program *prog; int i; /* free up relocation descriptors */ for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; zfree(&prog->reloc_desc); prog->nr_reloc = 0; } } static int cmp_relocs(const void *_a, const void *_b) { const struct reloc_desc *a = _a; const struct reloc_desc *b = _b; if (a->insn_idx != b->insn_idx) return a->insn_idx < b->insn_idx ? -1 : 1; /* no two relocations should have the same insn_idx, but ... */ if (a->type != b->type) return a->type < b->type ?
-1 : 1; return 0; } static void bpf_object__sort_relos(struct bpf_object *obj) { int i; for (i = 0; i < obj->nr_programs; i++) { struct bpf_program *p = &obj->programs[i]; if (!p->nr_reloc) continue; qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); } } static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog) { const char *str = "exception_callback:"; size_t pfx_len = strlen(str); int i, j, n; if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG)) return 0; n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { const char *name; struct btf_type *t; t = btf_type_by_id(obj->btf, i); if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1) continue; name = btf__str_by_offset(obj->btf, t->name_off); if (strncmp(name, str, pfx_len) != 0) continue; t = btf_type_by_id(obj->btf, t->type); if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) { pr_warn("prog '%s': exception_callback: decl tag not applied to the main program\n", prog->name); return -EINVAL; } if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0) continue; /* If multiple callbacks are specified for the same prog, * the verifier will eventually return an error for this * case, so simply skip appending a subprog. */ if (prog->exception_cb_idx >= 0) { prog->exception_cb_idx = -1; break; } name += pfx_len; if (str_is_empty(name)) { pr_warn("prog '%s': exception_callback: decl tag contains empty value\n", prog->name); return -EINVAL; } for (j = 0; j < obj->nr_programs; j++) { struct bpf_program *subprog = &obj->programs[j]; if (!prog_is_subprog(obj, subprog)) continue; if (strcmp(name, subprog->name) != 0) continue; /* Enforce non-hidden, as from the verifier's point of * view it expects global functions, whereas * mark_btf_static fixes up the linkage as static.
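* Such a tag is emitted from BPF C source roughly as follows (hypothetical * program/callback names, shown for illustration only): declaring the main * program with __attribute__((btf_decl_tag("exception_callback:my_cb"))) * designates the global subprog 'my_cb' as its exception callback.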
*/ if (!subprog->sym_global || subprog->mark_btf_static) { pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n", prog->name, subprog->name); return -EINVAL; } /* Let's see if we already saw a static exception callback with the same name */ if (prog->exception_cb_idx >= 0) { pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n", prog->name, subprog->name); return -EINVAL; } prog->exception_cb_idx = j; break; } if (prog->exception_cb_idx >= 0) continue; pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name); return -ENOENT; } return 0; } static struct { enum bpf_prog_type prog_type; const char *ctx_name; } global_ctx_map[] = { { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" }, { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" }, { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" }, { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" }, { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" }, { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" }, { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" }, { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" }, { BPF_PROG_TYPE_LWT_IN, "__sk_buff" }, { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" }, { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" }, { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" }, { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" }, { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" }, { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" }, { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" }, { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" }, { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" }, { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" }, { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" }, { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" }, { BPF_PROG_TYPE_SK_SKB, "__sk_buff" }, { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" }, { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" }, { BPF_PROG_TYPE_XDP, "xdp_md" }, /* all other program types don't have "named" context structs */ }; /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef, * for below __builtin_types_compatible_p() checks; * with this approach we don't need any extra arch-specific #ifdef guards */ struct pt_regs; struct user_pt_regs; struct user_regs_struct; static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog, const char *subprog_name, int arg_idx, int arg_type_id, const char *ctx_name) { const struct btf_type *t; const char *tname; /* check if existing parameter already matches verifier expectations */ t = skip_mods_and_typedefs(btf, arg_type_id, NULL); if (!btf_is_ptr(t)) goto out_warn; /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe * and perf_event programs, so check this case early on and forget * about it for subsequent checks */ while (btf_is_mod(t)) t = btf__type_by_id(btf, t->type); if (btf_is_typedef(t) && (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) { tname = btf__str_by_offset(btf, t->name_off) ?: ""; if (strcmp(tname, "bpf_user_pt_regs_t") == 0) return false; /* canonical type for kprobe/perf_event */ } /* now we can ignore typedefs moving forward */ t = skip_mods_and_typedefs(btf, t->type, NULL); /* if it's `void *`, definitely fix up BTF info */ if (btf_is_void(t)) return true; /* if it's already proper canonical type, no need to fix up */ tname = btf__str_by_offset(btf, t->name_off) ?: ""; if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0) return false; /* special cases */ switch (prog->type) { case BPF_PROG_TYPE_KPROBE: /* `struct pt_regs *` is 
expected, but we need to fix up */ if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) return true; break; case BPF_PROG_TYPE_PERF_EVENT: if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && btf_is_struct(t) && strcmp(tname, "pt_regs") == 0) return true; if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0) return true; if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0) return true; break; case BPF_PROG_TYPE_RAW_TRACEPOINT: case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: /* allow u64* as ctx */ if (btf_is_int(t) && t->size == 8) return true; break; default: break; } out_warn: pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n", prog->name, subprog_name, arg_idx, ctx_name); return false; } static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog) { int fn_id, fn_proto_id, ret_type_id, orig_proto_id; int i, err, arg_cnt, fn_name_off, linkage; struct btf_type *fn_t, *fn_proto_t, *t; struct btf_param *p; /* caller already validated FUNC -> FUNC_PROTO validity */ fn_t = btf_type_by_id(btf, orig_fn_id); fn_proto_t = btf_type_by_id(btf, fn_t->type); /* Note that each btf__add_xxx() operation invalidates * all btf_type and string pointers, so we need to be * very careful when cloning BTF types. BTF type * pointers always have to be refetched. And to avoid * problems with invalidated string pointers, we * add empty strings initially, then just fix up * name_off offsets in place. Offsets are stable for * existing strings, so that works out. */ fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */ linkage = btf_func_linkage(fn_t); orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */ ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */ arg_cnt = btf_vlen(fn_proto_t); /* clone FUNC_PROTO and its params */ fn_proto_id = btf__add_func_proto(btf, ret_type_id); if (fn_proto_id < 0) return -EINVAL; for (i = 0; i < arg_cnt; i++) { int name_off; /* copy original parameter data */ t = btf_type_by_id(btf, orig_proto_id); p = &btf_params(t)[i]; name_off = p->name_off; err = btf__add_func_param(btf, "", p->type); if (err) return err; fn_proto_t = btf_type_by_id(btf, fn_proto_id); p = &btf_params(fn_proto_t)[i]; p->name_off = name_off; /* use remembered str offset */ } /* clone FUNC now, btf__add_func() enforces non-empty name, so use * entry program's name as a placeholder, which we replace immediately * with the original name_off */ fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id); if (fn_id < 0) return -EINVAL; fn_t = btf_type_by_id(btf, fn_id); fn_t->name_off = fn_name_off; /* reuse original string */ return fn_id; } /* Check if main program or global subprog's function prototype has `arg:ctx` * argument tags, and, if necessary, substitute the correct type to match what * the BPF verifier would expect, taking into account the specific program * type. This allows libbpf to support the __arg_ctx tag transparently on old * kernels that don't yet have native support for it in the verifier, making * the user's life much easier.
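* For example (hypothetical subprog, for illustration): a global subprog * declared as 'int handler(void *ctx __arg_ctx)' and used from a * BPF_PROG_TYPE_XDP program gets a cloned FUNC_PROTO whose argument type is * rewritten to 'struct xdp_md *', per the global_ctx_map table above.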
*/ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog) { const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name; struct bpf_func_info_min *func_rec; struct btf_type *fn_t, *fn_proto_t; struct btf *btf = obj->btf; const struct btf_type *t; struct btf_param *p; int ptr_id = 0, struct_id, tag_id, orig_fn_id; int i, n, arg_idx, arg_cnt, err, rec_idx; int *orig_ids; /* no .BTF.ext, no problem */ if (!obj->btf_ext || !prog->func_info) return 0; /* don't do any fix ups if kernel natively supports __arg_ctx */ if (kernel_supports(obj, FEAT_ARG_CTX_TAG)) return 0; /* some BPF program types just don't have named context structs, so * this fallback mechanism doesn't work for them */ for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) { if (global_ctx_map[i].prog_type != prog->type) continue; ctx_name = global_ctx_map[i].ctx_name; break; } if (!ctx_name) return 0; /* remember original func BTF IDs to detect if we already cloned them */ orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids)); if (!orig_ids) return -ENOMEM; for (i = 0; i < prog->func_info_cnt; i++) { func_rec = prog->func_info + prog->func_info_rec_size * i; orig_ids[i] = func_rec->type_id; } /* go through each DECL_TAG with "arg:ctx" and see if it points to one * of our subprogs; if yes and subprog is global and needs adjustment, * clone and adjust FUNC -> FUNC_PROTO combo */ for (i = 1, n = btf__type_cnt(btf); i < n; i++) { /* only DECL_TAG with "arg:ctx" value are interesting */ t = btf__type_by_id(btf, i); if (!btf_is_decl_tag(t)) continue; if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0) continue; /* only global funcs need adjustment, if at all */ orig_fn_id = t->type; fn_t = btf_type_by_id(btf, orig_fn_id); if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL) continue; /* sanity check FUNC -> FUNC_PROTO chain, just in case */ fn_proto_t = btf_type_by_id(btf, fn_t->type); if (!fn_proto_t || !btf_is_func_proto(fn_proto_t)) continue; /* find corresponding func_info record */ func_rec = NULL; for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) { if (orig_ids[rec_idx] == t->type) { func_rec = prog->func_info + prog->func_info_rec_size * rec_idx; break; } } /* current main program doesn't call into this subprog */ if (!func_rec) continue; /* some more sanity checking of DECL_TAG */ arg_cnt = btf_vlen(fn_proto_t); arg_idx = btf_decl_tag(t)->component_idx; if (arg_idx < 0 || arg_idx >= arg_cnt) continue; /* check if we should fix up argument type */ p = &btf_params(fn_proto_t)[arg_idx]; fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: ""; if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name)) continue; /* clone fn/fn_proto, unless we already did it for another arg */ if (func_rec->type_id == orig_fn_id) { int fn_id; fn_id = clone_func_btf_info(btf, orig_fn_id, prog); if (fn_id < 0) { err = fn_id; goto err_out; } /* point func_info record to a cloned FUNC type */ func_rec->type_id = fn_id; } /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument; * we do it just once per main BPF program, as all global * funcs share the same program type, so need only PTR -> * STRUCT type chain */ if (ptr_id == 0) { struct_id = btf__add_struct(btf, ctx_name, 0); ptr_id = btf__add_ptr(btf, struct_id); if (ptr_id < 0 || struct_id < 0) { err = -EINVAL; goto err_out; } } /* for completeness, clone DECL_TAG and point it to cloned param */ tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx); if (tag_id < 0) { err = -EINVAL; goto 
err_out; } /* all the BTF manipulations invalidated pointers, refetch them */ fn_t = btf_type_by_id(btf, func_rec->type_id); fn_proto_t = btf_type_by_id(btf, fn_t->type); /* fix up type ID pointed to by param */ p = &btf_params(fn_proto_t)[arg_idx]; p->type = ptr_id; } free(orig_ids); return 0; err_out: free(orig_ids); return err; } static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) { struct bpf_program *prog; size_t i, j; int err; if (obj->btf_ext) { err = bpf_object__relocate_core(obj, targ_btf_path); if (err) { pr_warn("failed to perform CO-RE relocations: %d\n", err); return err; } bpf_object__sort_relos(obj); } /* Before relocating calls, pre-process relocations and mark * the few ld_imm64 instructions that point to subprogs. * Otherwise bpf_object__reloc_code() later would have to consider * all ld_imm64 insns as relocation candidates. That would * reduce relocation speed, since the number of find_prog_insn_relo() * calls would increase and most of them would fail to find a relo. */ for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; for (j = 0; j < prog->nr_reloc; j++) { struct reloc_desc *relo = &prog->reloc_desc[j]; struct bpf_insn *insn = &prog->insns[relo->insn_idx]; /* mark the insn, so it's recognized by insn_is_pseudo_func() */ if (relo->type == RELO_SUBPROG_ADDR) insn[0].src_reg = BPF_PSEUDO_FUNC; } } /* relocate subprogram calls and append used subprograms to main * programs; each copy of subprogram code needs to be relocated * differently for each main program, because its code location might * have changed. * Append subprog relos to main programs to allow data relos to be * processed after text is completely relocated. */ for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; /* sub-program's sub-calls are relocated within the context of * its main program only */ if (prog_is_subprog(obj, prog)) continue; if (!prog->autoload) continue; err = bpf_object__relocate_calls(obj, prog); if (err) { pr_warn("prog '%s': failed to relocate calls: %d\n", prog->name, err); return err; } err = bpf_prog_assign_exc_cb(obj, prog); if (err) return err; /* Now also append the exception callback if it has not been done already. */ if (prog->exception_cb_idx >= 0) { struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx]; /* Calling the exception callback directly is disallowed; the * verifier will reject it later. In case it was processed already, * we can skip this step; otherwise, for all other valid cases, we * have to append the exception callback now.
*/ if (subprog->sub_insn_off == 0) { err = bpf_object__append_subprog_code(obj, prog, subprog); if (err) return err; err = bpf_object__reloc_code(obj, prog, subprog); if (err) return err; } } } for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; if (prog_is_subprog(obj, prog)) continue; if (!prog->autoload) continue; /* Process data relos for main programs */ err = bpf_object__relocate_data(obj, prog); if (err) { pr_warn("prog '%s': failed to relocate data references: %d\n", prog->name, err); return err; } /* Fix up .BTF.ext information, if necessary */ err = bpf_program_fixup_func_info(obj, prog); if (err) { pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n", prog->name, err); return err; } } return 0; } static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data); static int bpf_object__collect_map_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; struct bpf_map *map = NULL, *targ_map = NULL; struct bpf_program *targ_prog = NULL; bool is_prog_array, is_map_in_map; const struct btf_member *member; const char *name, *mname, *type; unsigned int moff; Elf64_Sym *sym; Elf64_Rel *rel; void *tmp; if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) return -EINVAL; sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); if (!sec) return -EINVAL; nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { rel = elf_rel_by_idx(data, i); if (!rel) { pr_warn(".maps relo #%d: failed to get ELF relo\n", i); return -LIBBPF_ERRNO__FORMAT; } sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); if (!sym) { pr_warn(".maps relo #%d: symbol %zx not found\n", i, (size_t)ELF64_R_SYM(rel->r_info)); return -LIBBPF_ERRNO__FORMAT; } name = elf_sym_str(obj, sym->st_name) ?: ""; pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, (size_t)rel->r_offset, sym->st_name, name); for (j = 0; j < obj->nr_maps; j++) { map = &obj->maps[j]; if (map->sec_idx != obj->efile.btf_maps_shndx) continue; vi = btf_var_secinfos(sec) + map->btf_var_idx; if (vi->offset <= rel->r_offset && rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) break; } if (j == obj->nr_maps) { pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", i, name, (size_t)rel->r_offset); return -EINVAL; } is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; type = is_map_in_map ? 
"map" : "prog"; if (is_map_in_map) { if (sym->st_shndx != obj->efile.btf_maps_shndx) { pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", i, name); return -LIBBPF_ERRNO__RELOC; } if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && map->def.key_size != sizeof(int)) { pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", i, map->name, sizeof(int)); return -EINVAL; } targ_map = bpf_object__find_map_by_name(obj, name); if (!targ_map) { pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", i, name); return -ESRCH; } } else if (is_prog_array) { targ_prog = bpf_object__find_program_by_name(obj, name); if (!targ_prog) { pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", i, name); return -ESRCH; } if (targ_prog->sec_idx != sym->st_shndx || targ_prog->sec_insn_off * 8 != sym->st_value || prog_is_subprog(obj, targ_prog)) { pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", i, name); return -LIBBPF_ERRNO__RELOC; } } else { return -EINVAL; } var = btf__type_by_id(obj->btf, vi->type); def = skip_mods_and_typedefs(obj->btf, var->type, NULL); if (btf_vlen(def) == 0) return -EINVAL; member = btf_members(def) + btf_vlen(def) - 1; mname = btf__name_by_offset(obj->btf, member->name_off); if (strcmp(mname, "values")) return -EINVAL; moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; if (rel->r_offset - vi->offset < moff) return -EINVAL; moff = rel->r_offset - vi->offset - moff; /* here we use BPF pointer size, which is always 64 bit, as we * are parsing ELF that was built for BPF target */ if (moff % bpf_ptr_sz) return -EINVAL; moff /= bpf_ptr_sz; if (moff >= map->init_slots_sz) { new_sz = moff + 1; tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); if (!tmp) return -ENOMEM; map->init_slots = tmp; memset(map->init_slots + map->init_slots_sz, 0, (new_sz - map->init_slots_sz) * host_ptr_sz); map->init_slots_sz = new_sz; } map->init_slots[moff] = is_map_in_map ? 
(void *)targ_map : (void *)targ_prog; pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", i, map->name, moff, type, name); } return 0; } static int bpf_object__collect_relos(struct bpf_object *obj) { int i, err; for (i = 0; i < obj->efile.sec_cnt; i++) { struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; Elf64_Shdr *shdr; Elf_Data *data; int idx; if (sec_desc->sec_type != SEC_RELO) continue; shdr = sec_desc->shdr; data = sec_desc->data; idx = shdr->sh_info; if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) { pr_warn("internal error at %d\n", __LINE__); return -LIBBPF_ERRNO__INTERNAL; } if (obj->efile.secs[idx].sec_type == SEC_ST_OPS) err = bpf_object__collect_st_ops_relos(obj, shdr, data); else if (idx == obj->efile.btf_maps_shndx) err = bpf_object__collect_map_relos(obj, shdr, data); else err = bpf_object__collect_prog_relos(obj, shdr, data); if (err) return err; } bpf_object__sort_relos(obj); return 0; } static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) { if (BPF_CLASS(insn->code) == BPF_JMP && BPF_OP(insn->code) == BPF_CALL && BPF_SRC(insn->code) == BPF_K && insn->src_reg == 0 && insn->dst_reg == 0) { *func_id = insn->imm; return true; } return false; } static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) { struct bpf_insn *insn = prog->insns; enum bpf_func_id func_id; int i; if (obj->gen_loader) return 0; for (i = 0; i < prog->insns_cnt; i++, insn++) { if (!insn_is_helper_call(insn, &func_id)) continue; /* on kernels that don't yet support * bpf_probe_read_{kernel,user}[_str] helpers, fall back * to bpf_probe_read() which works well for old kernels */ switch (func_id) { case BPF_FUNC_probe_read_kernel: case BPF_FUNC_probe_read_user: if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) insn->imm = BPF_FUNC_probe_read; break; case BPF_FUNC_probe_read_kernel_str: case BPF_FUNC_probe_read_user_str: if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) insn->imm = BPF_FUNC_probe_read_str; break; default: break; } } return 0; } static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, int *btf_obj_fd, int *btf_type_id); /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ static int libbpf_prepare_prog_load(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie) { enum sec_def_flags def = cookie; /* old kernels might not support specifying expected_attach_type */ if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) opts->expected_attach_type = 0; if (def & SEC_SLEEPABLE) opts->prog_flags |= BPF_F_SLEEPABLE; if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; /* special check for usdt to use uprobe_multi link */ if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) { /* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type * in prog, and expected_attach_type we set in kernel is from opts, so we * update both. 
*/ prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI; } if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { int btf_obj_fd = 0, btf_type_id = 0, err; const char *attach_name; attach_name = strchr(prog->sec_name, '/'); if (!attach_name) { /* if BPF program is annotated with just SEC("fentry") * (or similar) without declaratively specifying * target, then it is expected that the target will be * specified with bpf_program__set_attach_target() at * runtime, before the BPF object load step. If not, then * there is nothing to load into the kernel, as the BPF * verifier won't be able to validate BPF program * correctness anyway. */ pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n", prog->name); return -EINVAL; } attach_name++; /* skip over / */ err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); if (err) return err; /* cache resolved BTF FD and BTF type ID in the prog */ prog->attach_btf_obj_fd = btf_obj_fd; prog->attach_btf_id = btf_type_id; /* but by now libbpf's common logic is not utilizing * prog->attach_btf_obj_fd/prog->attach_btf_id anymore, because * this callback is called after opts were populated by * libbpf, so this callback has to update opts explicitly here */ opts->attach_btf_obj_fd = btf_obj_fd; opts->attach_btf_id = btf_type_id; } return 0; } static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz); static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, const char *license, __u32 kern_version, int *prog_fd) { LIBBPF_OPTS(bpf_prog_load_opts, load_attr); const char *prog_name = NULL; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; char *log_buf = NULL, *tmp; bool own_log_buf = true; __u32 log_level = prog->log_level; int ret, err; /* Be more helpful by rejecting programs that can't be validated early * with a more meaningful and actionable error message. */ switch (prog->type) { case BPF_PROG_TYPE_UNSPEC: /* * The program type must be set. Most likely we couldn't find a proper * section definition at load time, and thus we didn't infer the type.
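* E.g. (illustrative): SEC("xdp") lets libbpf infer BPF_PROG_TYPE_XDP at * open time, while a program in an unrecognized section keeps * BPF_PROG_TYPE_UNSPEC unless the caller sets it explicitly with * bpf_program__set_type() before load. */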
*/ pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", prog->name, prog->sec_name); return -EINVAL; case BPF_PROG_TYPE_STRUCT_OPS: if (prog->attach_btf_id == 0) { pr_warn("prog '%s': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?\n", prog->name); return -EINVAL; } break; default: break; } if (!insns || !insns_cnt) return -EINVAL; if (kernel_supports(obj, FEAT_PROG_NAME)) prog_name = prog->name; load_attr.attach_prog_fd = prog->attach_prog_fd; load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; load_attr.attach_btf_id = prog->attach_btf_id; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; load_attr.expected_attach_type = prog->expected_attach_type; /* specify func_info/line_info only if kernel supports them */ if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { load_attr.prog_btf_fd = btf__fd(obj->btf); load_attr.func_info = prog->func_info; load_attr.func_info_rec_size = prog->func_info_rec_size; load_attr.func_info_cnt = prog->func_info_cnt; load_attr.line_info = prog->line_info; load_attr.line_info_rec_size = prog->line_info_rec_size; load_attr.line_info_cnt = prog->line_info_cnt; } load_attr.log_level = log_level; load_attr.prog_flags = prog->prog_flags; load_attr.fd_array = obj->fd_array; load_attr.token_fd = obj->token_fd; if (obj->token_fd) load_attr.prog_flags |= BPF_F_TOKEN_FD; /* adjust load_attr if sec_def provides custom preload callback */ if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); if (err < 0) { pr_warn("prog '%s': failed to prepare load attributes: %d\n", prog->name, err); return err; } insns = prog->insns; insns_cnt = prog->insns_cnt; } if (obj->gen_loader) { bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, license, insns, insns_cnt, &load_attr, prog - obj->programs); *prog_fd = -1; return 0; } retry_load: /* if log_level is zero, we don't request logs initially even if * custom log_buf is specified; if the program load fails, then we'll * bump log_level to 1 and use either custom log_buf or we'll allocate * our own and retry the load to get details on what failed */ if (log_level) { if (prog->log_buf) { log_buf = prog->log_buf; log_buf_size = prog->log_size; own_log_buf = false; } else if (obj->log_buf) { log_buf = obj->log_buf; log_buf_size = obj->log_size; own_log_buf = false; } else { log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); tmp = realloc(log_buf, log_buf_size); if (!tmp) { ret = -ENOMEM; goto out; } log_buf = tmp; log_buf[0] = '\0'; own_log_buf = true; } } load_attr.log_buf = log_buf; load_attr.log_size = log_buf_size; load_attr.log_level = log_level; ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); if (ret >= 0) { if (log_level && own_log_buf) { pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", prog->name, log_buf); } if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { struct bpf_map *map; int i; for (i = 0; i < obj->nr_maps; i++) { map = &prog->obj->maps[i]; if (map->libbpf_type != LIBBPF_MAP_RODATA) continue; if (bpf_prog_bind_map(ret, map->fd, NULL)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("prog '%s': failed to bind map '%s': %s\n", prog->name, map->real_name, cp); /* Don't fail hard if can't bind rodata. 
*/ } } } *prog_fd = ret; ret = 0; goto out; } if (log_level == 0) { log_level = 1; goto retry_load; } /* On ENOSPC, increase log buffer size and retry, unless custom * log_buf is specified. * Be careful to not overflow u32, though. Kernel's log buf size limit * isn't part of UAPI so it can always be bumped to full 4GB. So don't * multiply by 2 unless we are sure we'll fit within 32 bits. * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). */ if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) goto retry_load; ret = -errno; /* post-process verifier log to improve error descriptions */ fixup_verifier_log(prog, log_buf, log_buf_size); cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); pr_perm_msg(ret); if (own_log_buf && log_buf && log_buf[0] != '\0') { pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", prog->name, log_buf); } out: if (own_log_buf) free(log_buf); return ret; } static char *find_prev_line(char *buf, char *cur) { char *p; if (cur == buf) /* end of a log buf */ return NULL; p = cur - 1; while (p - 1 >= buf && *(p - 1) != '\n') p--; return p; } static void patch_log(char *buf, size_t buf_sz, size_t log_sz, char *orig, size_t orig_sz, const char *patch) { /* size of the remaining log content to the right from the to-be-replaced part */ size_t rem_sz = (buf + log_sz) - (orig + orig_sz); size_t patch_sz = strlen(patch); if (patch_sz != orig_sz) { /* If patch line(s) are longer than original piece of verifier log, * shift log contents by (patch_sz - orig_sz) bytes to the right * starting from after to-be-replaced part of the log. * * If patch line(s) are shorter than original piece of verifier log, * shift log contents by (orig_sz - patch_sz) bytes to the left * starting from after to-be-replaced part of the log. * * We need to be careful about not overflowing available * buf_sz capacity. If that's the case, we'll truncate the end * of the original log, as necessary. */ if (patch_sz > orig_sz) { if (orig + patch_sz >= buf + buf_sz) { /* patch is big enough to cover remaining space completely */ patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; rem_sz = 0; } else if (patch_sz - orig_sz > buf_sz - log_sz) { /* patch causes part of remaining log to be truncated */ rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); } } /* shift remaining log to the right by calculated amount */ memmove(orig + patch_sz, orig + orig_sz, rem_sz); } memcpy(orig, patch, patch_sz); } static void fixup_log_failed_core_relo(struct bpf_program *prog, char *buf, size_t buf_sz, size_t log_sz, char *line1, char *line2, char *line3) { /* Expected log for failed and not properly guarded CO-RE relocation: * line1 -> 123: (85) call unknown#195896080 * line2 -> invalid func unknown#195896080 * line3 -> <anything else or end of buffer> * * "123" is the index of the instruction that was poisoned. We extract * instruction index to find corresponding CO-RE relocation and * replace this part of the log with more relevant information about * failed CO-RE relocation.
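* As an illustration of the end result, a poisoned pair such as
*   5: (85) call unknown#195896080
*   invalid func unknown#195896080
* is rewritten below into "5: <invalid CO-RE relocation>" followed by the
* human-readable relocation spec produced by bpf_core_format_spec()
* (the instruction index 5 is, of course, just an example).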
*/ const struct bpf_core_relo *relo; struct bpf_core_spec spec; char patch[512], spec_buf[256]; int insn_idx, err, spec_len; if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) return; relo = find_relo_core(prog, insn_idx); if (!relo) return; err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); if (err) return; spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); snprintf(patch, sizeof(patch), "%d: <invalid CO-RE relocation>\n" "failed to resolve CO-RE relocation %s%s\n", insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : ""); patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); } static void fixup_log_missing_map_load(struct bpf_program *prog, char *buf, size_t buf_sz, size_t log_sz, char *line1, char *line2, char *line3) { /* Expected log for failed and not properly guarded map reference: * line1 -> 123: (85) call unknown#2001000345 * line2 -> invalid func unknown#2001000345 * line3 -> <anything else or end of buffer> * * "123" is the index of the instruction that was poisoned. * "345" in "2001000345" is a map index in obj->maps to fetch map name. */ struct bpf_object *obj = prog->obj; const struct bpf_map *map; int insn_idx, map_idx; char patch[128]; if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) return; map_idx -= POISON_LDIMM64_MAP_BASE; if (map_idx < 0 || map_idx >= obj->nr_maps) return; map = &obj->maps[map_idx]; snprintf(patch, sizeof(patch), "%d: <invalid BPF map reference>\n" "BPF map '%s' is referenced but wasn't created\n", insn_idx, map->name); patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); } static void fixup_log_missing_kfunc_call(struct bpf_program *prog, char *buf, size_t buf_sz, size_t log_sz, char *line1, char *line2, char *line3) { /* Expected log for failed and not properly guarded kfunc call: * line1 -> 123: (85) call unknown#2002000345 * line2 -> invalid func unknown#2002000345 * line3 -> <anything else or end of buffer> * * "123" is the index of the instruction that was poisoned. * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
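* (The "(%*d)" conversion in the sscanf() below parses and discards the
* opcode number that the verifier prints in parentheses; only the
* instruction index and the poisoned call id are assigned.)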
*/ struct bpf_object *obj = prog->obj; const struct extern_desc *ext; int insn_idx, ext_idx; char patch[128]; if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) return; ext_idx -= POISON_CALL_KFUNC_BASE; if (ext_idx < 0 || ext_idx >= obj->nr_extern) return; ext = &obj->externs[ext_idx]; snprintf(patch, sizeof(patch), "%d: <invalid kfunc call>\n" "kfunc '%s' is referenced but wasn't resolved\n", insn_idx, ext->name); patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); } static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) { /* look for familiar error patterns in last N lines of the log */ const size_t max_last_line_cnt = 10; char *prev_line, *cur_line, *next_line; size_t log_sz; int i; if (!buf) return; log_sz = strlen(buf) + 1; next_line = buf + log_sz - 1; for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { cur_line = find_prev_line(buf, next_line); if (!cur_line) return; if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; /* failed CO-RE relocation case */ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; /* reference to uncreated BPF map */ fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { prev_line = find_prev_line(buf, cur_line); if (!prev_line) continue; /* reference to unresolved kfunc */ fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, prev_line, cur_line, next_line); return; } } } static int bpf_program_record_relos(struct bpf_program *prog) { struct bpf_object *obj = prog->obj; int i; for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; struct extern_desc *ext = &obj->externs[relo->ext_idx]; int kind; switch (relo->type) { case RELO_EXTERN_LD64: if (ext->type != EXT_KSYM) continue; kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
BTF_KIND_VAR : BTF_KIND_FUNC; bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak, !ext->ksym.type_id, true, kind, relo->insn_idx); break; case RELO_EXTERN_CALL: bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak, false, false, BTF_KIND_FUNC, relo->insn_idx); break; case RELO_CORE: { struct bpf_core_relo cr = { .insn_off = relo->insn_idx * 8, .type_id = relo->core_relo->type_id, .access_str_off = relo->core_relo->access_str_off, .kind = relo->core_relo->kind, }; bpf_gen__record_relo_core(obj->gen_loader, &cr); break; } default: continue; } } return 0; } static int bpf_object__load_progs(struct bpf_object *obj, int log_level) { struct bpf_program *prog; size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; err = bpf_object__sanitize_prog(obj, prog); if (err) return err; } for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; if (prog_is_subprog(obj, prog)) continue; if (!prog->autoload) { pr_debug("prog '%s': skipped loading\n", prog->name); continue; } prog->log_level |= log_level; if (obj->gen_loader) bpf_program_record_relos(prog); err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, obj->license, obj->kern_version, &prog->fd); if (err) { pr_warn("prog '%s': failed to load: %d\n", prog->name, err); return err; } } bpf_object__free_relocs(obj); return 0; } static const struct bpf_sec_def *find_sec_def(const char *sec_name); static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) { struct bpf_program *prog; int err; bpf_object__for_each_program(prog, obj) { prog->sec_def = find_sec_def(prog->sec_name); if (!prog->sec_def) { /* couldn't guess, but user might manually specify */ pr_debug("prog '%s': unrecognized ELF section name '%s'\n", prog->name, prog->sec_name); continue; } prog->type = prog->sec_def->prog_type; prog->expected_attach_type = prog->sec_def->expected_attach_type; /* sec_def can have custom callback which should be called * after bpf_program is initialized to adjust its properties */ if (prog->sec_def->prog_setup_fn) { err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); if (err < 0) { pr_warn("prog '%s': failed to initialize: %d\n", prog->name, err); return err; } } } return 0; } static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, const char *obj_name, const struct bpf_object_open_opts *opts) { const char *kconfig, *btf_tmp_path, *token_path; struct bpf_object *obj; int err; char *log_buf; size_t log_size; __u32 log_level; if (obj_buf && !obj_name) return ERR_PTR(-EINVAL); if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", path ? 
: "(mem buf)"); return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } if (!OPTS_VALID(opts, bpf_object_open_opts)) return ERR_PTR(-EINVAL); obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; if (obj_buf) { path = obj_name; pr_debug("loading object '%s' from buffer\n", obj_name); } else { pr_debug("loading object from %s\n", path); } log_buf = OPTS_GET(opts, kernel_log_buf, NULL); log_size = OPTS_GET(opts, kernel_log_size, 0); log_level = OPTS_GET(opts, kernel_log_level, 0); if (log_size > UINT_MAX) return ERR_PTR(-EINVAL); if (log_size && !log_buf) return ERR_PTR(-EINVAL); token_path = OPTS_GET(opts, bpf_token_path, NULL); /* if user didn't specify bpf_token_path explicitly, check if * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path * option */ if (!token_path) token_path = getenv("LIBBPF_BPF_TOKEN_PATH"); if (token_path && strlen(token_path) >= PATH_MAX) return ERR_PTR(-ENAMETOOLONG); obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); if (IS_ERR(obj)) return obj; obj->log_buf = log_buf; obj->log_size = log_size; obj->log_level = log_level; if (token_path) { obj->token_path = strdup(token_path); if (!obj->token_path) { err = -ENOMEM; goto out; } } btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); if (btf_tmp_path) { if (strlen(btf_tmp_path) >= PATH_MAX) { err = -ENAMETOOLONG; goto out; } obj->btf_custom_path = strdup(btf_tmp_path); if (!obj->btf_custom_path) { err = -ENOMEM; goto out; } } kconfig = OPTS_GET(opts, kconfig, NULL); if (kconfig) { obj->kconfig = strdup(kconfig); if (!obj->kconfig) { err = -ENOMEM; goto out; } } err = bpf_object__elf_init(obj); err = err ? : bpf_object__elf_collect(obj); err = err ? : bpf_object__collect_externs(obj); err = err ? : bpf_object_fixup_btf(obj); err = err ? : bpf_object__init_maps(obj, opts); err = err ? : bpf_object_init_progs(obj, opts); err = err ? 
: bpf_object__collect_relos(obj); if (err) goto out; bpf_object__elf_finish(obj); return obj; out: bpf_object__close(obj); return ERR_PTR(err); } struct bpf_object * bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) { if (!path) return libbpf_err_ptr(-EINVAL); return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); } struct bpf_object *bpf_object__open(const char *path) { return bpf_object__open_file(path, NULL); } struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts) { char tmp_name[64]; if (!obj_buf || obj_buf_sz == 0) return libbpf_err_ptr(-EINVAL); /* create a (quite useless) default "name" for this memory buffer object */ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); } static int bpf_object_unload(struct bpf_object *obj) { size_t i; if (!obj) return libbpf_err(-EINVAL); for (i = 0; i < obj->nr_maps; i++) { zclose(obj->maps[i].fd); if (obj->maps[i].st_ops) zfree(&obj->maps[i].st_ops->kern_vdata); } for (i = 0; i < obj->nr_programs; i++) bpf_program__unload(&obj->programs[i]); return 0; } static int bpf_object__sanitize_maps(struct bpf_object *obj) { struct bpf_map *m; bpf_object__for_each_map(m, obj) { if (!bpf_map__is_internal(m)) continue; if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) m->def.map_flags &= ~BPF_F_MMAPABLE; } return 0; } typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, const char *sym_name, void *ctx); static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) { char sym_type, sym_name[500]; unsigned long long sym_addr; int ret, err = 0; FILE *f; f = fopen("/proc/kallsyms", "re"); if (!f) { err = -errno; pr_warn("failed to open /proc/kallsyms: %d\n", err); return err; } while (true) { ret = fscanf(f, "%llx %c %499s%*[^\n]\n", &sym_addr, &sym_type, sym_name); if (ret == EOF && feof(f)) break; if (ret != 3) { pr_warn("failed to read kallsyms entry: %d\n", ret); err = -EINVAL; break; } err = cb(sym_addr, sym_type, sym_name, ctx); if (err) break; } fclose(f); return err; } static int kallsyms_cb(unsigned long long sym_addr, char sym_type, const char *sym_name, void *ctx) { struct bpf_object *obj = ctx; const struct btf_type *t; struct extern_desc *ext; char *res; res = strstr(sym_name, ".llvm."); if (sym_type == 'd' && res) ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name); else ext = find_extern_by_name(obj, sym_name); if (!ext || ext->type != EXT_KSYM) return 0; t = btf__type_by_id(obj->btf, ext->btf_id); if (!btf_is_var(t)) return 0; if (ext->is_set && ext->ksym.addr != sym_addr) { pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n", sym_name, ext->ksym.addr, sym_addr); return -EINVAL; } if (!ext->is_set) { ext->is_set = true; ext->ksym.addr = sym_addr; pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr); } return 0; } static int bpf_object__read_kallsyms_file(struct bpf_object *obj) { return libbpf_kallsyms_parse(kallsyms_cb, obj); } static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, __u16 kind, struct btf **res_btf, struct module_btf **res_mod_btf) { struct module_btf *mod_btf; struct btf *btf; int i, id, err; btf = obj->btf_vmlinux; mod_btf = NULL; id = btf__find_by_name_kind(btf, ksym_name, kind); if (id == -ENOENT) { err = load_module_btfs(obj); if (err) return err; for (i = 0; i < obj->btf_module_cnt; i++) { /* we assume module_btf's BTF FD is 
always >0 */ mod_btf = &obj->btf_modules[i]; btf = mod_btf->btf; id = btf__find_by_name_kind_own(btf, ksym_name, kind); if (id != -ENOENT) break; } } if (id <= 0) return -ESRCH; *res_btf = btf; *res_mod_btf = mod_btf; return id; } static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, struct extern_desc *ext) { const struct btf_type *targ_var, *targ_type; __u32 targ_type_id, local_type_id; struct module_btf *mod_btf = NULL; const char *targ_var_name; struct btf *btf = NULL; int id, err; id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); if (id < 0) { if (id == -ESRCH && ext->is_weak) return 0; pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", ext->name); return id; } /* find local type_id */ local_type_id = ext->ksym.type_id; /* find target type_id */ targ_var = btf__type_by_id(btf, id); targ_var_name = btf__name_by_offset(btf, targ_var->name_off); targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); err = bpf_core_types_are_compat(obj->btf, local_type_id, btf, targ_type_id); if (err <= 0) { const struct btf_type *local_type; const char *targ_name, *local_name; local_type = btf__type_by_id(obj->btf, local_type_id); local_name = btf__name_by_offset(obj->btf, local_type->name_off); targ_name = btf__name_by_offset(btf, targ_type->name_off); pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", ext->name, local_type_id, btf_kind_str(local_type), local_name, targ_type_id, btf_kind_str(targ_type), targ_name); return -EINVAL; } ext->is_set = true; ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; ext->ksym.kernel_btf_id = id; pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", ext->name, id, btf_kind_str(targ_var), targ_var_name); return 0; } static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, struct extern_desc *ext) { int local_func_proto_id, kfunc_proto_id, kfunc_id; struct module_btf *mod_btf = NULL; const struct btf_type *kern_func; struct btf *kern_btf = NULL; int ret; local_func_proto_id = ext->ksym.type_id; kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf); if (kfunc_id < 0) { if (kfunc_id == -ESRCH && ext->is_weak) return 0; pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", ext->name); return kfunc_id; } kern_func = btf__type_by_id(kern_btf, kfunc_id); kfunc_proto_id = kern_func->type; ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, kern_btf, kfunc_proto_id); if (ret <= 0) { if (ext->is_weak) return 0; pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", ext->name, local_func_proto_id, mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); return -EINVAL; } /* set index for module BTF fd in fd_array, if unset */ if (mod_btf && !mod_btf->fd_array_idx) { /* insn->off is s16 */ if (obj->fd_array_cnt == INT16_MAX) { pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", ext->name, mod_btf->fd_array_idx); return -E2BIG; } /* Cannot use index 0 for module BTF fd */ if (!obj->fd_array_cnt) obj->fd_array_cnt = 1; ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), obj->fd_array_cnt + 1); if (ret) return ret; mod_btf->fd_array_idx = obj->fd_array_cnt; /* we assume module BTF FD is always >0 */ obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; } ext->is_set = true; ext->ksym.kernel_btf_id = kfunc_id; ext->ksym.btf_fd_idx = mod_btf ? 
mod_btf->fd_array_idx : 0; /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() * populates FD into ld_imm64 insn when it's used to point to kfunc. * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. */ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id); return 0; } static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) { const struct btf_type *t; struct extern_desc *ext; int i, err; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type != EXT_KSYM || !ext->ksym.type_id) continue; if (obj->gen_loader) { ext->is_set = true; ext->ksym.kernel_btf_obj_fd = 0; ext->ksym.kernel_btf_id = 0; continue; } t = btf__type_by_id(obj->btf, ext->btf_id); if (btf_is_var(t)) err = bpf_object__resolve_ksym_var_btf_id(obj, ext); else err = bpf_object__resolve_ksym_func_btf_id(obj, ext); if (err) return err; } return 0; } static int bpf_object__resolve_externs(struct bpf_object *obj, const char *extra_kconfig) { bool need_config = false, need_kallsyms = false; bool need_vmlinux_btf = false; struct extern_desc *ext; void *kcfg_data = NULL; int err, i; if (obj->nr_extern == 0) return 0; if (obj->kconfig_map_idx >= 0) kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type == EXT_KSYM) { if (ext->ksym.type_id) need_vmlinux_btf = true; else need_kallsyms = true; continue; } else if (ext->type == EXT_KCFG) { void *ext_ptr = kcfg_data + ext->kcfg.data_off; __u64 value = 0; /* Kconfig externs need actual /proc/config.gz */ if (str_has_pfx(ext->name, "CONFIG_")) { need_config = true; continue; } /* Virtual kcfg externs are handled specially by libbpf */ if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { value = get_kernel_version(); if (!value) { pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name); return -EINVAL; } } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) { value = kernel_supports(obj, FEAT_BPF_COOKIE); } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) { value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER); } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) { /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed * __kconfig externs, where LINUX_ ones are virtual and filled out * by libbpf itself (their values don't come from Kconfig). * If LINUX_xxx variable is not recognized by libbpf, but is marked * __weak, it defaults to zero value, just like for CONFIG_xxx * externs.
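* Example BPF-side declarations (illustrative):
*   extern int LINUX_KERNEL_VERSION __kconfig;
*   extern int CONFIG_HZ __kconfig __weak;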
*/ pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name); return -EINVAL; } err = set_kcfg_value_num(ext, ext_ptr, value); if (err) return err; pr_debug("extern (kcfg) '%s': set to 0x%llx\n", ext->name, (long long)value); } else { pr_warn("extern '%s': unrecognized extern kind\n", ext->name); return -EINVAL; } } if (need_config && extra_kconfig) { err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); if (err) return -EINVAL; need_config = false; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (ext->type == EXT_KCFG && !ext->is_set) { need_config = true; break; } } } if (need_config) { err = bpf_object__read_kconfig_file(obj, kcfg_data); if (err) return -EINVAL; } if (need_kallsyms) { err = bpf_object__read_kallsyms_file(obj); if (err) return -EINVAL; } if (need_vmlinux_btf) { err = bpf_object__resolve_ksyms_btf_id(obj); if (err) return -EINVAL; } for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; if (!ext->is_set && !ext->is_weak) { pr_warn("extern '%s' (strong): not resolved\n", ext->name); return -ESRCH; } else if (!ext->is_set) { pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", ext->name); } } return 0; } static void bpf_map_prepare_vdata(const struct bpf_map *map) { const struct btf_type *type; struct bpf_struct_ops *st_ops; __u32 i; st_ops = map->st_ops; type = btf__type_by_id(map->obj->btf, st_ops->type_id); for (i = 0; i < btf_vlen(type); i++) { struct bpf_program *prog = st_ops->progs[i]; void *kern_data; int prog_fd; if (!prog) continue; prog_fd = bpf_program__fd(prog); kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; *(unsigned long *)kern_data = prog_fd; } } static int bpf_object_prepare_struct_ops(struct bpf_object *obj) { struct bpf_map *map; int i; for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; if (!bpf_map__is_struct_ops(map)) continue; if (!map->autocreate) continue; bpf_map_prepare_vdata(map); } return 0; } static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) { int err, i; if (!obj) return libbpf_err(-EINVAL); if (obj->loaded) { pr_warn("object '%s': load can't be attempted twice\n", obj->name); return libbpf_err(-EINVAL); } /* Disallow kernel loading programs of non-native endianness but * permit cross-endian creation of "light skeleton". */ if (obj->gen_loader) { bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); } else if (!is_native_endianness(obj)) { pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name); return libbpf_err(-LIBBPF_ERRNO__ENDIAN); } err = bpf_object_prepare_token(obj); err = err ? : bpf_object__probe_loading(obj); err = err ? : bpf_object__load_vmlinux_btf(obj, false); err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); err = err ? : bpf_object__sanitize_maps(obj); err = err ? : bpf_object__init_kern_struct_ops_maps(obj); err = err ? : bpf_object_adjust_struct_ops_autoload(obj); err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); err = err ? : bpf_object__sanitize_and_load_btf(obj); err = err ? : bpf_object__create_maps(obj); err = err ? : bpf_object__load_progs(obj, extra_log_level); err = err ? : bpf_object_init_prog_arrays(obj); err = err ? 
: bpf_object_prepare_struct_ops(obj); if (obj->gen_loader) { /* reset FDs */ if (obj->btf) btf__set_fd(obj->btf, -1); if (!err) err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); } /* clean up fd_array */ zfree(&obj->fd_array); /* clean up module BTFs */ for (i = 0; i < obj->btf_module_cnt; i++) { close(obj->btf_modules[i].fd); btf__free(obj->btf_modules[i].btf); free(obj->btf_modules[i].name); } free(obj->btf_modules); /* clean up vmlinux BTF */ btf__free(obj->btf_vmlinux); obj->btf_vmlinux = NULL; obj->loaded = true; /* doesn't matter if successfully or not */ if (err) goto out; return 0; out: /* unpin any maps that were auto-pinned during load */ for (i = 0; i < obj->nr_maps; i++) if (obj->maps[i].pinned && !obj->maps[i].reused) bpf_map__unpin(&obj->maps[i], NULL); bpf_object_unload(obj); pr_warn("failed to load object '%s'\n", obj->path); return libbpf_err(err); } int bpf_object__load(struct bpf_object *obj) { return bpf_object_load(obj, 0, NULL); } static int make_parent_dir(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; char *dname, *dir; int err = 0; dname = strdup(path); if (dname == NULL) return -ENOMEM; dir = dirname(dname); if (mkdir(dir, 0700) && errno != EEXIST) err = -errno; free(dname); if (err) { cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warn("failed to mkdir %s: %s\n", path, cp); } return err; } static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; struct statfs st_fs; char *dname, *dir; int err = 0; if (path == NULL) return -EINVAL; dname = strdup(path); if (dname == NULL) return -ENOMEM; dir = dirname(dname); if (statfs(dir, &st_fs)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("failed to statfs %s: %s\n", dir, cp); err = -errno; } free(dname); if (!err && st_fs.f_type != BPF_FS_MAGIC) { pr_warn("specified path %s is not on BPF FS\n", path); err = -EINVAL; } return err; } int bpf_program__pin(struct bpf_program *prog, const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; int err; if (prog->fd < 0) { pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name); return libbpf_err(-EINVAL); } err = make_parent_dir(path); if (err) return libbpf_err(err); err = check_path(path); if (err) return libbpf_err(err); if (bpf_obj_pin(prog->fd, path)) { err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp); return libbpf_err(err); } pr_debug("prog '%s': pinned at '%s'\n", prog->name, path); return 0; } int bpf_program__unpin(struct bpf_program *prog, const char *path) { int err; if (prog->fd < 0) { pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name); return libbpf_err(-EINVAL); } err = check_path(path); if (err) return libbpf_err(err); err = unlink(path); if (err) return libbpf_err(-errno); pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path); return 0; } int bpf_map__pin(struct bpf_map *map, const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; int err; if (map == NULL) { pr_warn("invalid map pointer\n"); return libbpf_err(-EINVAL); } if (map->fd < 0) { pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name); return libbpf_err(-EINVAL); } if (map->pin_path) { if (path && strcmp(path, map->pin_path)) { pr_warn("map '%s' already has pin path '%s' different from '%s'\n", bpf_map__name(map), map->pin_path, path); return libbpf_err(-EINVAL); } else if (map->pinned) { pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", bpf_map__name(map), 
map->pin_path); return 0; } } else { if (!path) { pr_warn("missing a path to pin map '%s' at\n", bpf_map__name(map)); return libbpf_err(-EINVAL); } else if (map->pinned) { pr_warn("map '%s' already pinned\n", bpf_map__name(map)); return libbpf_err(-EEXIST); } map->pin_path = strdup(path); if (!map->pin_path) { err = -errno; goto out_err; } } err = make_parent_dir(map->pin_path); if (err) return libbpf_err(err); err = check_path(map->pin_path); if (err) return libbpf_err(err); if (bpf_obj_pin(map->fd, map->pin_path)) { err = -errno; goto out_err; } map->pinned = true; pr_debug("pinned map '%s'\n", map->pin_path); return 0; out_err: cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warn("failed to pin map: %s\n", cp); return libbpf_err(err); } int bpf_map__unpin(struct bpf_map *map, const char *path) { int err; if (map == NULL) { pr_warn("invalid map pointer\n"); return libbpf_err(-EINVAL); } if (map->pin_path) { if (path && strcmp(path, map->pin_path)) { pr_warn("map '%s' already has pin path '%s' different from '%s'\n", bpf_map__name(map), map->pin_path, path); return libbpf_err(-EINVAL); } path = map->pin_path; } else if (!path) { pr_warn("no path to unpin map '%s' from\n", bpf_map__name(map)); return libbpf_err(-EINVAL); } err = check_path(path); if (err) return libbpf_err(err); err = unlink(path); if (err != 0) return libbpf_err(-errno); map->pinned = false; pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); return 0; } int bpf_map__set_pin_path(struct bpf_map *map, const char *path) { char *new = NULL; if (path) { new = strdup(path); if (!new) return libbpf_err(-errno); } free(map->pin_path); map->pin_path = new; return 0; } __alias(bpf_map__pin_path) const char *bpf_map__get_pin_path(const struct bpf_map *map); const char *bpf_map__pin_path(const struct bpf_map *map) { return map->pin_path; } bool bpf_map__is_pinned(const struct bpf_map *map) { return map->pinned; } static void sanitize_pin_path(char *s) { /* bpffs disallows periods in path names */ while (*s) { if (*s == '.') *s = '_'; s++; } } int bpf_object__pin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; int err; if (!obj) return libbpf_err(-ENOENT); if (!obj->loaded) { pr_warn("object not yet loaded; load it first\n"); return libbpf_err(-ENOENT); } bpf_object__for_each_map(map, obj) { char *pin_path = NULL; char buf[PATH_MAX]; if (!map->autocreate) continue; if (path) { err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); if (err) goto err_unpin_maps; sanitize_pin_path(buf); pin_path = buf; } else if (!map->pin_path) { continue; } err = bpf_map__pin(map, pin_path); if (err) goto err_unpin_maps; } return 0; err_unpin_maps: while ((map = bpf_object__prev_map(obj, map))) { if (!map->pin_path) continue; bpf_map__unpin(map, NULL); } return libbpf_err(err); } int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; int err; if (!obj) return libbpf_err(-ENOENT); bpf_object__for_each_map(map, obj) { char *pin_path = NULL; char buf[PATH_MAX]; if (path) { err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); if (err) return libbpf_err(err); sanitize_pin_path(buf); pin_path = buf; } else if (!map->pin_path) { continue; } err = bpf_map__unpin(map, pin_path); if (err) return libbpf_err(err); } return 0; } int bpf_object__pin_programs(struct bpf_object *obj, const char *path) { struct bpf_program *prog; char buf[PATH_MAX]; int err; if (!obj) return libbpf_err(-ENOENT); if (!obj->loaded) { pr_warn("object not yet loaded; load it 
first\n"); return libbpf_err(-ENOENT); } bpf_object__for_each_program(prog, obj) { err = pathname_concat(buf, sizeof(buf), path, prog->name); if (err) goto err_unpin_programs; err = bpf_program__pin(prog, buf); if (err) goto err_unpin_programs; } return 0; err_unpin_programs: while ((prog = bpf_object__prev_program(obj, prog))) { if (pathname_concat(buf, sizeof(buf), path, prog->name)) continue; bpf_program__unpin(prog, buf); } return libbpf_err(err); } int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) { struct bpf_program *prog; int err; if (!obj) return libbpf_err(-ENOENT); bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; err = pathname_concat(buf, sizeof(buf), path, prog->name); if (err) return libbpf_err(err); err = bpf_program__unpin(prog, buf); if (err) return libbpf_err(err); } return 0; } int bpf_object__pin(struct bpf_object *obj, const char *path) { int err; err = bpf_object__pin_maps(obj, path); if (err) return libbpf_err(err); err = bpf_object__pin_programs(obj, path); if (err) { bpf_object__unpin_maps(obj, path); return libbpf_err(err); } return 0; } int bpf_object__unpin(struct bpf_object *obj, const char *path) { int err; err = bpf_object__unpin_programs(obj, path); if (err) return libbpf_err(err); err = bpf_object__unpin_maps(obj, path); if (err) return libbpf_err(err); return 0; } static void bpf_map__destroy(struct bpf_map *map) { if (map->inner_map) { bpf_map__destroy(map->inner_map); zfree(&map->inner_map); } zfree(&map->init_slots); map->init_slots_sz = 0; if (map->mmaped && map->mmaped != map->obj->arena_data) munmap(map->mmaped, bpf_map_mmap_sz(map)); map->mmaped = NULL; if (map->st_ops) { zfree(&map->st_ops->data); zfree(&map->st_ops->progs); zfree(&map->st_ops->kern_func_off); zfree(&map->st_ops); } zfree(&map->name); zfree(&map->real_name); zfree(&map->pin_path); if (map->fd >= 0) zclose(map->fd); } void bpf_object__close(struct bpf_object *obj) { size_t i; if (IS_ERR_OR_NULL(obj)) return; usdt_manager_free(obj->usdt_man); obj->usdt_man = NULL; bpf_gen__free(obj->gen_loader); bpf_object__elf_finish(obj); bpf_object_unload(obj); btf__free(obj->btf); btf__free(obj->btf_vmlinux); btf_ext__free(obj->btf_ext); for (i = 0; i < obj->nr_maps; i++) bpf_map__destroy(&obj->maps[i]); zfree(&obj->btf_custom_path); zfree(&obj->kconfig); for (i = 0; i < obj->nr_extern; i++) zfree(&obj->externs[i].essent_name); zfree(&obj->externs); obj->nr_extern = 0; zfree(&obj->maps); obj->nr_maps = 0; if (obj->programs && obj->nr_programs) { for (i = 0; i < obj->nr_programs; i++) bpf_program__exit(&obj->programs[i]); } zfree(&obj->programs); zfree(&obj->feat_cache); zfree(&obj->token_path); if (obj->token_fd > 0) close(obj->token_fd); zfree(&obj->arena_data); free(obj); } const char *bpf_object__name(const struct bpf_object *obj) { return obj ? obj->name : libbpf_err_ptr(-EINVAL); } unsigned int bpf_object__kversion(const struct bpf_object *obj) { return obj ? obj->kern_version : 0; } int bpf_object__token_fd(const struct bpf_object *obj) { return obj->token_fd ?: -1; } struct btf *bpf_object__btf(const struct bpf_object *obj) { return obj ? obj->btf : NULL; } int bpf_object__btf_fd(const struct bpf_object *obj) { return obj->btf ? 
btf__fd(obj->btf) : -1; } int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) { if (obj->loaded) return libbpf_err(-EINVAL); obj->kern_version = kern_version; return 0; } int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) { struct bpf_gen *gen; if (!opts) return -EFAULT; if (!OPTS_VALID(opts, gen_loader_opts)) return -EINVAL; gen = calloc(sizeof(*gen), 1); if (!gen) return -ENOMEM; gen->opts = opts; gen->swapped_endian = !is_native_endianness(obj); obj->gen_loader = gen; return 0; } static struct bpf_program * __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, bool forward) { size_t nr_programs = obj->nr_programs; ssize_t idx; if (!nr_programs) return NULL; if (!p) /* Iter from the beginning */ return forward ? &obj->programs[0] : &obj->programs[nr_programs - 1]; if (p->obj != obj) { pr_warn("error: program handler doesn't match object\n"); return errno = EINVAL, NULL; } idx = (p - obj->programs) + (forward ? 1 : -1); if (idx >= obj->nr_programs || idx < 0) return NULL; return &obj->programs[idx]; } struct bpf_program * bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) { struct bpf_program *prog = prev; do { prog = __bpf_program__iter(prog, obj, true); } while (prog && prog_is_subprog(obj, prog)); return prog; } struct bpf_program * bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) { struct bpf_program *prog = next; do { prog = __bpf_program__iter(prog, obj, false); } while (prog && prog_is_subprog(obj, prog)); return prog; } void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) { prog->prog_ifindex = ifindex; } const char *bpf_program__name(const struct bpf_program *prog) { return prog->name; } const char *bpf_program__section_name(const struct bpf_program *prog) { return prog->sec_name; } bool bpf_program__autoload(const struct bpf_program *prog) { return prog->autoload; } int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) { if (prog->obj->loaded) return libbpf_err(-EINVAL); prog->autoload = autoload; return 0; } bool bpf_program__autoattach(const struct bpf_program *prog) { return prog->autoattach; } void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach) { prog->autoattach = autoattach; } const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) { return prog->insns; } size_t bpf_program__insn_cnt(const struct bpf_program *prog) { return prog->insns_cnt; } int bpf_program__set_insns(struct bpf_program *prog, struct bpf_insn *new_insns, size_t new_insn_cnt) { struct bpf_insn *insns; if (prog->obj->loaded) return -EBUSY; insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); /* NULL is a valid return from reallocarray if the new count is zero */ if (!insns && new_insn_cnt) { pr_warn("prog '%s': failed to realloc prog code\n", prog->name); return -ENOMEM; } memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); prog->insns = insns; prog->insns_cnt = new_insn_cnt; return 0; } int bpf_program__fd(const struct bpf_program *prog) { if (!prog) return libbpf_err(-EINVAL); if (prog->fd < 0) return libbpf_err(-ENOENT); return prog->fd; } __alias(bpf_program__type) enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) { return prog->type; } static size_t custom_sec_def_cnt; static struct bpf_sec_def *custom_sec_defs; static struct bpf_sec_def custom_fallback_def; static bool 
has_custom_fallback_def; static int last_custom_sec_def_handler_id; int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { if (prog->obj->loaded) return libbpf_err(-EBUSY); /* if type is not changed, do nothing */ if (prog->type == type) return 0; prog->type = type; /* If a program type was changed, we need to reset associated SEC() * handler, as it will be invalid now. The only exception is a generic * fallback handler, which by definition is program type-agnostic and * is a catch-all custom handler, optionally set by the application, * so should be able to handle any type of BPF program. */ if (prog->sec_def != &custom_fallback_def) prog->sec_def = NULL; return 0; } __alias(bpf_program__expected_attach_type) enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) { return prog->expected_attach_type; } int bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type) { if (prog->obj->loaded) return libbpf_err(-EBUSY); prog->expected_attach_type = type; return 0; } __u32 bpf_program__flags(const struct bpf_program *prog) { return prog->prog_flags; } int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) { if (prog->obj->loaded) return libbpf_err(-EBUSY); prog->prog_flags = flags; return 0; } __u32 bpf_program__log_level(const struct bpf_program *prog) { return prog->log_level; } int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) { if (prog->obj->loaded) return libbpf_err(-EBUSY); prog->log_level = log_level; return 0; } const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) { *log_size = prog->log_size; return prog->log_buf; } int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) { if (log_size && !log_buf) return -EINVAL; if (prog->log_size > UINT_MAX) return -EINVAL; if (prog->obj->loaded) return -EBUSY; prog->log_buf = log_buf; prog->log_size = log_size; return 0; } #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ .sec = (char *)sec_pfx, \ .prog_type = BPF_PROG_TYPE_##ptype, \ .expected_attach_type = atype, \ .cookie = (long)(flags), \ .prog_prepare_load_fn = libbpf_prepare_prog_load, \ __VA_ARGS__ \ } static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); static const struct bpf_sec_def section_defs[] = { SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE), SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE), SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE), SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session), SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE), SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE), SEC_DEF("tracepoint+", 
TRACEPOINT, 0, SEC_NONE, attach_tp), SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace), SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF), SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT), SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE), SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE), SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE), SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE), SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE), SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT), SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT), SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT), SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT), SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE), SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT), SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT), SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT), SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT), SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT), SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE), SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE), SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE), SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT), SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE), SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE), SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE), SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE), SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE), SEC_DEF("cgroup/connect6", 
CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE), SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE), SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE), SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE), SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE), SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE), SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), }; int libbpf_register_prog_handler(const char *sec, enum bpf_prog_type prog_type, enum bpf_attach_type exp_attach_type, const struct libbpf_prog_handler_opts *opts) { struct bpf_sec_def *sec_def; if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) return libbpf_err(-EINVAL); if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ return libbpf_err(-E2BIG); if (sec) { sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, sizeof(*sec_def)); if (!sec_def) return libbpf_err(-ENOMEM); custom_sec_defs = sec_def; sec_def = &custom_sec_defs[custom_sec_def_cnt]; } else { if (has_custom_fallback_def) return libbpf_err(-EBUSY); sec_def = &custom_fallback_def; } sec_def->sec = sec ? 
strdup(sec) : NULL; if (sec && !sec_def->sec) return libbpf_err(-ENOMEM); sec_def->prog_type = prog_type; sec_def->expected_attach_type = exp_attach_type; sec_def->cookie = OPTS_GET(opts, cookie, 0); sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); sec_def->handler_id = ++last_custom_sec_def_handler_id; if (sec) custom_sec_def_cnt++; else has_custom_fallback_def = true; return sec_def->handler_id; } int libbpf_unregister_prog_handler(int handler_id) { struct bpf_sec_def *sec_defs; int i; if (handler_id <= 0) return libbpf_err(-EINVAL); if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); has_custom_fallback_def = false; return 0; } for (i = 0; i < custom_sec_def_cnt; i++) { if (custom_sec_defs[i].handler_id == handler_id) break; } if (i == custom_sec_def_cnt) return libbpf_err(-ENOENT); free(custom_sec_defs[i].sec); for (i = i + 1; i < custom_sec_def_cnt; i++) custom_sec_defs[i - 1] = custom_sec_defs[i]; custom_sec_def_cnt--; /* try to shrink the array, but it's ok if we couldn't */ sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); /* if new count is zero, reallocarray can return a valid NULL result; * in this case the previous pointer will be freed, so we *have to* * reassign old pointer to the new value (even if it's NULL) */ if (sec_defs || custom_sec_def_cnt == 0) custom_sec_defs = sec_defs; return 0; } static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name) { size_t len = strlen(sec_def->sec); /* "type/" always has to have proper SEC("type/extras") form */ if (sec_def->sec[len - 1] == '/') { if (str_has_pfx(sec_name, sec_def->sec)) return true; return false; } /* "type+" means it can be either exact SEC("type") or * well-formed SEC("type/extras") with proper '/' separator */ if (sec_def->sec[len - 1] == '+') { len--; /* not even a prefix */ if (strncmp(sec_name, sec_def->sec, len) != 0) return false; /* exact match or has '/' separator */ if (sec_name[len] == '\0' || sec_name[len] == '/') return true; return false; } return strcmp(sec_name, sec_def->sec) == 0; } static const struct bpf_sec_def *find_sec_def(const char *sec_name) { const struct bpf_sec_def *sec_def; int i, n; n = custom_sec_def_cnt; for (i = 0; i < n; i++) { sec_def = &custom_sec_defs[i]; if (sec_def_matches(sec_def, sec_name)) return sec_def; } n = ARRAY_SIZE(section_defs); for (i = 0; i < n; i++) { sec_def = &section_defs[i]; if (sec_def_matches(sec_def, sec_name)) return sec_def; } if (has_custom_fallback_def) return &custom_fallback_def; return NULL; } #define MAX_TYPE_NAME_SIZE 32 static char *libbpf_get_type_names(bool attach_type) { int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; char *buf; buf = malloc(len); if (!buf) return NULL; buf[0] = '\0'; /* Forge string buf with all available names */ for (i = 0; i < ARRAY_SIZE(section_defs); i++) { const struct bpf_sec_def *sec_def = &section_defs[i]; if (attach_type) { if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) continue; if (!(sec_def->cookie & SEC_ATTACHABLE)) continue; } if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { free(buf); return NULL; } strcat(buf, " "); strcat(buf, section_defs[i].sec); } return buf; } int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type)
{ const struct bpf_sec_def *sec_def; char *type_names; if (!name) return libbpf_err(-EINVAL); sec_def = find_sec_def(name); if (sec_def) { *prog_type = sec_def->prog_type; *expected_attach_type = sec_def->expected_attach_type; return 0; } pr_debug("failed to guess program type from ELF section '%s'\n", name); type_names = libbpf_get_type_names(false); if (type_names != NULL) { pr_debug("supported section(type) names are:%s\n", type_names); free(type_names); } return libbpf_err(-ESRCH); } const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t) { if (t < 0 || t >= ARRAY_SIZE(attach_type_name)) return NULL; return attach_type_name[t]; } const char *libbpf_bpf_link_type_str(enum bpf_link_type t) { if (t < 0 || t >= ARRAY_SIZE(link_type_name)) return NULL; return link_type_name[t]; } const char *libbpf_bpf_map_type_str(enum bpf_map_type t) { if (t < 0 || t >= ARRAY_SIZE(map_type_name)) return NULL; return map_type_name[t]; } const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) { if (t < 0 || t >= ARRAY_SIZE(prog_type_name)) return NULL; return prog_type_name[t]; } static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, int sec_idx, size_t offset) { struct bpf_map *map; size_t i; for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; if (!bpf_map__is_struct_ops(map)) continue; if (map->sec_idx == sec_idx && map->sec_offset <= offset && offset - map->sec_offset < map->def.value_size) return map; } return NULL; } /* Collect the reloc from ELF, populate the st_ops->progs[], and update * st_ops->data for shadow type. */ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { const struct btf_type *type; const struct btf_member *member; struct bpf_struct_ops *st_ops; struct bpf_program *prog; unsigned int shdr_idx; const struct btf *btf; struct bpf_map *map; unsigned int moff, insn_idx; const char *name; __u32 member_idx; Elf64_Sym *sym; Elf64_Rel *rel; int i, nrels; btf = obj->btf; nrels = shdr->sh_size / shdr->sh_entsize; for (i = 0; i < nrels; i++) { rel = elf_rel_by_idx(data, i); if (!rel) { pr_warn("struct_ops reloc: failed to get %d reloc\n", i); return -LIBBPF_ERRNO__FORMAT; } sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); if (!sym) { pr_warn("struct_ops reloc: symbol %zx not found\n", (size_t)ELF64_R_SYM(rel->r_info)); return -LIBBPF_ERRNO__FORMAT; } name = elf_sym_str(obj, sym->st_name) ?: ""; map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); if (!map) { pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", (size_t)rel->r_offset); return -EINVAL; } moff = rel->r_offset - map->sec_offset; shdr_idx = sym->st_shndx; st_ops = map->st_ops; pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", map->name, (long long)(rel->r_info >> 32), (long long)sym->st_value, shdr_idx, (size_t)rel->r_offset, map->sec_offset, sym->st_name, name); if (shdr_idx >= SHN_LORESERVE) { pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", map->name, (size_t)rel->r_offset, shdr_idx); return -LIBBPF_ERRNO__RELOC; } if (sym->st_value % BPF_INSN_SZ) { pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", map->name, (unsigned long long)sym->st_value); return -LIBBPF_ERRNO__FORMAT; } insn_idx = sym->st_value / BPF_INSN_SZ; type = btf__type_by_id(btf, st_ops->type_id); member = find_member_by_offset(type, moff * 8); if (!member) { pr_warn("struct_ops reloc %s: cannot find member 
at moff %u\n", map->name, moff); return -EINVAL; } member_idx = member - btf_members(type); name = btf__name_by_offset(btf, member->name_off); if (!resolve_func_ptr(btf, member->type, NULL)) { pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", map->name, name); return -EINVAL; } prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); if (!prog) { pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", map->name, shdr_idx, name); return -EINVAL; } /* prevent the use of BPF prog with invalid type */ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", map->name, prog->name); return -EINVAL; } st_ops->progs[member_idx] = prog; /* st_ops->data will be exposed to users, being returned by * bpf_map__initial_value() as a pointer to the shadow * type. All function pointers in the original struct type * should be converted to a pointer to struct bpf_program * in the shadow type. */ *((struct bpf_program **)(st_ops->data + moff)) = prog; } return 0; } #define BTF_TRACE_PREFIX "btf_trace_" #define BTF_LSM_PREFIX "bpf_lsm_" #define BTF_ITER_PREFIX "bpf_iter_" #define BTF_MAX_NAME_SIZE 128 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, const char **prefix, int *kind) { switch (attach_type) { case BPF_TRACE_RAW_TP: *prefix = BTF_TRACE_PREFIX; *kind = BTF_KIND_TYPEDEF; break; case BPF_LSM_MAC: case BPF_LSM_CGROUP: *prefix = BTF_LSM_PREFIX; *kind = BTF_KIND_FUNC; break; case BPF_TRACE_ITER: *prefix = BTF_ITER_PREFIX; *kind = BTF_KIND_FUNC; break; default: *prefix = ""; *kind = BTF_KIND_FUNC; } } static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, const char *name, __u32 kind) { char btf_type_name[BTF_MAX_NAME_SIZE]; int ret; ret = snprintf(btf_type_name, sizeof(btf_type_name), "%s%s", prefix, name); /* snprintf returns the number of characters written excluding the * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it * indicates truncation. 
*/ if (ret < 0 || ret >= sizeof(btf_type_name)) return -ENAMETOOLONG; return btf__find_by_name_kind(btf, btf_type_name, kind); } static inline int find_attach_btf_id(struct btf *btf, const char *name, enum bpf_attach_type attach_type) { const char *prefix; int kind; btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); return find_btf_by_prefix_kind(btf, prefix, name, kind); } int libbpf_find_vmlinux_btf_id(const char *name, enum bpf_attach_type attach_type) { struct btf *btf; int err; btf = btf__load_vmlinux_btf(); err = libbpf_get_error(btf); if (err) { pr_warn("vmlinux BTF is not found\n"); return libbpf_err(err); } err = find_attach_btf_id(btf, name, attach_type); if (err <= 0) pr_warn("%s is not found in vmlinux BTF\n", name); btf__free(btf); return libbpf_err(err); } static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) { struct bpf_prog_info info; __u32 info_len = sizeof(info); struct btf *btf; int err; memset(&info, 0, info_len); err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); if (err) { pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", attach_prog_fd, err); return err; } err = -EINVAL; if (!info.btf_id) { pr_warn("The target program doesn't have BTF\n"); goto out; } btf = btf__load_from_kernel_by_id(info.btf_id); err = libbpf_get_error(btf); if (err) { pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); goto out; } err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); btf__free(btf); if (err <= 0) { pr_warn("%s is not found in prog's BTF\n", name); goto out; } out: return err; } static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, enum bpf_attach_type attach_type, int *btf_obj_fd, int *btf_type_id) { int ret, i, mod_len; const char *fn_name, *mod_name = NULL; fn_name = strchr(attach_name, ':'); if (fn_name) { mod_name = attach_name; mod_len = fn_name - mod_name; fn_name++; } if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) { ret = find_attach_btf_id(obj->btf_vmlinux, mod_name ? fn_name : attach_name, attach_type); if (ret > 0) { *btf_obj_fd = 0; /* vmlinux BTF */ *btf_type_id = ret; return 0; } if (ret != -ENOENT) return ret; } ret = load_module_btfs(obj); if (ret) return ret; for (i = 0; i < obj->btf_module_cnt; i++) { const struct module_btf *mod = &obj->btf_modules[i]; if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0) continue; ret = find_attach_btf_id(mod->btf, mod_name ? 
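/* Usage sketch (illustrative, not part of this file): resolving the BTF ID
 * of an attach target in vmlinux BTF. "tcp_v4_connect" is an example kernel
 * function; per btf_get_kernel_prefix_kind() above, raw tracepoint targets
 * are instead looked up with the "btf_trace_" prefix:
 *
 *	int btf_id = libbpf_find_vmlinux_btf_id("tcp_v4_connect", BPF_TRACE_FENTRY);
 *	if (btf_id < 0)
 *		return btf_id;
 *
 * find_kernel_btf_id() additionally accepts "module:function" attach names
 * (module name hypothetical), searching module BTFs after vmlinux.
 */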
fn_name : attach_name, attach_type); if (ret > 0) { *btf_obj_fd = mod->fd; *btf_type_id = ret; return 0; } if (ret == -ENOENT) continue; return ret; } return -ESRCH; } static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, int *btf_obj_fd, int *btf_type_id) { enum bpf_attach_type attach_type = prog->expected_attach_type; __u32 attach_prog_fd = prog->attach_prog_fd; int err = 0; /* BPF program's BTF ID */ if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) { if (!attach_prog_fd) { pr_warn("prog '%s': attach program FD is not set\n", prog->name); return -EINVAL; } err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); if (err < 0) { pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n", prog->name, attach_prog_fd, attach_name, err); return err; } *btf_obj_fd = 0; *btf_type_id = err; return 0; } /* kernel/module BTF ID */ if (prog->obj->gen_loader) { bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); *btf_obj_fd = 0; *btf_type_id = 1; } else { err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id); } if (err) { pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", prog->name, attach_name, err); return err; } return 0; } int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { char *type_names; const struct bpf_sec_def *sec_def; if (!name) return libbpf_err(-EINVAL); sec_def = find_sec_def(name); if (!sec_def) { pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); type_names = libbpf_get_type_names(true); if (type_names != NULL) { pr_debug("attachable section(type) names are:%s\n", type_names); free(type_names); } return libbpf_err(-EINVAL); } if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) return libbpf_err(-EINVAL); if (!(sec_def->cookie & SEC_ATTACHABLE)) return libbpf_err(-EINVAL); *attach_type = sec_def->expected_attach_type; return 0; } int bpf_map__fd(const struct bpf_map *map) { if (!map) return libbpf_err(-EINVAL); if (!map_is_created(map)) return -1; return map->fd; } static bool map_uses_real_name(const struct bpf_map *map) { /* Since libbpf started to support custom .data.* and .rodata.* maps, * their user-visible name differs from kernel-visible name. Users see * such map's corresponding ELF section name as a map name. * This check distinguishes .data/.rodata from .data.* and .rodata.* * maps to know which name has to be returned to the user. 
*/ if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) return true; if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) return true; return false; } const char *bpf_map__name(const struct bpf_map *map) { if (!map) return NULL; if (map_uses_real_name(map)) return map->real_name; return map->name; } enum bpf_map_type bpf_map__type(const struct bpf_map *map) { return map->def.type; } int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->def.type = type; return 0; } __u32 bpf_map__map_flags(const struct bpf_map *map) { return map->def.map_flags; } int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->def.map_flags = flags; return 0; } __u64 bpf_map__map_extra(const struct bpf_map *map) { return map->map_extra; } int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->map_extra = map_extra; return 0; } __u32 bpf_map__numa_node(const struct bpf_map *map) { return map->numa_node; } int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->numa_node = numa_node; return 0; } __u32 bpf_map__key_size(const struct bpf_map *map) { return map->def.key_size; } int bpf_map__set_key_size(struct bpf_map *map, __u32 size) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->def.key_size = size; return 0; } __u32 bpf_map__value_size(const struct bpf_map *map) { return map->def.value_size; } static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) { struct btf *btf; struct btf_type *datasec_type, *var_type; struct btf_var_secinfo *var; const struct btf_type *array_type; const struct btf_array *array; int vlen, element_sz, new_array_id; __u32 nr_elements; /* check btf existence */ btf = bpf_object__btf(map->obj); if (!btf) return -ENOENT; /* verify map is datasec */ datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map)); if (!btf_is_datasec(datasec_type)) { pr_warn("map '%s': cannot be resized, map value type is not a datasec\n", bpf_map__name(map)); return -EINVAL; } /* verify datasec has at least one var */ vlen = btf_vlen(datasec_type); if (vlen == 0) { pr_warn("map '%s': cannot be resized, map value datasec is empty\n", bpf_map__name(map)); return -EINVAL; } /* verify last var in the datasec is an array */ var = &btf_var_secinfos(datasec_type)[vlen - 1]; var_type = btf_type_by_id(btf, var->type); array_type = skip_mods_and_typedefs(btf, var_type->type, NULL); if (!btf_is_array(array_type)) { pr_warn("map '%s': cannot be resized, last var must be an array\n", bpf_map__name(map)); return -EINVAL; } /* verify request size aligns with array */ array = btf_array(array_type); element_sz = btf__resolve_size(btf, array->type); if (element_sz <= 0 || (size - var->offset) % element_sz != 0) { pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n", bpf_map__name(map), element_sz, size); return -EINVAL; } /* create a new array based on the existing array, but with new length */ nr_elements = (size - var->offset) / element_sz; new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements); if (new_array_id < 0) return new_array_id; /* adding a new btf type invalidates existing pointers to btf objects, * so refresh pointers before proceeding */ datasec_type = btf_type_by_id(btf, map->btf_value_type_id); 
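/* Usage sketch (object and map names illustrative): the bpf_map__set_*()
 * setters above are only valid between open and load; once the map has been
 * created in the kernel they return -EBUSY:
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	bpf_map__set_map_flags(map, BPF_F_NO_PREALLOC);
 *	bpf_map__set_numa_node(map, 0);
 *	bpf_object__load(obj);
 */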
var = &btf_var_secinfos(datasec_type)[vlen - 1]; var_type = btf_type_by_id(btf, var->type); /* finally update btf info */ datasec_type->size = size; var->size = size - var->offset; var_type->type = new_array_id; return 0; } int bpf_map__set_value_size(struct bpf_map *map, __u32 size) { if (map->obj->loaded || map->reused) return libbpf_err(-EBUSY); if (map->mmaped) { size_t mmap_old_sz, mmap_new_sz; int err; if (map->def.type != BPF_MAP_TYPE_ARRAY) return -EOPNOTSUPP; mmap_old_sz = bpf_map_mmap_sz(map); mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); if (err) { pr_warn("map '%s': failed to resize memory-mapped region: %d\n", bpf_map__name(map), err); return err; } err = map_btf_datasec_resize(map, size); if (err && err != -ENOENT) { pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n", bpf_map__name(map), err); map->btf_value_type_id = 0; map->btf_key_type_id = 0; } } map->def.value_size = size; return 0; } __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? map->btf_key_type_id : 0; } __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) { return map ? map->btf_value_type_id : 0; } int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size) { size_t actual_sz; if (map->obj->loaded || map->reused) return libbpf_err(-EBUSY); if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) return libbpf_err(-EINVAL); if (map->def.type == BPF_MAP_TYPE_ARENA) actual_sz = map->obj->arena_data_sz; else actual_sz = map->def.value_size; if (size != actual_sz) return libbpf_err(-EINVAL); memcpy(map->mmaped, data, size); return 0; } void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize) { if (bpf_map__is_struct_ops(map)) { if (psize) *psize = map->def.value_size; return map->st_ops->data; } if (!map->mmaped) return NULL; if (map->def.type == BPF_MAP_TYPE_ARENA) *psize = map->obj->arena_data_sz; else *psize = map->def.value_size; return map->mmaped; } bool bpf_map__is_internal(const struct bpf_map *map) { return map->libbpf_type != LIBBPF_MAP_UNSPEC; } __u32 bpf_map__ifindex(const struct bpf_map *map) { return map->map_ifindex; } int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) { if (map_is_created(map)) return libbpf_err(-EBUSY); map->map_ifindex = ifindex; return 0; } int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) { if (!bpf_map_type__is_map_in_map(map->def.type)) { pr_warn("error: unsupported map type\n"); return libbpf_err(-EINVAL); } if (map->inner_map_fd != -1) { pr_warn("error: inner_map_fd already specified\n"); return libbpf_err(-EINVAL); } if (map->inner_map) { bpf_map__destroy(map->inner_map); zfree(&map->inner_map); } map->inner_map_fd = fd; return 0; } static struct bpf_map * __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) { ssize_t idx; struct bpf_map *s, *e; if (!obj || !obj->maps) return errno = EINVAL, NULL; s = obj->maps; e = obj->maps + obj->nr_maps; if ((m < s) || (m >= e)) { pr_warn("error in %s: map handler doesn't belong to object\n", __func__); return errno = EINVAL, NULL; } idx = (m - obj->maps) + i; if (idx >= obj->nr_maps || idx < 0) return NULL; return &obj->maps[idx]; } struct bpf_map * bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) { if (prev == NULL && obj != NULL) return obj->maps; return __bpf_map__iter(prev, obj, 1); } struct bpf_map * bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) { if 
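/* Usage sketch (map name illustrative): growing a global-data array before
 * load with bpf_map__set_value_size() above; this requires a not-yet-loaded,
 * mmap-ed ARRAY map whose BTF datasec ends in an array, which
 * map_btf_datasec_resize() then regenerates to match the new size:
 *
 *	struct bpf_map *data_map = bpf_object__find_map_by_name(obj, ".data.counters");
 *	int err = bpf_map__set_value_size(data_map, 1024 * sizeof(__u64));
 */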
(next == NULL && obj != NULL) { if (!obj->nr_maps) return NULL; return obj->maps + obj->nr_maps - 1; } return __bpf_map__iter(next, obj, -1); } struct bpf_map * bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) { struct bpf_map *pos; bpf_object__for_each_map(pos, obj) { /* if it's a special internal map name (which always starts * with dot) then check if that special name matches the * real map name (ELF section name) */ if (name[0] == '.') { if (pos->real_name && strcmp(pos->real_name, name) == 0) return pos; continue; } /* otherwise map name has to be an exact match */ if (map_uses_real_name(pos)) { if (strcmp(pos->real_name, name) == 0) return pos; continue; } if (strcmp(pos->name, name) == 0) return pos; } return errno = ENOENT, NULL; } int bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) { return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); } static int validate_map_op(const struct bpf_map *map, size_t key_sz, size_t value_sz, bool check_value_sz) { if (!map_is_created(map)) /* map is not yet created */ return -ENOENT; if (map->def.key_size != key_sz) { pr_warn("map '%s': unexpected key size %zu provided, expected %u\n", map->name, key_sz, map->def.key_size); return -EINVAL; } if (map->fd < 0) { pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); return -EINVAL; } if (!check_value_sz) return 0; switch (map->def.type) { case BPF_MAP_TYPE_PERCPU_ARRAY: case BPF_MAP_TYPE_PERCPU_HASH: case BPF_MAP_TYPE_LRU_PERCPU_HASH: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: { int num_cpu = libbpf_num_possible_cpus(); size_t elem_sz = roundup(map->def.value_size, 8); if (value_sz != num_cpu * elem_sz) { pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n", map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz); return -EINVAL; } break; } default: if (map->def.value_size != value_sz) { pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", map->name, value_sz, map->def.value_size); return -EINVAL; } break; } return 0; } int bpf_map__lookup_elem(const struct bpf_map *map, const void *key, size_t key_sz, void *value, size_t value_sz, __u64 flags) { int err; err = validate_map_op(map, key_sz, value_sz, true); if (err) return libbpf_err(err); return bpf_map_lookup_elem_flags(map->fd, key, value, flags); } int bpf_map__update_elem(const struct bpf_map *map, const void *key, size_t key_sz, const void *value, size_t value_sz, __u64 flags) { int err; err = validate_map_op(map, key_sz, value_sz, true); if (err) return libbpf_err(err); return bpf_map_update_elem(map->fd, key, value, flags); } int bpf_map__delete_elem(const struct bpf_map *map, const void *key, size_t key_sz, __u64 flags) { int err; err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); if (err) return libbpf_err(err); return bpf_map_delete_elem_flags(map->fd, key, flags); } int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, const void *key, size_t key_sz, void *value, size_t value_sz, __u64 flags) { int err; err = validate_map_op(map, key_sz, value_sz, true); if (err) return libbpf_err(err); return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags); } int bpf_map__get_next_key(const struct bpf_map *map, const void *cur_key, void *next_key, size_t key_sz) { int err; err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); if (err) return libbpf_err(err); return bpf_map_get_next_key(map->fd, cur_key, next_key); } long libbpf_get_error(const void 
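/* Usage sketch (illustrative): the size-checked element accessors above. For
 * per-CPU map types, validate_map_op() expects the value buffer to cover all
 * possible CPUs, with each element rounded up to 8 bytes:
 *
 *	__u32 key = 0;
 *	__u64 val = 1;
 *	int err = bpf_map__update_elem(map, &key, sizeof(key), &val, sizeof(val), BPF_ANY);
 *	if (!err)
 *		err = bpf_map__lookup_elem(map, &key, sizeof(key), &val, sizeof(val), 0);
 *
 *	int ncpu = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpu, sizeof(__u64));
 *	err = bpf_map__lookup_elem(pcpu_map, &key, sizeof(key), vals, ncpu * sizeof(__u64), 0);
 */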
*ptr) { if (!IS_ERR_OR_NULL(ptr)) return 0; if (IS_ERR(ptr)) errno = -PTR_ERR(ptr); /* If ptr == NULL, then errno should be already set by the failing * API, because libbpf never returns NULL on success and it now always * sets errno on error. So no extra errno handling for ptr == NULL * case. */ return -errno; } /* Replace link's underlying BPF program with the new one */ int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) { int ret; int prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err(-EINVAL); } ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL); return libbpf_err_errno(ret); } /* Release "ownership" of underlying BPF resource (typically, BPF program * attached to some BPF hook, e.g., tracepoint, kprobe, etc). A disconnected * link, when destroyed through a bpf_link__destroy() call, won't attempt to * detach/unregister that BPF resource. This is useful in situations where, * say, an attached BPF program has to outlive the userspace program that * attached it in the system. Depending on type of BPF program, though, there * might be additional steps (like pinning BPF program in BPF FS) necessary to * ensure exit of userspace program doesn't trigger automatic detachment and * clean up inside the kernel. */ void bpf_link__disconnect(struct bpf_link *link) { link->disconnected = true; } int bpf_link__destroy(struct bpf_link *link) { int err = 0; if (IS_ERR_OR_NULL(link)) return 0; if (!link->disconnected && link->detach) err = link->detach(link); if (link->pin_path) free(link->pin_path); if (link->dealloc) link->dealloc(link); else free(link); return libbpf_err(err); } int bpf_link__fd(const struct bpf_link *link) { return link->fd; } const char *bpf_link__pin_path(const struct bpf_link *link) { return link->pin_path; } static int bpf_link__detach_fd(struct bpf_link *link) { return libbpf_err_errno(close(link->fd)); } struct bpf_link *bpf_link__open(const char *path) { struct bpf_link *link; int fd; fd = bpf_obj_get(path); if (fd < 0) { fd = -errno; pr_warn("failed to open link at %s: %d\n", path, fd); return libbpf_err_ptr(fd); } link = calloc(1, sizeof(*link)); if (!link) { close(fd); return libbpf_err_ptr(-ENOMEM); } link->detach = &bpf_link__detach_fd; link->fd = fd; link->pin_path = strdup(path); if (!link->pin_path) { bpf_link__destroy(link); return libbpf_err_ptr(-ENOMEM); } return link; } int bpf_link__detach(struct bpf_link *link) { return bpf_link_detach(link->fd) ?
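/* Usage sketch (pin path illustrative): keeping a program attached after the
 * loading process exits, combining bpf_link__pin() and bpf_link__disconnect()
 * from above:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *	if (!libbpf_get_error(link) && !bpf_link__pin(link, "/sys/fs/bpf/my_link")) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link);
 *	}
 *
 * After disconnect, destroy only frees memory and does not detach the link.
 */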
-errno : 0; } int bpf_link__pin(struct bpf_link *link, const char *path) { int err; if (link->pin_path) return libbpf_err(-EBUSY); err = make_parent_dir(path); if (err) return libbpf_err(err); err = check_path(path); if (err) return libbpf_err(err); link->pin_path = strdup(path); if (!link->pin_path) return libbpf_err(-ENOMEM); if (bpf_obj_pin(link->fd, link->pin_path)) { err = -errno; zfree(&link->pin_path); return libbpf_err(err); } pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); return 0; } int bpf_link__unpin(struct bpf_link *link) { int err; if (!link->pin_path) return libbpf_err(-EINVAL); err = unlink(link->pin_path); if (err != 0) return -errno; pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); zfree(&link->pin_path); return 0; } struct bpf_link_perf { struct bpf_link link; int perf_event_fd; /* legacy kprobe support: keep track of probe identifier and type */ char *legacy_probe_name; bool legacy_is_kprobe; bool legacy_is_retprobe; }; static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); static int bpf_link_perf_detach(struct bpf_link *link) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); int err = 0; if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) err = -errno; if (perf_link->perf_event_fd != link->fd) close(perf_link->perf_event_fd); close(link->fd); /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ if (perf_link->legacy_probe_name) { if (perf_link->legacy_is_kprobe) { err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, perf_link->legacy_is_retprobe); } else { err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, perf_link->legacy_is_retprobe); } } return err; } static void bpf_link_perf_dealloc(struct bpf_link *link) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); free(perf_link->legacy_probe_name); free(perf_link); } struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, const struct bpf_perf_event_opts *opts) { char errmsg[STRERR_BUFSIZE]; struct bpf_link_perf *link; int prog_fd, link_fd = -1, err; bool force_ioctl_attach; if (!OPTS_VALID(opts, bpf_perf_event_opts)) return libbpf_err_ptr(-EINVAL); if (pfd < 0) { pr_warn("prog '%s': invalid perf event FD %d\n", prog->name, pfd); return libbpf_err_ptr(-EINVAL); } prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->link.detach = &bpf_link_perf_detach; link->link.dealloc = &bpf_link_perf_dealloc; link->perf_event_fd = pfd; force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false); if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); if (link_fd < 0) { err = -errno; pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n", prog->name, pfd, err, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_out; } link->link.fd = link_fd; } else { if (OPTS_GET(opts, bpf_cookie, 0)) { pr_warn("prog '%s': user context value is not supported\n", prog->name); err = -EOPNOTSUPP; goto err_out; } if (ioctl(pfd, 
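/* Usage sketch (cookie value illustrative): attaching to an already-opened
 * perf_event FD with a BPF cookie; per the code above, a non-zero cookie
 * requires kernel support for BPF perf links (FEAT_PERF_LINK), otherwise the
 * ioctl fallback fails with -EOPNOTSUPP:
 *
 *	LIBBPF_OPTS(bpf_perf_event_opts, pe_opts, .bpf_cookie = 0x1234);
 *	struct bpf_link *link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
 */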
PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { err = -errno; pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); if (err == -EPROTO) pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", prog->name, pfd); goto err_out; } link->link.fd = pfd; } if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_out; } return &link->link; err_out: if (link_fd >= 0) close(link_fd); free(link); return libbpf_err_ptr(err); } struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) { return bpf_program__attach_perf_event_opts(prog, pfd, NULL); } /* * this function is expected to parse integer in the range of [0, 2^31-1] from * given file using scanf format string fmt. If actual parsed value is * negative, the result might be indistinguishable from error */ static int parse_uint_from_file(const char *file, const char *fmt) { char buf[STRERR_BUFSIZE]; int err, ret; FILE *f; f = fopen(file, "re"); if (!f) { err = -errno; pr_debug("failed to open '%s': %s\n", file, libbpf_strerror_r(err, buf, sizeof(buf))); return err; } err = fscanf(f, fmt, &ret); if (err != 1) { err = err == EOF ? -EIO : -errno; pr_debug("failed to parse '%s': %s\n", file, libbpf_strerror_r(err, buf, sizeof(buf))); fclose(f); return err; } fclose(f); return ret; } static int determine_kprobe_perf_type(void) { const char *file = "/sys/bus/event_source/devices/kprobe/type"; return parse_uint_from_file(file, "%d\n"); } static int determine_uprobe_perf_type(void) { const char *file = "/sys/bus/event_source/devices/uprobe/type"; return parse_uint_from_file(file, "%d\n"); } static int determine_kprobe_retprobe_bit(void) { const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; return parse_uint_from_file(file, "config:%d\n"); } static int determine_uprobe_retprobe_bit(void) { const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; return parse_uint_from_file(file, "config:%d\n"); } #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, uint64_t offset, int pid, size_t ref_ctr_off) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; char errmsg[STRERR_BUFSIZE]; int type, pfd; if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) return -EINVAL; memset(&attr, 0, attr_sz); type = uprobe ? determine_uprobe_perf_type() : determine_kprobe_perf_type(); if (type < 0) { pr_warn("failed to determine %s perf type: %s\n", uprobe ? "uprobe" : "kprobe", libbpf_strerror_r(type, errmsg, sizeof(errmsg))); return type; } if (retprobe) { int bit = uprobe ? determine_uprobe_retprobe_bit() : determine_kprobe_retprobe_bit(); if (bit < 0) { pr_warn("failed to determine %s retprobe bit: %s\n", uprobe ? "uprobe" : "kprobe", libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); return bit; } attr.config |= 1 << bit; } attr.size = attr_sz; attr.type = type; attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ attr.config2 = offset; /* kprobe_addr or probe_offset */ /* pid filter is meaningful only for uprobes */ pfd = syscall(__NR_perf_event_open, &attr, pid < 0 ? -1 : pid /* pid */, pid == -1 ? 
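/* Note (concrete values are examples, not guaranteed):
 * determine_kprobe_perf_type() parses a small integer such as "6" from
 * /sys/bus/event_source/devices/kprobe/type, and
 * determine_kprobe_retprobe_bit() parses e.g. "config:0" from the matching
 * format/retprobe file, so perf_event_open_probe() ends up issuing
 * perf_event_open() with roughly attr.type = 6 and, for a retprobe,
 * attr.config |= 1 << 0.
 */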
0 : -1 /* cpu */, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); return pfd >= 0 ? pfd : -errno; } static int append_to_file(const char *file, const char *fmt, ...) { int fd, n, err = 0; va_list ap; char buf[1024]; va_start(ap, fmt); n = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); if (n < 0 || n >= sizeof(buf)) return -EINVAL; fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); if (fd < 0) return -errno; if (write(fd, buf, n) < 0) err = -errno; close(fd); return err; } #define DEBUGFS "/sys/kernel/debug/tracing" #define TRACEFS "/sys/kernel/tracing" static bool use_debugfs(void) { static int has_debugfs = -1; if (has_debugfs < 0) has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0; return has_debugfs == 1; } static const char *tracefs_path(void) { return use_debugfs() ? DEBUGFS : TRACEFS; } static const char *tracefs_kprobe_events(void) { return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events"; } static const char *tracefs_uprobe_events(void) { return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events"; } static const char *tracefs_available_filter_functions(void) { return use_debugfs() ? DEBUGFS"/available_filter_functions" : TRACEFS"/available_filter_functions"; } static const char *tracefs_available_filter_functions_addrs(void) { return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs" : TRACEFS"/available_filter_functions_addrs"; } static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, const char *kfunc_name, size_t offset) { static int index = 0; int i; snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, __sync_fetch_and_add(&index, 1)); /* sanitize kfunc_name in the generated probe name */ for (i = 0; buf[i]; i++) { if (!isalnum(buf[i])) buf[i] = '_'; } } static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, const char *kfunc_name, size_t offset) { return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx", retprobe ? 'r' : 'p', retprobe ? "kretprobes" : "kprobes", probe_name, kfunc_name, offset); } static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) { return append_to_file(tracefs_kprobe_events(), "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name); } static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) { char file[256]; snprintf(file, sizeof(file), "%s/events/%s/%s/id", tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name); return parse_uint_from_file(file, "%d\n"); } static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, const char *kfunc_name, size_t offset, int pid) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; char errmsg[STRERR_BUFSIZE]; int type, pfd, err; err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); if (err < 0) { pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", kfunc_name, offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return err; } type = determine_kprobe_perf_type_legacy(probe_name, retprobe); if (type < 0) { err = type; pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", kfunc_name, offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_clean_legacy; } memset(&attr, 0, attr_sz); attr.size = attr_sz; attr.config = type; attr.type = PERF_TYPE_TRACEPOINT; pfd = syscall(__NR_perf_event_open, &attr, pid < 0 ? -1 : pid, /* pid */ pid == -1 ?
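/* Illustration (generated name and target function hypothetical): for a
 * legacy kprobe, add_kprobe_event_legacy() appends a line like
 *	p:kprobes/libbpf_1234_do_unlinkat_0x0_0 do_unlinkat+0x0
 * to <tracefs>/kprobe_events, and remove_kprobe_event_legacy() later appends
 *	-:kprobes/libbpf_1234_do_unlinkat_0x0_0
 * The legacy uprobe helpers further below write the analogous
 *	p:uprobes/<name> <binary>:0x<offset>
 * format to <tracefs>/uprobe_events.
 */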
0 : -1, /* cpu */ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; pr_warn("legacy kprobe perf_event_open() failed: %s\n", libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_clean_legacy; } return pfd; err_clean_legacy: /* Clear the newly added legacy kprobe_event */ remove_kprobe_event_legacy(probe_name, retprobe); return err; } static const char *arch_specific_syscall_pfx(void) { #if defined(__x86_64__) return "x64"; #elif defined(__i386__) return "ia32"; #elif defined(__s390x__) return "s390x"; #elif defined(__s390__) return "s390"; #elif defined(__arm__) return "arm"; #elif defined(__aarch64__) return "arm64"; #elif defined(__mips__) return "mips"; #elif defined(__riscv) return "riscv"; #elif defined(__powerpc64__) return "powerpc64"; #elif defined(__powerpc__) return "powerpc"; #else return NULL; #endif } int probe_kern_syscall_wrapper(int token_fd) { char syscall_name[64]; const char *ksys_pfx; ksys_pfx = arch_specific_syscall_pfx(); if (!ksys_pfx) return 0; snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx); if (determine_kprobe_perf_type() >= 0) { int pfd; pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0); if (pfd >= 0) close(pfd); return pfd >= 0 ? 1 : 0; } else { /* legacy mode */ char probe_name[128]; gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) return 0; (void)remove_kprobe_event_legacy(probe_name, false); return 1; } } struct bpf_link * bpf_program__attach_kprobe_opts(const struct bpf_program *prog, const char *func_name, const struct bpf_kprobe_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); enum probe_attach_mode attach_mode; char errmsg[STRERR_BUFSIZE]; char *legacy_probe = NULL; struct bpf_link *link; size_t offset; bool retprobe, legacy; int pfd, err; if (!OPTS_VALID(opts, bpf_kprobe_opts)) return libbpf_err_ptr(-EINVAL); attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); retprobe = OPTS_GET(opts, retprobe, false); offset = OPTS_GET(opts, offset, 0); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); legacy = determine_kprobe_perf_type() < 0; switch (attach_mode) { case PROBE_ATTACH_MODE_LEGACY: legacy = true; pe_opts.force_ioctl_attach = true; break; case PROBE_ATTACH_MODE_PERF: if (legacy) return libbpf_err_ptr(-ENOTSUP); pe_opts.force_ioctl_attach = true; break; case PROBE_ATTACH_MODE_LINK: if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) return libbpf_err_ptr(-ENOTSUP); break; case PROBE_ATTACH_MODE_DEFAULT: break; default: return libbpf_err_ptr(-EINVAL); } if (!legacy) { pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, offset, -1 /* pid */, 0 /* ref_ctr_off */); } else { char probe_name[256]; gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), func_name, offset); legacy_probe = strdup(probe_name); if (!legacy_probe) return libbpf_err_ptr(-ENOMEM); pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, offset, -1 /* pid */); } if (pfd < 0) { err = -errno; pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", prog->name, retprobe ? "kretprobe" : "kprobe", func_name, offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_out; } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { close(pfd); pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", prog->name, retprobe ?
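/* Usage sketch (target function illustrative): attaching a kprobe through
 * the opts-based API of bpf_program__attach_kprobe_opts(); attach_mode can
 * force the legacy tracefs path, plain perf ioctl, or BPF link attachment:
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, kopts, .offset = 0, .retprobe = false);
 *	struct bpf_link *link = bpf_program__attach_kprobe_opts(prog, "do_sys_openat2", &kopts);
 */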
"kretprobe" : "kprobe", func_name, offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_clean_legacy; } if (legacy) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); perf_link->legacy_probe_name = legacy_probe; perf_link->legacy_is_kprobe = true; perf_link->legacy_is_retprobe = retprobe; } return link; err_clean_legacy: if (legacy) remove_kprobe_event_legacy(legacy_probe, retprobe); err_out: free(legacy_probe); return libbpf_err_ptr(err); } struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, const char *func_name) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, .retprobe = retprobe, ); return bpf_program__attach_kprobe_opts(prog, func_name, &opts); } struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, const char *syscall_name, const struct bpf_ksyscall_opts *opts) { LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); char func_name[128]; if (!OPTS_VALID(opts, bpf_ksyscall_opts)) return libbpf_err_ptr(-EINVAL); if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) { /* arch_specific_syscall_pfx() should never return NULL here * because it is guarded by kernel_supports(). However, since * compiler does not know that we have an explicit conditional * as well. */ snprintf(func_name, sizeof(func_name), "__%s_sys_%s", arch_specific_syscall_pfx() ? : "", syscall_name); } else { snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name); } kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false); kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts); } /* Adapted from perf/util/string.c */ bool glob_match(const char *str, const char *pat) { while (*str && *pat && *pat != '*') { if (*pat == '?') { /* Matches any single character */ str++; pat++; continue; } if (*str != *pat) return false; str++; pat++; } /* Check wild card */ if (*pat == '*') { while (*pat == '*') pat++; if (!*pat) /* Tail wild card matches all */ return true; while (*str) if (glob_match(str++, pat)) return true; } return !*str && !*pat; } struct kprobe_multi_resolve { const char *pattern; unsigned long *addrs; size_t cap; size_t cnt; }; struct avail_kallsyms_data { char **syms; size_t cnt; struct kprobe_multi_resolve *res; }; static int avail_func_cmp(const void *a, const void *b) { return strcmp(*(const char **)a, *(const char **)b); } static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, const char *sym_name, void *ctx) { struct avail_kallsyms_data *data = ctx; struct kprobe_multi_resolve *res = data->res; int err; if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) return 0; err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); if (err) return err; res->addrs[res->cnt++] = (unsigned long)sym_addr; return 0; } static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) { const char *available_functions_file = tracefs_available_filter_functions(); struct avail_kallsyms_data data; char sym_name[500]; FILE *f; int err = 0, ret, i; char **syms = NULL; size_t cap = 0, cnt = 0; f = fopen(available_functions_file, "re"); if (!f) { err = -errno; pr_warn("failed to open %s: %d\n", available_functions_file, err); return err; } while (true) { char *name; ret = fscanf(f, "%499s%*[^\n]\n", sym_name); if (ret == EOF && feof(f)) break; if (ret != 1) { pr_warn("failed to parse available_filter_functions entry: %d\n", ret); err = -EINVAL; goto cleanup; } if 
(!glob_match(sym_name, res->pattern)) continue; err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1); if (err) goto cleanup; name = strdup(sym_name); if (!name) { err = -errno; goto cleanup; } syms[cnt++] = name; } /* no entries found, bail out */ if (cnt == 0) { err = -ENOENT; goto cleanup; } /* sort available functions */ qsort(syms, cnt, sizeof(*syms), avail_func_cmp); data.syms = syms; data.res = res; data.cnt = cnt; libbpf_kallsyms_parse(avail_kallsyms_cb, &data); if (res->cnt == 0) err = -ENOENT; cleanup: for (i = 0; i < cnt; i++) free((char *)syms[i]); free(syms); fclose(f); return err; } static bool has_available_filter_functions_addrs(void) { return access(tracefs_available_filter_functions_addrs(), R_OK) != -1; } static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) { const char *available_path = tracefs_available_filter_functions_addrs(); char sym_name[500]; FILE *f; int ret, err = 0; unsigned long long sym_addr; f = fopen(available_path, "re"); if (!f) { err = -errno; pr_warn("failed to open %s: %d\n", available_path, err); return err; } while (true) { ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name); if (ret == EOF && feof(f)) break; if (ret != 2) { pr_warn("failed to parse available_filter_functions_addrs entry: %d\n", ret); err = -EINVAL; goto cleanup; } if (!glob_match(sym_name, res->pattern)) continue; err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); if (err) goto cleanup; res->addrs[res->cnt++] = (unsigned long)sym_addr; } if (res->cnt == 0) err = -ENOENT; cleanup: fclose(f); return err; } struct bpf_link * bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const char *pattern, const struct bpf_kprobe_multi_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, lopts); struct kprobe_multi_resolve res = { .pattern = pattern, }; enum bpf_attach_type attach_type; struct bpf_link *link = NULL; char errmsg[STRERR_BUFSIZE]; const unsigned long *addrs; int err, link_fd, prog_fd; bool retprobe, session; const __u64 *cookies; const char **syms; size_t cnt; if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) return libbpf_err_ptr(-EINVAL); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err_ptr(-EINVAL); } syms = OPTS_GET(opts, syms, false); addrs = OPTS_GET(opts, addrs, false); cnt = OPTS_GET(opts, cnt, false); cookies = OPTS_GET(opts, cookies, false); if (!pattern && !addrs && !syms) return libbpf_err_ptr(-EINVAL); if (pattern && (addrs || syms || cookies || cnt)) return libbpf_err_ptr(-EINVAL); if (!pattern && !cnt) return libbpf_err_ptr(-EINVAL); if (addrs && syms) return libbpf_err_ptr(-EINVAL); if (pattern) { if (has_available_filter_functions_addrs()) err = libbpf_available_kprobes_parse(&res); else err = libbpf_available_kallsyms_parse(&res); if (err) goto error; addrs = res.addrs; cnt = res.cnt; } retprobe = OPTS_GET(opts, retprobe, false); session = OPTS_GET(opts, session, false); if (retprobe && session) return libbpf_err_ptr(-EINVAL); attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI; lopts.kprobe_multi.syms = syms; lopts.kprobe_multi.addrs = addrs; lopts.kprobe_multi.cookies = cookies; lopts.kprobe_multi.cnt = cnt; lopts.kprobe_multi.flags = retprobe ? 
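/* Usage sketch (pattern illustrative): attaching one program to every kernel
 * function matching a glob via bpf_program__attach_kprobe_multi_opts(); a
 * pattern and explicit .syms/.addrs arrays are mutually exclusive, as
 * validated above:
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, mopts);
 *	struct bpf_link *link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &mopts);
 */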
BPF_F_KPROBE_MULTI_RETURN : 0; link = calloc(1, sizeof(*link)); if (!link) { err = -ENOMEM; goto error; } link->detach = &bpf_link__detach_fd; link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); if (link_fd < 0) { err = -errno; pr_warn("prog '%s': failed to attach: %s\n", prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto error; } link->fd = link_fd; free(res.addrs); return link; error: free(link); free(res.addrs); return libbpf_err_ptr(err); } static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); unsigned long offset = 0; const char *func_name; char *func; int n; *link = NULL; /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */ if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0) return 0; opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); if (opts.retprobe) func_name = prog->sec_name + sizeof("kretprobe/") - 1; else func_name = prog->sec_name + sizeof("kprobe/") - 1; n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); if (n < 1) { pr_warn("kprobe name is invalid: %s\n", func_name); return -EINVAL; } if (opts.retprobe && offset != 0) { free(func); pr_warn("kretprobes do not support offset specification\n"); return -EINVAL; } opts.offset = offset; *link = bpf_program__attach_kprobe_opts(prog, func, &opts); free(func); return libbpf_get_error(*link); } static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link) { LIBBPF_OPTS(bpf_ksyscall_opts, opts); const char *syscall_name; *link = NULL; /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */ if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0) return 0; opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/"); if (opts.retprobe) syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1; else syscall_name = prog->sec_name + sizeof("ksyscall/") - 1; *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts); return *link ? 0 : -errno; } static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) { LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); const char *spec; char *pattern; int n; *link = NULL; /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */ if (strcmp(prog->sec_name, "kprobe.multi") == 0 || strcmp(prog->sec_name, "kretprobe.multi") == 0) return 0; opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); if (opts.retprobe) spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; else spec = prog->sec_name + sizeof("kprobe.multi/") - 1; n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); if (n < 1) { pr_warn("kprobe multi pattern is invalid: %s\n", spec); return -EINVAL; } *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); free(pattern); return libbpf_get_error(*link); } static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link) { LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true); const char *spec; char *pattern; int n; *link = NULL; /* no auto-attach for SEC("kprobe.session") */ if (strcmp(prog->sec_name, "kprobe.session") == 0) return 0; spec = prog->sec_name + sizeof("kprobe.session/") - 1; n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); if (n < 1) { pr_warn("kprobe session pattern is invalid: %s\n", spec); return -EINVAL; } *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); free(pattern); return *link ? 
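/* Auto-attach SEC() forms handled by the attach_kprobe*() and
 * attach_ksyscall() callbacks above (function names illustrative):
 *	SEC("kprobe/tcp_connect")	kprobe at function entry
 *	SEC("kretprobe/tcp_connect")	kprobe at function return
 *	SEC("ksyscall/openat")		syscall-wrapper-aware kprobe
 *	SEC("kprobe.multi/tcp_*")	multi-kprobe via glob pattern
 *	SEC("kretprobe.multi/tcp_*")	multi-kretprobe via glob pattern
 *	SEC("kprobe.session/tcp_*")	entry+return session probes
 */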
0 : -errno; } static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) { char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); int n, ret = -EINVAL; *link = NULL; n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", &probe_type, &binary_path, &func_name); switch (n) { case 1: /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ ret = 0; break; case 3: opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi"); *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); ret = libbpf_get_error(*link); break; default: pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, prog->sec_name); break; } free(probe_type); free(binary_path); free(func_name); return ret; } static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, const char *binary_path, uint64_t offset) { int i; snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); /* sanitize binary_path in the probe name */ for (i = 0; buf[i]; i++) { if (!isalnum(buf[i])) buf[i] = '_'; } } static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, const char *binary_path, size_t offset) { return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx", retprobe ? 'r' : 'p', retprobe ? "uretprobes" : "uprobes", probe_name, binary_path, offset); } static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) { return append_to_file(tracefs_uprobe_events(), "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name); } static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) { char file[512]; snprintf(file, sizeof(file), "%s/events/%s/%s/id", tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name); return parse_uint_from_file(file, "%d\n"); } static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, const char *binary_path, size_t offset, int pid) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; int type, pfd, err; err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); if (err < 0) { pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", binary_path, (size_t)offset, err); return err; } type = determine_uprobe_perf_type_legacy(probe_name, retprobe); if (type < 0) { err = type; pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", binary_path, offset, err); goto err_clean_legacy; } memset(&attr, 0, attr_sz); attr.size = attr_sz; attr.config = type; attr.type = PERF_TYPE_TRACEPOINT; pfd = syscall(__NR_perf_event_open, &attr, pid < 0 ? -1 : pid, /* pid */ pid == -1 ? 0 : -1, /* cpu */ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); goto err_clean_legacy; } return pfd; err_clean_legacy: /* Clear the newly added legacy uprobe_event */ remove_uprobe_event_legacy(probe_name, retprobe); return err; } /* Find offset of function name in archive specified by path. Currently * supported are .zip files that do not compress their contents, as used on * Android in the form of APKs, for example. "file_name" is the name of the ELF * file inside the archive. "func_name" matches symbol name or name@@LIB for * library functions. 
* * An overview of the APK format is provided here: * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents */ static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, const char *func_name) { struct zip_archive *archive; struct zip_entry entry; long ret; Elf *elf; archive = zip_archive_open(archive_path); if (IS_ERR(archive)) { ret = PTR_ERR(archive); pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); return ret; } ret = zip_archive_find_entry(archive, file_name, &entry); if (ret) { pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, archive_path, ret); goto out; } pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, (unsigned long)entry.data_offset); if (entry.compression) { pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, archive_path); ret = -LIBBPF_ERRNO__FORMAT; goto out; } elf = elf_memory((void *)entry.data, entry.data_length); if (!elf) { pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, elf_errmsg(-1)); ret = -LIBBPF_ERRNO__LIBELF; goto out; } ret = elf_find_func_offset(elf, file_name, func_name); if (ret > 0) { pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", func_name, file_name, archive_path, entry.data_offset, ret, ret + entry.data_offset); ret += entry.data_offset; } elf_end(elf); out: zip_archive_close(archive); return ret; } static const char *arch_specific_lib_paths(void) { /* * Based on https://packages.debian.org/sid/libc6. * * Assume that the traced program is built for the same architecture * as libbpf, which should cover the vast majority of cases. */ #if defined(__x86_64__) return "/lib/x86_64-linux-gnu"; #elif defined(__i386__) return "/lib/i386-linux-gnu"; #elif defined(__s390x__) return "/lib/s390x-linux-gnu"; #elif defined(__s390__) return "/lib/s390-linux-gnu"; #elif defined(__arm__) && defined(__SOFTFP__) return "/lib/arm-linux-gnueabi"; #elif defined(__arm__) && !defined(__SOFTFP__) return "/lib/arm-linux-gnueabihf"; #elif defined(__aarch64__) return "/lib/aarch64-linux-gnu"; #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 return "/lib/mips64el-linux-gnuabi64"; #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 return "/lib/mipsel-linux-gnu"; #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return "/lib/powerpc64le-linux-gnu"; #elif defined(__sparc__) && defined(__arch64__) return "/lib/sparc64-linux-gnu"; #elif defined(__riscv) && __riscv_xlen == 64 return "/lib/riscv64-linux-gnu"; #else return NULL; #endif } /* Get full path to program/shared library. */ static int resolve_full_path(const char *file, char *result, size_t result_sz) { const char *search_paths[3] = {}; int i, perm; if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { search_paths[0] = getenv("LD_LIBRARY_PATH"); search_paths[1] = "/usr/lib64:/usr/lib"; search_paths[2] = arch_specific_lib_paths(); perm = R_OK; } else { search_paths[0] = getenv("PATH"); search_paths[1] = "/usr/bin:/usr/sbin"; perm = R_OK | X_OK; } for (i = 0; i < ARRAY_SIZE(search_paths); i++) { const char *s; if (!search_paths[i]) continue; for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { char *next_path; int seg_len; if (s[0] == ':') s++; next_path = strchr(s, ':'); seg_len = next_path ?
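/* resolve_full_path() search order sketch (directories illustrative of a
 * typical distro): for a shared library name like "libc.so.6" it tries
 * $LD_LIBRARY_PATH, then "/usr/lib64:/usr/lib", then an arch-specific
 * directory such as /lib/x86_64-linux-gnu, requiring R_OK; for plain
 * binaries it searches $PATH, then "/usr/bin:/usr/sbin", requiring
 * R_OK|X_OK.
 */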
next_path - s : strlen(s); if (!seg_len) continue; snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); /* ensure it has required permissions */ if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0) continue; pr_debug("resolved '%s' to '%s'\n", file, result); return 0; } } return -ENOENT; } struct bpf_link * bpf_program__attach_uprobe_multi(const struct bpf_program *prog, pid_t pid, const char *path, const char *func_pattern, const struct bpf_uprobe_multi_opts *opts) { const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; LIBBPF_OPTS(bpf_link_create_opts, lopts); unsigned long *resolved_offsets = NULL; int err = 0, link_fd, prog_fd; struct bpf_link *link = NULL; char errmsg[STRERR_BUFSIZE]; char full_path[PATH_MAX]; const __u64 *cookies; const char **syms; size_t cnt; if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) return libbpf_err_ptr(-EINVAL); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err_ptr(-EINVAL); } syms = OPTS_GET(opts, syms, NULL); offsets = OPTS_GET(opts, offsets, NULL); ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); cookies = OPTS_GET(opts, cookies, NULL); cnt = OPTS_GET(opts, cnt, 0); /* * User can specify 2 mutually exclusive set of inputs: * * 1) use only path/func_pattern/pid arguments * * 2) use path/pid with allowed combinations of: * syms/offsets/ref_ctr_offsets/cookies/cnt * * - syms and offsets are mutually exclusive * - ref_ctr_offsets and cookies are optional * * Any other usage results in error. */ if (!path) return libbpf_err_ptr(-EINVAL); if (!func_pattern && cnt == 0) return libbpf_err_ptr(-EINVAL); if (func_pattern) { if (syms || offsets || ref_ctr_offsets || cookies || cnt) return libbpf_err_ptr(-EINVAL); } else { if (!!syms == !!offsets) return libbpf_err_ptr(-EINVAL); } if (func_pattern) { if (!strchr(path, '/')) { err = resolve_full_path(path, full_path, sizeof(full_path)); if (err) { pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", prog->name, path, err); return libbpf_err_ptr(err); } path = full_path; } err = elf_resolve_pattern_offsets(path, func_pattern, &resolved_offsets, &cnt); if (err < 0) return libbpf_err_ptr(err); offsets = resolved_offsets; } else if (syms) { err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC); if (err < 0) return libbpf_err_ptr(err); offsets = resolved_offsets; } lopts.uprobe_multi.path = path; lopts.uprobe_multi.offsets = offsets; lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; lopts.uprobe_multi.cookies = cookies; lopts.uprobe_multi.cnt = cnt; lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? 
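/* Usage sketch (library and pattern illustrative): attaching to every
 * function matching a glob in one shared object via
 * bpf_program__attach_uprobe_multi(); pid -1 means system-wide, pid 0 is
 * translated to the calling process below:
 *
 *	LIBBPF_OPTS(bpf_uprobe_multi_opts, uopts);
 *	struct bpf_link *link =
 *		bpf_program__attach_uprobe_multi(prog, -1, "libc.so.6", "pthread_*", &uopts);
 */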
BPF_F_UPROBE_MULTI_RETURN : 0; if (pid == 0) pid = getpid(); if (pid > 0) lopts.uprobe_multi.pid = pid; link = calloc(1, sizeof(*link)); if (!link) { err = -ENOMEM; goto error; } link->detach = &bpf_link__detach_fd; link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); if (link_fd < 0) { err = -errno; pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto error; } link->fd = link_fd; free(resolved_offsets); return link; error: free(resolved_offsets); free(link); return libbpf_err_ptr(err); } LIBBPF_API struct bpf_link * bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, const struct bpf_uprobe_opts *opts) { const char *archive_path = NULL, *archive_sep = NULL; char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); enum probe_attach_mode attach_mode; char full_path[PATH_MAX]; struct bpf_link *link; size_t ref_ctr_off; int pfd, err; bool retprobe, legacy; const char *func_name; if (!OPTS_VALID(opts, bpf_uprobe_opts)) return libbpf_err_ptr(-EINVAL); attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); retprobe = OPTS_GET(opts, retprobe, false); ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); if (!binary_path) return libbpf_err_ptr(-EINVAL); /* Check if "binary_path" refers to an archive. */ archive_sep = strstr(binary_path, "!/"); if (archive_sep) { full_path[0] = '\0'; libbpf_strlcpy(full_path, binary_path, min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1))); archive_path = full_path; binary_path = archive_sep + 2; } else if (!strchr(binary_path, '/')) { err = resolve_full_path(binary_path, full_path, sizeof(full_path)); if (err) { pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", prog->name, binary_path, err); return libbpf_err_ptr(err); } binary_path = full_path; } func_name = OPTS_GET(opts, func_name, NULL); if (func_name) { long sym_off; if (archive_path) { sym_off = elf_find_func_offset_from_archive(archive_path, binary_path, func_name); binary_path = archive_path; } else { sym_off = elf_find_func_offset_from_file(binary_path, func_name); } if (sym_off < 0) return libbpf_err_ptr(sym_off); func_offset += sym_off; } legacy = determine_uprobe_perf_type() < 0; switch (attach_mode) { case PROBE_ATTACH_MODE_LEGACY: legacy = true; pe_opts.force_ioctl_attach = true; break; case PROBE_ATTACH_MODE_PERF: if (legacy) return libbpf_err_ptr(-ENOTSUP); pe_opts.force_ioctl_attach = true; break; case PROBE_ATTACH_MODE_LINK: if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) return libbpf_err_ptr(-ENOTSUP); break; case PROBE_ATTACH_MODE_DEFAULT: break; default: return libbpf_err_ptr(-EINVAL); } if (!legacy) { pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid, ref_ctr_off); } else { char probe_name[PATH_MAX + 64]; if (ref_ctr_off) return libbpf_err_ptr(-EINVAL); gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), binary_path, func_offset); legacy_probe = strdup(probe_name); if (!legacy_probe) return libbpf_err_ptr(-ENOMEM); pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, binary_path, func_offset, pid); } if (pfd < 0) { err = -errno; pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", prog->name, retprobe ? 
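/* Usage sketch (paths and symbol illustrative): attaching by symbol name via
 * bpf_program__attach_uprobe_opts(); a "binary!/member" path selects an ELF
 * inside an uncompressed zip archive (e.g. an Android APK), as parsed at the
 * top of this function:
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, uopts, .func_name = "malloc");
 *	struct bpf_link *link =
 *		bpf_program__attach_uprobe_opts(prog, -1, "libc.so.6", 0, &uopts);
 *
 * A hypothetical archive form: "app.apk!/lib/arm64-v8a/libapp.so".
 */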
"uretprobe" : "uprobe", binary_path, func_offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_out; } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { close(pfd); pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", prog->name, retprobe ? "uretprobe" : "uprobe", binary_path, func_offset, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); goto err_clean_legacy; } if (legacy) { struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); perf_link->legacy_probe_name = legacy_probe; perf_link->legacy_is_kprobe = false; perf_link->legacy_is_retprobe = retprobe; } return link; err_clean_legacy: if (legacy) remove_uprobe_event_legacy(legacy_probe, retprobe); err_out: free(legacy_probe); return libbpf_err_ptr(err); } /* Format of u[ret]probe section definition supporting auto-attach: * u[ret]probe/binary:function[+offset] * * binary can be an absolute/relative path or a filename; the latter is resolved to a * full binary path via bpf_program__attach_uprobe_opts. * * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be * specified (and auto-attach is not possible) or the above format is specified for * auto-attach. */ static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off; int n, c, ret = -EINVAL; long offset = 0; *link = NULL; n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]", &probe_type, &binary_path, &func_name); switch (n) { case 1: /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ ret = 0; break; case 2: pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n", prog->name, prog->sec_name); break; case 3: /* check if user specifies `+offset`, if yes, this should be * the last part of the string, make sure sscanf read to EOL */ func_off = strrchr(func_name, '+'); if (func_off) { n = sscanf(func_off, "+%li%n", &offset, &c); if (n == 1 && *(func_off + c) == '\0') func_off[0] = '\0'; else offset = 0; } opts.retprobe = strcmp(probe_type, "uretprobe") == 0 || strcmp(probe_type, "uretprobe.s") == 0; if (opts.retprobe && offset != 0) { pr_warn("prog '%s': uretprobes do not support offset specification\n", prog->name); break; } opts.func_name = func_name; *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts); ret = libbpf_get_error(*link); break; default: pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, prog->sec_name); break; } free(probe_type); free(binary_path); free(func_name); return ret; } struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, size_t func_offset) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); } struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, pid_t pid, const char *binary_path, const char *usdt_provider, const char *usdt_name, const struct bpf_usdt_opts *opts) { char resolved_path[512]; struct bpf_object *obj = prog->obj; struct bpf_link *link; __u64 usdt_cookie; int err; if (!OPTS_VALID(opts, bpf_uprobe_opts)) return libbpf_err_ptr(-EINVAL); if (bpf_program__fd(prog) < 0) { pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err_ptr(-EINVAL); 
} if (!binary_path) return libbpf_err_ptr(-EINVAL); if (!strchr(binary_path, '/')) { err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); if (err) { pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", prog->name, binary_path, err); return libbpf_err_ptr(err); } binary_path = resolved_path; } /* USDT manager is instantiated lazily on first USDT attach. It will * be destroyed together with BPF object in bpf_object__close(). */ if (IS_ERR(obj->usdt_man)) return libbpf_ptr(obj->usdt_man); if (!obj->usdt_man) { obj->usdt_man = usdt_manager_new(obj); if (IS_ERR(obj->usdt_man)) return libbpf_ptr(obj->usdt_man); } usdt_cookie = OPTS_GET(opts, usdt_cookie, 0); link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path, usdt_provider, usdt_name, usdt_cookie); err = libbpf_get_error(link); if (err) return libbpf_err_ptr(err); return link; } static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) { char *path = NULL, *provider = NULL, *name = NULL; const char *sec_name; int n, err; sec_name = bpf_program__section_name(prog); if (strcmp(sec_name, "usdt") == 0) { /* no auto-attach for just SEC("usdt") */ *link = NULL; return 0; } n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name); if (n != 3) { pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n", sec_name); err = -EINVAL; } else { *link = bpf_program__attach_usdt(prog, -1 /* any process */, path, provider, name, NULL); err = libbpf_get_error(*link); } free(path); free(provider); free(name); return err; } static int determine_tracepoint_id(const char *tp_category, const char *tp_name) { char file[PATH_MAX]; int ret; ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id", tracefs_path(), tp_category, tp_name); if (ret < 0) return -errno; if (ret >= sizeof(file)) { pr_debug("tracepoint %s/%s path is too long\n", tp_category, tp_name); return -E2BIG; } return parse_uint_from_file(file, "%d\n"); } static int perf_event_open_tracepoint(const char *tp_category, const char *tp_name) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; char errmsg[STRERR_BUFSIZE]; int tp_id, pfd, err; tp_id = determine_tracepoint_id(tp_category, tp_name); if (tp_id < 0) { pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", tp_category, tp_name, libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); return tp_id; } memset(&attr, 0, attr_sz); attr.type = PERF_TYPE_TRACEPOINT; attr.size = attr_sz; attr.config = tp_id; pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", tp_category, tp_name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return err; } return pfd; } struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, const char *tp_category, const char *tp_name, const struct bpf_tracepoint_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int pfd, err; if (!OPTS_VALID(opts, bpf_tracepoint_opts)) return libbpf_err_ptr(-EINVAL); pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); pfd = perf_event_open_tracepoint(tp_category, tp_name); if (pfd < 0) { pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", prog->name, tp_category, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return libbpf_err_ptr(pfd); } link =
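/* Sketch of the explicit call equivalent to the SEC("usdt/...") auto-attach
 * handled above; the python binary/provider/probe names are illustrative
 * (any binary carrying a matching USDT note would do): */
#if 0
struct bpf_link *link;

link = bpf_program__attach_usdt(prog, -1 /* any process */,
				"/usr/bin/python3", "python",
				"function__entry", NULL /* opts */);
#endif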
bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { close(pfd); pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", prog->name, tp_category, tp_name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return libbpf_err_ptr(err); } return link; } struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, const char *tp_category, const char *tp_name) { return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); } static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) { char *sec_name, *tp_cat, *tp_name; *link = NULL; /* no auto-attach for SEC("tp") or SEC("tracepoint") */ if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0) return 0; sec_name = strdup(prog->sec_name); if (!sec_name) return -ENOMEM; /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */ if (str_has_pfx(prog->sec_name, "tp/")) tp_cat = sec_name + sizeof("tp/") - 1; else tp_cat = sec_name + sizeof("tracepoint/") - 1; tp_name = strchr(tp_cat, '/'); if (!tp_name) { free(sec_name); return -EINVAL; } *tp_name = '\0'; tp_name++; *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); free(sec_name); return libbpf_get_error(*link); } struct bpf_link * bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, const char *tp_name, struct bpf_raw_tracepoint_opts *opts) { LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, pfd; if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts)) return libbpf_err_ptr(-EINVAL); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach before loaded\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; raw_opts.tp_name = tp_name; raw_opts.cookie = OPTS_GET(opts, cookie, 0); pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts); if (pfd < 0) { pfd = -errno; free(link); pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return libbpf_err_ptr(pfd); } link->fd = pfd; return link; } struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, const char *tp_name) { return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL); } static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) { static const char *const prefixes[] = { "raw_tp", "raw_tracepoint", "raw_tp.w", "raw_tracepoint.w", }; size_t i; const char *tp_name = NULL; *link = NULL; for (i = 0; i < ARRAY_SIZE(prefixes); i++) { size_t pfx_len; if (!str_has_pfx(prog->sec_name, prefixes[i])) continue; pfx_len = strlen(prefixes[i]); /* no auto-attach case of, e.g., SEC("raw_tp") */ if (prog->sec_name[pfx_len] == '\0') return 0; if (prog->sec_name[pfx_len] != '/') continue; tp_name = prog->sec_name + pfx_len + 1; break; } if (!tp_name) { pr_warn("prog '%s': invalid section name '%s'\n", prog->name, prog->sec_name); return -EINVAL; } *link = bpf_program__attach_raw_tracepoint(prog, tp_name); return libbpf_get_error(*link); } /* Common logic for all BPF program types that attach to a btf_id */ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog, const struct bpf_trace_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, link_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, pfd; if (!OPTS_VALID(opts,
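/* The tracepoint section format parsed above, next to its explicit
 * user-space equivalent ("syscalls/sys_enter_openat" is just an example
 * tracepoint): */
#if 0
SEC("tp/syscalls/sys_enter_openat")
int trace_openat(void *ctx)
{
	return 0;
}

/* or, bypassing section-name parsing entirely: */
link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_openat");
#endif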
bpf_trace_opts)) return libbpf_err_ptr(-EINVAL); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach before loaded\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */ link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0); pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts); if (pfd < 0) { pfd = -errno; free(link); pr_warn("prog '%s': failed to attach: %s\n", prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return libbpf_err_ptr(pfd); } link->fd = pfd; return link; } struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) { return bpf_program__attach_btf_id(prog, NULL); } struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts) { return bpf_program__attach_btf_id(prog, opts); } struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) { return bpf_program__attach_btf_id(prog, NULL); } static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) { *link = bpf_program__attach_trace(prog); return libbpf_get_error(*link); } static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) { *link = bpf_program__attach_lsm(prog); return libbpf_get_error(*link); } static struct bpf_link * bpf_program_attach_fd(const struct bpf_program *prog, int target_fd, const char *target_name, const struct bpf_link_create_opts *opts) { enum bpf_attach_type attach_type; char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach before loaded\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; attach_type = bpf_program__expected_attach_type(prog); link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts); if (link_fd < 0) { link_fd = -errno; free(link); pr_warn("prog '%s': failed to attach to %s: %s\n", prog->name, target_name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); return libbpf_err_ptr(link_fd); } link->fd = link_fd; return link; } struct bpf_link * bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) { return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL); } struct bpf_link * bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) { return bpf_program_attach_fd(prog, netns_fd, "netns", NULL); } struct bpf_link * bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd) { return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL); } struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) { /* target_fd/target_ifindex use the same field in LINK_CREATE */ return bpf_program_attach_fd(prog, ifindex, "xdp", NULL); } struct bpf_link * bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, const struct bpf_tcx_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); __u32 relative_id; int relative_fd; if (!OPTS_VALID(opts, bpf_tcx_opts)) return libbpf_err_ptr(-EINVAL); relative_id = OPTS_GET(opts, relative_id, 0); relative_fd = OPTS_GET(opts, relative_fd, 0); /* validate we don't have unexpected combinations of non-zero fields */ if (!ifindex) { pr_warn("prog 
'%s': target netdevice ifindex cannot be zero\n", prog->name); return libbpf_err_ptr(-EINVAL); } if (relative_fd && relative_id) { pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", prog->name); return libbpf_err_ptr(-EINVAL); } link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0); link_create_opts.tcx.relative_fd = relative_fd; link_create_opts.tcx.relative_id = relative_id; link_create_opts.flags = OPTS_GET(opts, flags, 0); /* target_fd/target_ifindex use the same field in LINK_CREATE */ return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts); } struct bpf_link * bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, const struct bpf_netkit_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); __u32 relative_id; int relative_fd; if (!OPTS_VALID(opts, bpf_netkit_opts)) return libbpf_err_ptr(-EINVAL); relative_id = OPTS_GET(opts, relative_id, 0); relative_fd = OPTS_GET(opts, relative_fd, 0); /* validate we don't have unexpected combinations of non-zero fields */ if (!ifindex) { pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", prog->name); return libbpf_err_ptr(-EINVAL); } if (relative_fd && relative_id) { pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", prog->name); return libbpf_err_ptr(-EINVAL); } link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0); link_create_opts.netkit.relative_fd = relative_fd; link_create_opts.netkit.relative_id = relative_id; link_create_opts.flags = OPTS_GET(opts, flags, 0); return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts); } struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, int target_fd, const char *attach_func_name) { int btf_id; if (!!target_fd != !!attach_func_name) { pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", prog->name); return libbpf_err_ptr(-EINVAL); } if (prog->type != BPF_PROG_TYPE_EXT) { pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n", prog->name); return libbpf_err_ptr(-EINVAL); } if (target_fd) { LIBBPF_OPTS(bpf_link_create_opts, target_opts); btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); if (btf_id < 0) return libbpf_err_ptr(btf_id); target_opts.target_btf_id = btf_id; return bpf_program_attach_fd(prog, target_fd, "freplace", &target_opts); } else { /* no target, so use raw_tracepoint_open for compatibility * with old kernels */ return bpf_program__attach_trace(prog); } } struct bpf_link * bpf_program__attach_iter(const struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; __u32 target_fd = 0; if (!OPTS_VALID(opts, bpf_iter_attach_opts)) return libbpf_err_ptr(-EINVAL); link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach before loaded\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, &link_create_opts); if (link_fd < 0) { link_fd = -errno; free(link); pr_warn("prog '%s': failed to attach to iterator: %s\n", prog->name, libbpf_strerror_r(link_fd, errmsg, 
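/* Sketch: attaching a tcx program on a hypothetical ifindex. relative_fd and
 * relative_id stay unset, satisfying the mutual-exclusion check above;
 * BPF_F_BEFORE anchors the program at the head of the mprog chain. */
#if 0
LIBBPF_OPTS(bpf_tcx_opts, tcx_opts, .flags = BPF_F_BEFORE);
struct bpf_link *link = bpf_program__attach_tcx(prog, ifindex, &tcx_opts);
#endif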
sizeof(errmsg))); return libbpf_err_ptr(link_fd); } link->fd = link_fd; return link; } static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) { *link = bpf_program__attach_iter(prog, NULL); return libbpf_get_error(*link); } struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog, const struct bpf_netfilter_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, lopts); struct bpf_link *link; int prog_fd, link_fd; if (!OPTS_VALID(opts, bpf_netfilter_opts)) return libbpf_err_ptr(-EINVAL); prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("prog '%s': can't attach before loaded\n", prog->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; lopts.netfilter.pf = OPTS_GET(opts, pf, 0); lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0); lopts.netfilter.priority = OPTS_GET(opts, priority, 0); lopts.netfilter.flags = OPTS_GET(opts, flags, 0); link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts); if (link_fd < 0) { char errmsg[STRERR_BUFSIZE]; link_fd = -errno; free(link); pr_warn("prog '%s': failed to attach to netfilter: %s\n", prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); return libbpf_err_ptr(link_fd); } link->fd = link_fd; return link; } struct bpf_link *bpf_program__attach(const struct bpf_program *prog) { struct bpf_link *link = NULL; int err; if (!prog->sec_def || !prog->sec_def->prog_attach_fn) return libbpf_err_ptr(-EOPNOTSUPP); if (bpf_program__fd(prog) < 0) { pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n", prog->name); return libbpf_err_ptr(-EINVAL); } err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); if (err) return libbpf_err_ptr(err); /* When calling bpf_program__attach() explicitly, auto-attach support * is expected to work, so NULL returned link is considered an error. * This is different for skeleton's attach, see comment in * bpf_object__attach_skeleton(). */ if (!link) return libbpf_err_ptr(-EOPNOTSUPP); return link; } struct bpf_link_struct_ops { struct bpf_link link; int map_fd; }; static int bpf_link__detach_struct_ops(struct bpf_link *link) { struct bpf_link_struct_ops *st_link; __u32 zero = 0; st_link = container_of(link, struct bpf_link_struct_ops, link); if (st_link->map_fd < 0) /* w/o a real link */ return bpf_map_delete_elem(link->fd, &zero); return close(link->fd); } struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) { struct bpf_link_struct_ops *link; __u32 zero = 0; int err, fd; if (!bpf_map__is_struct_ops(map)) { pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); return libbpf_err_ptr(-EINVAL); } if (map->fd < 0) { pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); return libbpf_err_ptr(-EINVAL); } link = calloc(1, sizeof(*link)); if (!link) return libbpf_err_ptr(-EINVAL); /* kern_vdata should be prepared during the loading phase. */ err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); /* It can be EBUSY if the map has been used to create or * update a link before. We don't allow updating the value of * a struct_ops once it is set. That ensures that the value * never changed. So, it is safe to skip EBUSY. 
*/ if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) { free(link); return libbpf_err_ptr(err); } link->link.detach = bpf_link__detach_struct_ops; if (!(map->def.map_flags & BPF_F_LINK)) { /* w/o a real link */ link->link.fd = map->fd; link->map_fd = -1; return &link->link; } fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL); if (fd < 0) { free(link); return libbpf_err_ptr(fd); } link->link.fd = fd; link->map_fd = map->fd; return &link->link; } /* * Swap the backing struct_ops map of a link with a new struct_ops map. */ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) { struct bpf_link_struct_ops *st_ops_link; __u32 zero = 0; int err; if (!bpf_map__is_struct_ops(map)) return -EINVAL; if (map->fd < 0) { pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); return -EINVAL; } st_ops_link = container_of(link, struct bpf_link_struct_ops, link); /* Ensure the type of a link is correct */ if (st_ops_link->map_fd < 0) return -EINVAL; err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); /* It can be EBUSY if the map has been used to create or * update a link before. We don't allow updating the value of * a struct_ops once it is set. That ensures that the value * never changed. So, it is safe to skip EBUSY. */ if (err && err != -EBUSY) return err; err = bpf_link_update(link->fd, map->fd, NULL); if (err < 0) return err; st_ops_link->map_fd = map->fd; return 0; } typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, void *private_data); static enum bpf_perf_event_ret perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, bpf_perf_event_print_t fn, void *private_data) { struct perf_event_mmap_page *header = mmap_mem; __u64 data_head = ring_buffer_read_head(header); __u64 data_tail = header->data_tail; void *base = ((__u8 *)header) + page_size; int ret = LIBBPF_PERF_EVENT_CONT; struct perf_event_header *ehdr; size_t ehdr_size; while (data_head != data_tail) { ehdr = base + (data_tail & (mmap_size - 1)); ehdr_size = ehdr->size; if (((void *)ehdr) + ehdr_size > base + mmap_size) { void *copy_start = ehdr; size_t len_first = base + mmap_size - copy_start; size_t len_secnd = ehdr_size - len_first; if (*copy_size < ehdr_size) { free(*copy_mem); *copy_mem = malloc(ehdr_size); if (!*copy_mem) { *copy_size = 0; ret = LIBBPF_PERF_EVENT_ERROR; break; } *copy_size = ehdr_size; } memcpy(*copy_mem, copy_start, len_first); memcpy(*copy_mem + len_first, base, len_secnd); ehdr = *copy_mem; } ret = fn(ehdr, private_data); data_tail += ehdr_size; if (ret != LIBBPF_PERF_EVENT_CONT) break; } ring_buffer_write_tail(header, data_tail); return libbpf_err(ret); } struct perf_buffer; struct perf_buffer_params { struct perf_event_attr *attr; /* if event_cb is specified, it takes precedence */ perf_buffer_event_fn event_cb; /* sample_cb and lost_cb are higher-level common-case callbacks */ perf_buffer_sample_fn sample_cb; perf_buffer_lost_fn lost_cb; void *ctx; int cpu_cnt; int *cpus; int *map_keys; }; struct perf_cpu_buf { struct perf_buffer *pb; void *base; /* mmap()'ed memory */ void *buf; /* for reconstructing segmented data */ size_t buf_size; int fd; int cpu; int map_key; }; struct perf_buffer { perf_buffer_event_fn event_cb; perf_buffer_sample_fn sample_cb; perf_buffer_lost_fn lost_cb; void *ctx; /* passed into callbacks */ size_t page_size; size_t mmap_size; struct perf_cpu_buf **cpu_bufs; struct epoll_event *events; int cpu_cnt; /* number of
allocated CPU buffers */ int epoll_fd; /* perf event FD */ int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ }; static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, struct perf_cpu_buf *cpu_buf) { if (!cpu_buf) return; if (cpu_buf->base && munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); if (cpu_buf->fd >= 0) { ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); close(cpu_buf->fd); } free(cpu_buf->buf); free(cpu_buf); } void perf_buffer__free(struct perf_buffer *pb) { int i; if (IS_ERR_OR_NULL(pb)) return; if (pb->cpu_bufs) { for (i = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; if (!cpu_buf) continue; bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); perf_buffer__free_cpu_buf(pb, cpu_buf); } free(pb->cpu_bufs); } if (pb->epoll_fd >= 0) close(pb->epoll_fd); free(pb->events); free(pb); } static struct perf_cpu_buf * perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, int cpu, int map_key) { struct perf_cpu_buf *cpu_buf; char msg[STRERR_BUFSIZE]; int err; cpu_buf = calloc(1, sizeof(*cpu_buf)); if (!cpu_buf) return ERR_PTR(-ENOMEM); cpu_buf->pb = pb; cpu_buf->cpu = cpu; cpu_buf->map_key = map_key; cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, -1, PERF_FLAG_FD_CLOEXEC); if (cpu_buf->fd < 0) { err = -errno; pr_warn("failed to open perf buffer event on cpu #%d: %s\n", cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, cpu_buf->fd, 0); if (cpu_buf->base == MAP_FAILED) { cpu_buf->base = NULL; err = -errno; pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } return cpu_buf; error: perf_buffer__free_cpu_buf(pb, cpu_buf); return (struct perf_cpu_buf *)ERR_PTR(err); } static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p); struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, const struct perf_buffer_opts *opts) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_buffer_params p = {}; struct perf_event_attr attr; __u32 sample_period; if (!OPTS_VALID(opts, perf_buffer_opts)) return libbpf_err_ptr(-EINVAL); sample_period = OPTS_GET(opts, sample_period, 1); if (!sample_period) sample_period = 1; memset(&attr, 0, attr_sz); attr.size = attr_sz; attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; attr.sample_period = sample_period; attr.wakeup_events = sample_period; p.attr = &attr; p.sample_cb = sample_cb; p.lost_cb = lost_cb; p.ctx = ctx; return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); } struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr, perf_buffer_event_fn event_cb, void *ctx, const struct perf_buffer_raw_opts *opts) { struct perf_buffer_params p = {}; if (!attr) return libbpf_err_ptr(-EINVAL); if (!OPTS_VALID(opts, perf_buffer_raw_opts)) return libbpf_err_ptr(-EINVAL); p.attr = attr; p.event_cb = event_cb; p.ctx = ctx; p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); p.cpus = OPTS_GET(opts, cpus, NULL); p.map_keys = OPTS_GET(opts, 
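/* Typical lifecycle for the perf buffer constructed below: map_fd is assumed
 * to be a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and page_cnt must be a power of
 * two (enforced in __perf_buffer__new()); the callback name is a placeholder. */
#if 0
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* one raw sample, as emitted by bpf_perf_event_output() */
}

struct perf_buffer *pb = perf_buffer__new(map_fd, 64 /* pages per CPU */,
					  on_sample, NULL /* lost_cb */,
					  NULL /* ctx */, NULL /* opts */);
if (pb) {
	while (perf_buffer__poll(pb, 100 /* timeout, ms */) >= 0)
		; /* callbacks fire from inside poll */
	perf_buffer__free(pb);
}
#endif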
map_keys, NULL); return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); } static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { const char *online_cpus_file = "/sys/devices/system/cpu/online"; struct bpf_map_info map; char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; bool *online = NULL; __u32 map_info_len; int err, i, j, n; if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { pr_warn("page count should be power of two, but is %zu\n", page_cnt); return ERR_PTR(-EINVAL); } /* best-effort sanity checks */ memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return * -EBADFD, -EFAULT, or -E2BIG on real error */ if (err != -EINVAL) { pr_warn("failed to get map info for map FD %d: %s\n", map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); return ERR_PTR(err); } pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", map_fd); } else { if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", map.name); return ERR_PTR(-EINVAL); } } pb = calloc(1, sizeof(*pb)); if (!pb) return ERR_PTR(-ENOMEM); pb->event_cb = p->event_cb; pb->sample_cb = p->sample_cb; pb->lost_cb = p->lost_cb; pb->ctx = p->ctx; pb->page_size = getpagesize(); pb->mmap_size = pb->page_size * page_cnt; pb->map_fd = map_fd; pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (pb->epoll_fd < 0) { err = -errno; pr_warn("failed to create epoll instance: %s\n", libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } if (p->cpu_cnt > 0) { pb->cpu_cnt = p->cpu_cnt; } else { pb->cpu_cnt = libbpf_num_possible_cpus(); if (pb->cpu_cnt < 0) { err = pb->cpu_cnt; goto error; } if (map.max_entries && map.max_entries < pb->cpu_cnt) pb->cpu_cnt = map.max_entries; } pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); if (!pb->events) { err = -ENOMEM; pr_warn("failed to allocate events: out of memory\n"); goto error; } pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); if (!pb->cpu_bufs) { err = -ENOMEM; pr_warn("failed to allocate buffers: out of memory\n"); goto error; } err = parse_cpu_mask_file(online_cpus_file, &online, &n); if (err) { pr_warn("failed to get online CPU mask: %d\n", err); goto error; } for (i = 0, j = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf; int cpu, map_key; cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; map_key = p->cpu_cnt > 0 ? 
p->map_keys[i] : i; /* in case the user didn't explicitly request particular CPUs to * be attached to, skip offline/not present CPUs */ if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) continue; cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); if (IS_ERR(cpu_buf)) { err = PTR_ERR(cpu_buf); goto error; } pb->cpu_bufs[j] = cpu_buf; err = bpf_map_update_elem(pb->map_fd, &map_key, &cpu_buf->fd, 0); if (err) { err = -errno; pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", cpu, map_key, cpu_buf->fd, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } pb->events[j].events = EPOLLIN; pb->events[j].data.ptr = cpu_buf; if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, &pb->events[j]) < 0) { err = -errno; pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", cpu, cpu_buf->fd, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } j++; } pb->cpu_cnt = j; free(online); return pb; error: free(online); if (pb) perf_buffer__free(pb); return ERR_PTR(err); } struct perf_sample_raw { struct perf_event_header header; uint32_t size; char data[]; }; struct perf_sample_lost { struct perf_event_header header; uint64_t id; uint64_t lost; uint64_t sample_id; }; static enum bpf_perf_event_ret perf_buffer__process_record(struct perf_event_header *e, void *ctx) { struct perf_cpu_buf *cpu_buf = ctx; struct perf_buffer *pb = cpu_buf->pb; void *data = e; /* user wants full control over parsing perf event */ if (pb->event_cb) return pb->event_cb(pb->ctx, cpu_buf->cpu, e); switch (e->type) { case PERF_RECORD_SAMPLE: { struct perf_sample_raw *s = data; if (pb->sample_cb) pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); break; } case PERF_RECORD_LOST: { struct perf_sample_lost *s = data; if (pb->lost_cb) pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); break; } default: pr_warn("unknown perf sample type %d\n", e->type); return LIBBPF_PERF_EVENT_ERROR; } return LIBBPF_PERF_EVENT_CONT; } static int perf_buffer__process_records(struct perf_buffer *pb, struct perf_cpu_buf *cpu_buf) { enum bpf_perf_event_ret ret; ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size, pb->page_size, &cpu_buf->buf, &cpu_buf->buf_size, perf_buffer__process_record, cpu_buf); if (ret != LIBBPF_PERF_EVENT_CONT) return ret; return 0; } int perf_buffer__epoll_fd(const struct perf_buffer *pb) { return pb->epoll_fd; } int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) { int i, cnt, err; cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); if (cnt < 0) return -errno; for (i = 0; i < cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; err = perf_buffer__process_records(pb, cpu_buf); if (err) { pr_warn("error while processing records: %d\n", err); return libbpf_err(err); } } return cnt; } /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer * manager. */ size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) { return pb->cpu_cnt; } /* * Return perf_event FD of a ring buffer in *buf_idx* slot of * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using * select()/poll()/epoll() Linux syscalls.
*/ int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) { struct perf_cpu_buf *cpu_buf; if (buf_idx >= pb->cpu_cnt) return libbpf_err(-EINVAL); cpu_buf = pb->cpu_bufs[buf_idx]; if (!cpu_buf) return libbpf_err(-ENOENT); return cpu_buf->fd; } int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size) { struct perf_cpu_buf *cpu_buf; if (buf_idx >= pb->cpu_cnt) return libbpf_err(-EINVAL); cpu_buf = pb->cpu_bufs[buf_idx]; if (!cpu_buf) return libbpf_err(-ENOENT); *buf = cpu_buf->base; *buf_size = pb->mmap_size; return 0; } /* * Consume data from perf ring buffer corresponding to slot *buf_idx* in * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to * consume, do nothing and return success. * Returns: * - 0 on success; * - <0 on failure. */ int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) { struct perf_cpu_buf *cpu_buf; if (buf_idx >= pb->cpu_cnt) return libbpf_err(-EINVAL); cpu_buf = pb->cpu_bufs[buf_idx]; if (!cpu_buf) return libbpf_err(-ENOENT); return perf_buffer__process_records(pb, cpu_buf); } int perf_buffer__consume(struct perf_buffer *pb) { int i, err; for (i = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; if (!cpu_buf) continue; err = perf_buffer__process_records(pb, cpu_buf); if (err) { pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); return libbpf_err(err); } } return 0; } int bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, const char *attach_func_name) { int btf_obj_fd = 0, btf_id = 0, err; if (!prog || attach_prog_fd < 0) return libbpf_err(-EINVAL); if (prog->obj->loaded) return libbpf_err(-EINVAL); if (attach_prog_fd && !attach_func_name) { /* remember attach_prog_fd and let bpf_program__load() find * BTF ID during the program load */ prog->attach_prog_fd = attach_prog_fd; return 0; } if (attach_prog_fd) { btf_id = libbpf_find_prog_btf_id(attach_func_name, attach_prog_fd); if (btf_id < 0) return libbpf_err(btf_id); } else { if (!attach_func_name) return libbpf_err(-EINVAL); /* load btf_vmlinux, if not yet */ err = bpf_object__load_vmlinux_btf(prog->obj, true); if (err) return libbpf_err(err); err = find_kernel_btf_id(prog->obj, attach_func_name, prog->expected_attach_type, &btf_obj_fd, &btf_id); if (err) return libbpf_err(err); } prog->attach_btf_id = btf_id; prog->attach_btf_obj_fd = btf_obj_fd; prog->attach_prog_fd = attach_prog_fd; return 0; } int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) { int err = 0, n, len, start, end = -1; bool *tmp; *mask = NULL; *mask_sz = 0; /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ while (*s) { if (*s == ',' || *s == '\n') { s++; continue; } n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); if (n <= 0 || n > 2) { pr_warn("Failed to get CPU range %s: %d\n", s, n); err = -EINVAL; goto cleanup; } else if (n == 1) { end = start; } if (start < 0 || start > end) { pr_warn("Invalid CPU range [%d,%d] in %s\n", start, end, s); err = -EINVAL; goto cleanup; } tmp = realloc(*mask, end + 1); if (!tmp) { err = -ENOMEM; goto cleanup; } *mask = tmp; memset(tmp + *mask_sz, 0, start - *mask_sz); memset(tmp + start, 1, end - start + 1); *mask_sz = end + 1; s += len; } if (!*mask_sz) { pr_warn("Empty CPU range\n"); return -EINVAL; } return 0; cleanup: free(*mask); *mask = NULL; return err; } int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) { int fd, err = 0, len; char buf[128]; fd = open(fcpu, O_RDONLY | O_CLOEXEC); if (fd < 
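/* The CPU-list syntax accepted by parse_cpu_mask_str() above, the same
 * format as the kernel's /sys/devices/system/cpu/{possible,online} files: */
#if 0
bool *mask;
int n;

if (!parse_cpu_mask_str("0-2,4", &mask, &n)) {
	/* n == 5; mask[0..2] and mask[4] are true, mask[3] is false */
	free(mask);
}
#endif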
0) { err = -errno; pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); return err; } len = read(fd, buf, sizeof(buf)); close(fd); if (len <= 0) { err = len ? -errno : -EINVAL; pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err); return err; } if (len >= sizeof(buf)) { pr_warn("CPU mask is too big in file %s\n", fcpu); return -E2BIG; } buf[len] = '\0'; return parse_cpu_mask_str(buf, mask, mask_sz); } int libbpf_num_possible_cpus(void) { static const char *fcpu = "/sys/devices/system/cpu/possible"; static int cpus; int err, n, i, tmp_cpus; bool *mask; tmp_cpus = READ_ONCE(cpus); if (tmp_cpus > 0) return tmp_cpus; err = parse_cpu_mask_file(fcpu, &mask, &n); if (err) return libbpf_err(err); tmp_cpus = 0; for (i = 0; i < n; i++) { if (mask[i]) tmp_cpus++; } free(mask); WRITE_ONCE(cpus, tmp_cpus); return tmp_cpus; } static int populate_skeleton_maps(const struct bpf_object *obj, struct bpf_map_skeleton *maps, size_t map_cnt, size_t map_skel_sz) { int i; for (i = 0; i < map_cnt; i++) { struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; struct bpf_map **map = map_skel->map; const char *name = map_skel->name; void **mmaped = map_skel->mmaped; *map = bpf_object__find_map_by_name(obj, name); if (!*map) { pr_warn("failed to find skeleton map '%s'\n", name); return -ESRCH; } /* externs shouldn't be pre-setup from user code */ if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) *mmaped = (*map)->mmaped; } return 0; } static int populate_skeleton_progs(const struct bpf_object *obj, struct bpf_prog_skeleton *progs, size_t prog_cnt, size_t prog_skel_sz) { int i; for (i = 0; i < prog_cnt; i++) { struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; struct bpf_program **prog = prog_skel->prog; const char *name = prog_skel->name; *prog = bpf_object__find_program_by_name(obj, name); if (!*prog) { pr_warn("failed to find skeleton program '%s'\n", name); return -ESRCH; } } return 0; } int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts) { struct bpf_object *obj; int err; obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); return libbpf_err(err); } *s->obj = obj; err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); return libbpf_err(err); } err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); return libbpf_err(err); } return 0; } int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) { int err, len, var_idx, i; const char *var_name; const struct bpf_map *map; struct btf *btf; __u32 map_type_id; const struct btf_type *map_type, *var_type; const struct bpf_var_skeleton *var_skel; struct btf_var_secinfo *var; if (!s->obj) return libbpf_err(-EINVAL); btf = bpf_object__btf(s->obj); if (!btf) { pr_warn("subskeletons require BTF at runtime (object %s)\n", bpf_object__name(s->obj)); return libbpf_err(-errno); } err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate subskeleton maps: %d\n", err); return libbpf_err(err); } err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate subskeleton progs: %d\n", err); return libbpf_err(err); } for (var_idx = 0;
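/* How the skeleton helpers above (and the load/attach/destroy helpers that
 * follow) are normally reached: through code generated by `bpftool gen
 * skeleton`. The my_obj__* names below are placeholders for that generated
 * code, not symbols defined in this file. */
#if 0
static int run(void)
{
	struct my_obj *skel = my_obj__open();	/* bpf_object__open_skeleton() */
	int err = -1;

	if (!skel)
		return -1;
	if (my_obj__load(skel))			/* bpf_object__load_skeleton() */
		goto out;
	err = my_obj__attach(skel);		/* bpf_object__attach_skeleton() */
out:
	my_obj__destroy(skel);			/* detach links, close object */
	return err;
}
#endif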
var_idx < s->var_cnt; var_idx++) { var_skel = (void *)s->vars + var_idx * s->var_skel_sz; map = *var_skel->map; map_type_id = bpf_map__btf_value_type_id(map); map_type = btf__type_by_id(btf, map_type_id); if (!btf_is_datasec(map_type)) { pr_warn("type for map '%1$s' is not a datasec: %2$s\n", bpf_map__name(map), __btf_kind_str(btf_kind(map_type))); return libbpf_err(-EINVAL); } len = btf_vlen(map_type); var = btf_var_secinfos(map_type); for (i = 0; i < len; i++, var++) { var_type = btf__type_by_id(btf, var->type); var_name = btf__name_by_offset(btf, var_type->name_off); if (strcmp(var_name, var_skel->name) == 0) { *var_skel->addr = map->mmaped + var->offset; break; } } } return 0; } void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) { if (!s) return; free(s->maps); free(s->progs); free(s->vars); free(s); } int bpf_object__load_skeleton(struct bpf_object_skeleton *s) { int i, err; err = bpf_object__load(*s->obj); if (err) { pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); return libbpf_err(err); } for (i = 0; i < s->map_cnt; i++) { struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; struct bpf_map *map = *map_skel->map; if (!map_skel->mmaped) continue; *map_skel->mmaped = map->mmaped; } return 0; } int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) { int i, err; for (i = 0; i < s->prog_cnt; i++) { struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; struct bpf_program *prog = *prog_skel->prog; struct bpf_link **link = prog_skel->link; if (!prog->autoload || !prog->autoattach) continue; /* auto-attaching not supported for this program */ if (!prog->sec_def || !prog->sec_def->prog_attach_fn) continue; /* if user already set the link manually, don't attempt auto-attach */ if (*link) continue; err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); if (err) { pr_warn("prog '%s': failed to auto-attach: %d\n", bpf_program__name(prog), err); return libbpf_err(err); } /* It's possible that for some SEC() definitions auto-attach * is supported in some cases (e.g., if definition completely * specifies target information), but is not in other cases. * SEC("uprobe") is one such case. If user specified target * binary and function name, such BPF program can be * auto-attached. But if not, it shouldn't trigger skeleton's * attach to fail. It should just be skipped. * attach_fn signals such case with returning 0 (no error) and * setting link to NULL. 
*/ } for (i = 0; i < s->map_cnt; i++) { struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; struct bpf_map *map = *map_skel->map; struct bpf_link **link; if (!map->autocreate || !map->autoattach) continue; /* only struct_ops maps can be attached */ if (!bpf_map__is_struct_ops(map)) continue; /* skeleton is created with earlier version of bpftool, notify user */ if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", bpf_map__name(map)); continue; } link = map_skel->link; if (*link) continue; *link = bpf_map__attach_struct_ops(map); if (!*link) { err = -errno; pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); return libbpf_err(err); } } return 0; } void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) { int i; for (i = 0; i < s->prog_cnt; i++) { struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; struct bpf_link **link = prog_skel->link; bpf_link__destroy(*link); *link = NULL; } if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) return; for (i = 0; i < s->map_cnt; i++) { struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; struct bpf_link **link = map_skel->link; if (link) { bpf_link__destroy(*link); *link = NULL; } } } void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) { if (!s) return; bpf_object__detach_skeleton(s); if (s->obj) bpf_object__close(*s->obj); free(s->maps); free(s->progs); free(s); } xdp-tools-1.5.4/lib/libbpf/src/bpf_tracing.h0000644000175100001660000011320214706536574020257 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_TRACING_H__ #define __BPF_TRACING_H__ #include "bpf_helpers.h" /* Scan the ARCH passed in from ARCH env variable (see Makefile) */ #if defined(__TARGET_ARCH_x86) #define bpf_target_x86 #define bpf_target_defined #elif defined(__TARGET_ARCH_s390) #define bpf_target_s390 #define bpf_target_defined #elif defined(__TARGET_ARCH_arm) #define bpf_target_arm #define bpf_target_defined #elif defined(__TARGET_ARCH_arm64) #define bpf_target_arm64 #define bpf_target_defined #elif defined(__TARGET_ARCH_mips) #define bpf_target_mips #define bpf_target_defined #elif defined(__TARGET_ARCH_powerpc) #define bpf_target_powerpc #define bpf_target_defined #elif defined(__TARGET_ARCH_sparc) #define bpf_target_sparc #define bpf_target_defined #elif defined(__TARGET_ARCH_riscv) #define bpf_target_riscv #define bpf_target_defined #elif defined(__TARGET_ARCH_arc) #define bpf_target_arc #define bpf_target_defined #elif defined(__TARGET_ARCH_loongarch) #define bpf_target_loongarch #define bpf_target_defined #else /* Fall back to what the compiler says */ #if defined(__x86_64__) #define bpf_target_x86 #define bpf_target_defined #elif defined(__s390__) #define bpf_target_s390 #define bpf_target_defined #elif defined(__arm__) #define bpf_target_arm #define bpf_target_defined #elif defined(__aarch64__) #define bpf_target_arm64 #define bpf_target_defined #elif defined(__mips__) #define bpf_target_mips #define bpf_target_defined #elif defined(__powerpc__) #define bpf_target_powerpc #define bpf_target_defined #elif defined(__sparc__) #define bpf_target_sparc #define bpf_target_defined #elif defined(__riscv) && __riscv_xlen == 64 #define bpf_target_riscv #define bpf_target_defined #elif defined(__arc__) #define bpf_target_arc #define bpf_target_defined #elif defined(__loongarch__) #define bpf_target_loongarch #define bpf_target_defined 
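/* Illustrative build invocation that pins the target architecture explicitly
 * instead of relying on the compiler-fallback chain above; the exact flags
 * are a typical example, not mandated by this header:
 *
 *   clang -g -O2 -target bpf -D__TARGET_ARCH_x86 -c prog.bpf.c -o prog.bpf.o
 */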
#endif /* no compiler target */ #endif #ifndef __BPF_TARGET_MISSING #define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\"" #endif #if defined(bpf_target_x86) /* * https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI */ #if defined(__KERNEL__) || defined(__VMLINUX_H__) #define __PT_PARM1_REG di #define __PT_PARM2_REG si #define __PT_PARM3_REG dx #define __PT_PARM4_REG cx #define __PT_PARM5_REG r8 #define __PT_PARM6_REG r9 /* * Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64 * comments in Linux sources. And refer to syscall(2) manpage. */ #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG r10 #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define __PT_RET_REG sp #define __PT_FP_REG bp #define __PT_RC_REG ax #define __PT_SP_REG sp #define __PT_IP_REG ip #else #ifdef __i386__ /* i386 kernel is built with -mregparm=3 */ #define __PT_PARM1_REG eax #define __PT_PARM2_REG edx #define __PT_PARM3_REG ecx /* i386 syscall ABI is very different, refer to syscall(2) manpage */ #define __PT_PARM1_SYSCALL_REG ebx #define __PT_PARM2_SYSCALL_REG ecx #define __PT_PARM3_SYSCALL_REG edx #define __PT_PARM4_SYSCALL_REG esi #define __PT_PARM5_SYSCALL_REG edi #define __PT_PARM6_SYSCALL_REG ebp #define __PT_RET_REG esp #define __PT_FP_REG ebp #define __PT_RC_REG eax #define __PT_SP_REG esp #define __PT_IP_REG eip #else /* __i386__ */ #define __PT_PARM1_REG rdi #define __PT_PARM2_REG rsi #define __PT_PARM3_REG rdx #define __PT_PARM4_REG rcx #define __PT_PARM5_REG r8 #define __PT_PARM6_REG r9 #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG r10 #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define __PT_RET_REG rsp #define __PT_FP_REG rbp #define __PT_RC_REG rax #define __PT_SP_REG rsp #define __PT_IP_REG rip #endif /* __i386__ */ #endif /* __KERNEL__ || __VMLINUX_H__ */ #elif defined(bpf_target_s390) /* * https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf */ struct pt_regs___s390 { unsigned long orig_gpr2; } __attribute__((preserve_access_index)); /* s390 provides user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x)) #define __PT_PARM1_REG gprs[2] #define __PT_PARM2_REG gprs[3] #define __PT_PARM3_REG gprs[4] #define __PT_PARM4_REG gprs[5] #define __PT_PARM5_REG gprs[6] #define __PT_PARM1_SYSCALL_REG orig_gpr2 #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG gprs[7] #define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) \ BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG) #define __PT_RET_REG gprs[14] #define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG gprs[2] #define __PT_SP_REG gprs[15] #define __PT_IP_REG psw.addr #elif defined(bpf_target_arm) /* * https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers */ #define __PT_PARM1_REG uregs[0] #define __PT_PARM2_REG uregs[1] #define __PT_PARM3_REG 
uregs[2] #define __PT_PARM4_REG uregs[3] #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG uregs[4] #define __PT_PARM6_SYSCALL_REG uregs[5] #define __PT_PARM7_SYSCALL_REG uregs[6] #define __PT_RET_REG uregs[14] #define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG uregs[0] #define __PT_SP_REG uregs[13] #define __PT_IP_REG uregs[12] #elif defined(bpf_target_arm64) /* * https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers */ struct pt_regs___arm64 { unsigned long orig_x0; } __attribute__((preserve_access_index)); /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) #define __PT_PARM1_REG regs[0] #define __PT_PARM2_REG regs[1] #define __PT_PARM3_REG regs[2] #define __PT_PARM4_REG regs[3] #define __PT_PARM5_REG regs[4] #define __PT_PARM6_REG regs[5] #define __PT_PARM7_REG regs[6] #define __PT_PARM8_REG regs[7] #define __PT_PARM1_SYSCALL_REG orig_x0 #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) \ BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG) #define __PT_RET_REG regs[30] #define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG regs[0] #define __PT_SP_REG sp #define __PT_IP_REG pc #elif defined(bpf_target_mips) /* * N64 ABI is assumed right now. * https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions */ #define __PT_PARM1_REG regs[4] #define __PT_PARM2_REG regs[5] #define __PT_PARM3_REG regs[6] #define __PT_PARM4_REG regs[7] #define __PT_PARM5_REG regs[8] #define __PT_PARM6_REG regs[9] #define __PT_PARM7_REG regs[10] #define __PT_PARM8_REG regs[11] #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */ #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */ #define __PT_RET_REG regs[31] #define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */ #define __PT_RC_REG regs[2] #define __PT_SP_REG regs[29] #define __PT_IP_REG cp0_epc #elif defined(bpf_target_powerpc) /* * http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14, * section "Function Calling Sequence") */ #define __PT_PARM1_REG gpr[3] #define __PT_PARM2_REG gpr[4] #define __PT_PARM3_REG gpr[5] #define __PT_PARM4_REG gpr[6] #define __PT_PARM5_REG gpr[7] #define __PT_PARM6_REG gpr[8] #define __PT_PARM7_REG gpr[9] #define __PT_PARM8_REG gpr[10] /* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. 
*/ #define PT_REGS_SYSCALL_REGS(ctx) ctx #define __PT_PARM1_SYSCALL_REG orig_gpr3 #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #if !defined(__arch64__) #define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */ #endif #define __PT_RET_REG regs[31] #define __PT_FP_REG __unsupported__ #define __PT_RC_REG gpr[3] #define __PT_SP_REG sp #define __PT_IP_REG nip #elif defined(bpf_target_sparc) /* * https://en.wikipedia.org/wiki/Calling_convention#SPARC */ #define __PT_PARM1_REG u_regs[UREG_I0] #define __PT_PARM2_REG u_regs[UREG_I1] #define __PT_PARM3_REG u_regs[UREG_I2] #define __PT_PARM4_REG u_regs[UREG_I3] #define __PT_PARM5_REG u_regs[UREG_I4] #define __PT_PARM6_REG u_regs[UREG_I5] #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define __PT_RET_REG u_regs[UREG_I7] #define __PT_FP_REG __unsupported__ #define __PT_RC_REG u_regs[UREG_I0] #define __PT_SP_REG u_regs[UREG_FP] /* Should this also be a bpf_target check for the sparc case? */ #if defined(__arch64__) #define __PT_IP_REG tpc #else #define __PT_IP_REG pc #endif #elif defined(bpf_target_riscv) /* * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions */ struct pt_regs___riscv { unsigned long orig_a0; } __attribute__((preserve_access_index)); /* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) #define __PT_PARM1_REG a0 #define __PT_PARM2_REG a1 #define __PT_PARM3_REG a2 #define __PT_PARM4_REG a3 #define __PT_PARM5_REG a4 #define __PT_PARM6_REG a5 #define __PT_PARM7_REG a6 #define __PT_PARM8_REG a7 #define __PT_PARM1_SYSCALL_REG orig_a0 #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) \ BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG) #define __PT_RET_REG ra #define __PT_FP_REG s0 #define __PT_RC_REG a0 #define __PT_SP_REG sp #define __PT_IP_REG pc #elif defined(bpf_target_arc) /* * Section "Function Calling Sequence" (page 24): * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf */ /* arc provides struct user_regs_struct instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) #define __PT_PARM1_REG scratch.r0 #define __PT_PARM2_REG scratch.r1 #define __PT_PARM3_REG scratch.r2 #define __PT_PARM4_REG scratch.r3 #define __PT_PARM5_REG scratch.r4 #define __PT_PARM6_REG scratch.r5 #define __PT_PARM7_REG scratch.r6 #define __PT_PARM8_REG scratch.r7 /* arc does not select ARCH_HAS_SYSCALL_WRAPPER. 
*/ #define PT_REGS_SYSCALL_REGS(ctx) ctx #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define __PT_RET_REG scratch.blink #define __PT_FP_REG scratch.fp #define __PT_RC_REG scratch.r0 #define __PT_SP_REG scratch.sp #define __PT_IP_REG scratch.ret #elif defined(bpf_target_loongarch) /* * https://docs.kernel.org/loongarch/introduction.html * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html */ /* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) #define __PT_PARM1_REG regs[4] #define __PT_PARM2_REG regs[5] #define __PT_PARM3_REG regs[6] #define __PT_PARM4_REG regs[7] #define __PT_PARM5_REG regs[8] #define __PT_PARM6_REG regs[9] #define __PT_PARM7_REG regs[10] #define __PT_PARM8_REG regs[11] /* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */ #define PT_REGS_SYSCALL_REGS(ctx) ctx #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG #define __PT_RET_REG regs[1] #define __PT_FP_REG regs[22] #define __PT_RC_REG regs[4] #define __PT_SP_REG regs[3] #define __PT_IP_REG csr_era #endif #if defined(bpf_target_defined) struct pt_regs; /* allow some architectures to override `struct pt_regs` */ #ifndef __PT_REGS_CAST #define __PT_REGS_CAST(x) (x) #endif /* * Different architectures support different number of arguments passed * through registers. i386 supports just 3, some arches support up to 8. */ #ifndef __PT_PARM4_REG #define __PT_PARM4_REG __unsupported__ #endif #ifndef __PT_PARM5_REG #define __PT_PARM5_REG __unsupported__ #endif #ifndef __PT_PARM6_REG #define __PT_PARM6_REG __unsupported__ #endif #ifndef __PT_PARM7_REG #define __PT_PARM7_REG __unsupported__ #endif #ifndef __PT_PARM8_REG #define __PT_PARM8_REG __unsupported__ #endif /* * Similarly, syscall-specific conventions might differ between function call * conventions within each architecture. All supported architectures pass * either 6 or 7 syscall arguments in registers. * * See syscall(2) manpage for succinct table with information on each arch. 
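 *
 * For example, on x86-64 the syscall ABI passes the 4th argument in r10
 * while the regular function-call ABI uses rcx, which is why a given
 * architecture's __PT_PARMn_SYSCALL_REG overrides can differ from its
 * plain __PT_PARMn_REG definitions.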
*/ #ifndef __PT_PARM7_SYSCALL_REG #define __PT_PARM7_SYSCALL_REG __unsupported__ #endif #define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG) #define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG) #define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG) #define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG) #define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG) #define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG) #define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG) #define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG) #define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG) #define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG) #define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG) #define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG) #define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG) #define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG) #define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG) #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG) #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG) #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG) #define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG) #define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG) #define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG) #define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG) #define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG) #define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG) #define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG) #define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG) #if defined(bpf_target_powerpc) #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP #elif defined(bpf_target_sparc) || defined(bpf_target_arm64) #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP #else #define BPF_KPROBE_READ_RET_IP(ip, ctx) \ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) #endif #ifndef PT_REGS_PARM1_SYSCALL #define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG) #endif #ifndef PT_REGS_PARM2_SYSCALL #define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG) #define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG) #endif #ifndef PT_REGS_PARM3_SYSCALL #define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG) #define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG) #endif #ifndef PT_REGS_PARM4_SYSCALL #define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG) #define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG) #endif #ifndef PT_REGS_PARM5_SYSCALL #define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG) #define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG) #endif #ifndef PT_REGS_PARM6_SYSCALL #define 
PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG) #define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG) #endif #ifndef PT_REGS_PARM7_SYSCALL #define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG) #define PT_REGS_PARM7_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG) #endif #else /* defined(bpf_target_defined) */ #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #endif /* defined(bpf_target_defined) */ /* * When invoked from a syscall handler kprobe, returns a pointer to a * struct pt_regs containing 
syscall arguments and suitable for passing to * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL(). */ #ifndef PT_REGS_SYSCALL_REGS /* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */ #define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx)) #endif #ifndef ___bpf_concat #define ___bpf_concat(a, b) a ## b #endif #ifndef ___bpf_apply #define ___bpf_apply(fn, n) ___bpf_concat(fn, n) #endif #ifndef ___bpf_nth #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N #endif #ifndef ___bpf_narg #define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #endif #define ___bpf_ctx_cast0() ctx #define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), ctx[0] #define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), ctx[1] #define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), ctx[2] #define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), ctx[3] #define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), ctx[4] #define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), ctx[5] #define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), ctx[6] #define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), ctx[7] #define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), ctx[8] #define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), ctx[9] #define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), ctx[10] #define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), ctx[11] #define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args) /* * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and * similar kinds of BPF programs, that accept input arguments as a single * pointer to untyped u64 array, where each u64 can actually be a typed * pointer or integer of different size. Instead of requiring user to write * manual casts and work with array elements by index, BPF_PROG macro * allows user to declare a list of named and typed input arguments in the * same syntax as for normal C function. All the casting is hidden and * performed transparently, while user code can just assume working with * function arguments of specified type and name. * * Original raw context argument is preserved as well as 'ctx' argument. * This is useful when using BPF helpers that expect original context * as one of the parameters (e.g., for bpf_perf_event_output()). */ #define BPF_PROG(name, args...) \ name(unsigned long long *ctx); \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx, ##args); \ typeof(name(0)) name(unsigned long long *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_ctx_cast(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx, ##args) #ifndef ___bpf_nth2 #define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \ _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N #endif #ifndef ___bpf_narg2 #define ___bpf_narg2(...) 
\ ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \ 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0) #endif #define ___bpf_treg_cnt(t) \ __builtin_choose_expr(sizeof(t) == 1, 1, \ __builtin_choose_expr(sizeof(t) == 2, 1, \ __builtin_choose_expr(sizeof(t) == 4, 1, \ __builtin_choose_expr(sizeof(t) == 8, 1, \ __builtin_choose_expr(sizeof(t) == 16, 2, \ (void)0))))) #define ___bpf_reg_cnt0() (0) #define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t)) #define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args) #define ___bpf_union_arg(t, x, n) \ __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \ __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \ __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \ __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \ __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \ (void)0))))) #define ___bpf_ctx_arg0(n, args...) #define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x)) #define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args) #define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args) #define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args) #define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args) #define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args) #define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args) #define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args) #define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args) #define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args) #define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args) #define ___bpf_ctx_arg12(n, t, x, args...) 
, ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args) #define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args) #define ___bpf_ctx_decl0() #define ___bpf_ctx_decl1(t, x) , t x #define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args) #define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args) #define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args) #define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args) #define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args) #define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args) #define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args) #define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args) #define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args) #define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args) #define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args) #define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args) /* * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct * arguments. Since each struct argument might take one or two u64 values * in the trampoline stack, argument type size is needed to place proper number * of u64 values for each argument. Therefore, BPF_PROG2 has different * syntax from BPF_PROG. For example, for the following BPF_PROG syntax: * * int BPF_PROG(test2, int a, int b) { ... } * * the corresponding BPF_PROG2 syntax is: * * int BPF_PROG2(test2, int, a, int, b) { ... } * * where type and the corresponding argument name are separated by comma. * * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger * than 8 bytes: * * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b, * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret) * { * // access a, b, c, d, e, and ret directly * ... * } */ #define BPF_PROG2(name, args...) \ name(unsigned long long *ctx); \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \ typeof(name(0)) name(unsigned long long *ctx) \ { \ return ____##name(ctx ___bpf_ctx_arg(args)); \ } \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx ___bpf_ctx_decl(args)) struct pt_regs; #define ___bpf_kprobe_args0() ctx #define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (unsigned long long)PT_REGS_PARM1(ctx) #define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (unsigned long long)PT_REGS_PARM2(ctx) #define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (unsigned long long)PT_REGS_PARM3(ctx) #define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (unsigned long long)PT_REGS_PARM4(ctx) #define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (unsigned long long)PT_REGS_PARM5(ctx) #define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (unsigned long long)PT_REGS_PARM6(ctx) #define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (unsigned long long)PT_REGS_PARM7(ctx) #define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (unsigned long long)PT_REGS_PARM8(ctx) #define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) /* * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for * tp_btf/fentry/fexit BPF programs. 
It hides the underlying platform-specific * low-level way of getting kprobe input arguments from struct pt_regs, and * provides a familiar typed and named function arguments syntax and * semantics of accessing kprobe input parameters. * * Original struct pt_regs* context is preserved as 'ctx' argument. This might * be necessary when using BPF helpers like bpf_perf_event_output(). */ #define BPF_KPROBE(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args); \ typeof(name(0)) name(struct pt_regs *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_kprobe_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args) #define ___bpf_kretprobe_args0() ctx #define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (unsigned long long)PT_REGS_RC(ctx) #define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args) /* * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional * return value (in addition to `struct pt_regs *ctx`), but no input * arguments, because they will be clobbered by the time probed function * returns. */ #define BPF_KRETPROBE(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args); \ typeof(name(0)) name(struct pt_regs *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_kretprobe_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) /* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */ #define ___bpf_syscall_args0() ctx #define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (unsigned long long)PT_REGS_PARM1_SYSCALL(regs) #define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (unsigned long long)PT_REGS_PARM2_SYSCALL(regs) #define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (unsigned long long)PT_REGS_PARM3_SYSCALL(regs) #define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (unsigned long long)PT_REGS_PARM4_SYSCALL(regs) #define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (unsigned long long)PT_REGS_PARM5_SYSCALL(regs) #define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (unsigned long long)PT_REGS_PARM6_SYSCALL(regs) #define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (unsigned long long)PT_REGS_PARM7_SYSCALL(regs) #define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args) /* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */ #define ___bpf_syswrap_args0() ctx #define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (unsigned long long)PT_REGS_PARM1_CORE_SYSCALL(regs) #define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (unsigned long long)PT_REGS_PARM2_CORE_SYSCALL(regs) #define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (unsigned long long)PT_REGS_PARM3_CORE_SYSCALL(regs) #define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (unsigned long long)PT_REGS_PARM4_CORE_SYSCALL(regs) #define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (unsigned long long)PT_REGS_PARM5_CORE_SYSCALL(regs) #define ___bpf_syswrap_args6(x, args...) 
___bpf_syswrap_args5(args), (unsigned long long)PT_REGS_PARM6_CORE_SYSCALL(regs) #define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (unsigned long long)PT_REGS_PARM7_CORE_SYSCALL(regs) #define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args) /* * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for * tracing syscall functions, like __x64_sys_close. It hides the underlying * platform-specific low-level way of getting syscall input arguments from * struct pt_regs, and provides a familiar typed and named function arguments * syntax and semantics of accessing syscall input parameters. * * Original struct pt_regs * context is preserved as 'ctx' argument. This might * be necessary when using BPF helpers like bpf_perf_event_output(). * * At the moment BPF_KSYSCALL does not transparently handle all the calling * convention quirks for the following syscalls: * * - mmap(): __ARCH_WANT_SYS_OLD_MMAP. * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and * CONFIG_CLONE_BACKWARDS3. * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL. * - compat syscalls. * * This may or may not change in the future. User needs to take extra measures * to handle such quirks explicitly, if necessary. * * This macro relies on BPF CO-RE support and virtual __kconfig externs. */ #define BPF_KSYSCALL(name, args...) \ name(struct pt_regs *ctx); \ extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args); \ typeof(name(0)) name(struct pt_regs *ctx) \ { \ struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \ ? (struct pt_regs *)PT_REGS_PARM1(ctx) \ : ctx; \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ if (LINUX_HAS_SYSCALL_WRAPPER) \ return ____##name(___bpf_syswrap_args(args)); \ else \ return ____##name(___bpf_syscall_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args) #define BPF_KPROBE_SYSCALL BPF_KSYSCALL /* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE, * but are named way less confusingly for SEC("uprobe") and SEC("uretprobe") * use cases. */ #define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args) #define BPF_URETPROBE(name, args...) 
BPF_KRETPROBE(name, ##args) #endif
xdp-tools-1.5.4/lib/libbpf/src/netlink.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include <stdlib.h> #include <memory.h> #include <unistd.h> #include <arpa/inet.h> #include <linux/bpf.h> #include <linux/if_ether.h> #include <linux/genetlink.h> #include <linux/netdev.h> #include <linux/pkt_cls.h> #include <linux/rtnetlink.h> #include <sys/socket.h> #include <errno.h> #include <time.h> #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" #include "nlattr.h" #ifndef SOL_NETLINK #define SOL_NETLINK 270 #endif typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, void *cookie); struct xdp_link_info { __u32 prog_id; __u32 drv_prog_id; __u32 hw_prog_id; __u32 skb_prog_id; __u8 attach_mode; }; struct xdp_id_md { int ifindex; __u32 flags; struct xdp_link_info info; __u64 feature_flags; }; struct xdp_features_md { int ifindex; __u32 xdp_zc_max_segs; __u64 flags; }; static int libbpf_netlink_open(__u32 *nl_pid, int proto) { struct sockaddr_nl sa; socklen_t addrlen; int one = 1, ret; int sock; memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto); if (sock < 0) return -errno; if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) { pr_warn("Netlink error reporting not supported\n"); } if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { ret = -errno; goto cleanup; } addrlen = sizeof(sa); if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { ret = -errno; goto cleanup; } if (addrlen != sizeof(sa)) { ret = -LIBBPF_ERRNO__INTERNAL; goto cleanup; } *nl_pid = sa.nl_pid; return sock; cleanup: close(sock); return ret; } static void libbpf_netlink_close(int sock) { close(sock); } enum { NL_CONT, NL_NEXT, NL_DONE, }; static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags) { int len; do { len = recvmsg(sock, mhdr, flags); } while (len < 0 && (errno == EINTR || errno == EAGAIN)); if (len < 0) return -errno; return len; } static int alloc_iov(struct iovec *iov, int len) { void *nbuf; nbuf = realloc(iov->iov_base, len); if (!nbuf) return -ENOMEM; iov->iov_base = nbuf; iov->iov_len = len; return 0; } static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq, __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, void *cookie) { struct iovec iov = {}; struct msghdr mhdr = { .msg_iov = &iov, .msg_iovlen = 1, }; bool multipart = true; struct nlmsgerr *err; struct nlmsghdr *nh; int len, ret; ret = alloc_iov(&iov, 4096); if (ret) goto done; while (multipart) { start: multipart = false; len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC); if (len < 0) { ret = len; goto done; } if (len > iov.iov_len) { ret = alloc_iov(&iov, len); if (ret) goto done; } len = netlink_recvmsg(sock, &mhdr, 0); if (len < 0) { ret = len; goto done; } if (len == 0) break; for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) { if (nh->nlmsg_pid != nl_pid) { ret = -LIBBPF_ERRNO__WRNGPID; goto done; } if (nh->nlmsg_seq != seq) { ret = -LIBBPF_ERRNO__INVSEQ; goto done; } if (nh->nlmsg_flags & NLM_F_MULTI) multipart = true; switch (nh->nlmsg_type) { case NLMSG_ERROR: err = (struct nlmsgerr *)NLMSG_DATA(nh); if (!err->error) continue; ret = err->error; libbpf_nla_dump_errormsg(nh); goto done; case NLMSG_DONE: ret = 0; goto done; default: break; } if (_fn) { ret = _fn(nh, fn, cookie); switch (ret) { case NL_CONT: break; case NL_NEXT: goto start; case NL_DONE: ret = 0; goto done; default: goto done; } } } } ret = 0; done: free(iov.iov_base); return
ret; } static int libbpf_netlink_send_recv(struct libbpf_nla_req *req, int proto, __dump_nlmsg_t parse_msg, libbpf_dump_nlmsg_t parse_attr, void *cookie) { __u32 nl_pid = 0; int sock, ret; sock = libbpf_netlink_open(&nl_pid, proto); if (sock < 0) return sock; req->nh.nlmsg_pid = 0; req->nh.nlmsg_seq = time(NULL); if (send(sock, req, req->nh.nlmsg_len, 0) < 0) { ret = -errno; goto out; } ret = libbpf_netlink_recv(sock, nl_pid, req->nh.nlmsg_seq, parse_msg, parse_attr, cookie); out: libbpf_netlink_close(sock); return ret; } static int parse_genl_family_id(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, void *cookie) { struct genlmsghdr *gnl = NLMSG_DATA(nh); struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN); struct nlattr *tb[CTRL_ATTR_FAMILY_ID + 1]; __u16 *id = cookie; libbpf_nla_parse(tb, CTRL_ATTR_FAMILY_ID, na, NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL); if (!tb[CTRL_ATTR_FAMILY_ID]) return NL_CONT; *id = libbpf_nla_getattr_u16(tb[CTRL_ATTR_FAMILY_ID]); return NL_DONE; } static int libbpf_netlink_resolve_genl_family_id(const char *name, __u16 len, __u16 *id) { struct libbpf_nla_req req = { .nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN), .nh.nlmsg_type = GENL_ID_CTRL, .nh.nlmsg_flags = NLM_F_REQUEST, .gnl.cmd = CTRL_CMD_GETFAMILY, .gnl.version = 2, }; int err; err = nlattr_add(&req, CTRL_ATTR_FAMILY_NAME, name, len); if (err < 0) return err; return libbpf_netlink_send_recv(&req, NETLINK_GENERIC, parse_genl_family_id, NULL, id); } static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, __u32 flags) { struct nlattr *nla; int ret; struct libbpf_nla_req req; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; req.nh.nlmsg_type = RTM_SETLINK; req.ifinfo.ifi_family = AF_UNSPEC; req.ifinfo.ifi_index = ifindex; nla = nlattr_begin_nested(&req, IFLA_XDP); if (!nla) return -EMSGSIZE; ret = nlattr_add(&req, IFLA_XDP_FD, &fd, sizeof(fd)); if (ret < 0) return ret; if (flags) { ret = nlattr_add(&req, IFLA_XDP_FLAGS, &flags, sizeof(flags)); if (ret < 0) return ret; } if (flags & XDP_FLAGS_REPLACE) { ret = nlattr_add(&req, IFLA_XDP_EXPECTED_FD, &old_fd, sizeof(old_fd)); if (ret < 0) return ret; } nlattr_end_nested(&req, nla); return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL); } int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts) { int old_prog_fd, err; if (!OPTS_VALID(opts, bpf_xdp_attach_opts)) return libbpf_err(-EINVAL); old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); if (old_prog_fd) flags |= XDP_FLAGS_REPLACE; else old_prog_fd = -1; err = __bpf_set_link_xdp_fd_replace(ifindex, prog_fd, old_prog_fd, flags); return libbpf_err(err); } int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts) { return bpf_xdp_attach(ifindex, -1, flags, opts); } static int __dump_link_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { struct nlattr *tb[IFLA_MAX + 1], *attr; struct ifinfomsg *ifi = NLMSG_DATA(nlh); int len; len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)); attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi))); if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0) return -LIBBPF_ERRNO__NLPARSE; return dump_link_nlmsg(cookie, ifi, tb); } static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) { struct nlattr *xdp_tb[IFLA_XDP_MAX + 1]; struct xdp_id_md *xdp_id = cookie; struct ifinfomsg *ifinfo = msg; int ret; if (xdp_id->ifindex && xdp_id->ifindex != 
ifinfo->ifi_index) return 0; if (!tb[IFLA_XDP]) return 0; ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL); if (ret) return ret; if (!xdp_tb[IFLA_XDP_ATTACHED]) return 0; xdp_id->info.attach_mode = libbpf_nla_getattr_u8( xdp_tb[IFLA_XDP_ATTACHED]); if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE) return 0; if (xdp_tb[IFLA_XDP_PROG_ID]) xdp_id->info.prog_id = libbpf_nla_getattr_u32( xdp_tb[IFLA_XDP_PROG_ID]); if (xdp_tb[IFLA_XDP_SKB_PROG_ID]) xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32( xdp_tb[IFLA_XDP_SKB_PROG_ID]); if (xdp_tb[IFLA_XDP_DRV_PROG_ID]) xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32( xdp_tb[IFLA_XDP_DRV_PROG_ID]); if (xdp_tb[IFLA_XDP_HW_PROG_ID]) xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32( xdp_tb[IFLA_XDP_HW_PROG_ID]); return 0; } static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, void *cookie) { struct genlmsghdr *gnl = NLMSG_DATA(nh); struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN); struct nlattr *tb[NETDEV_CMD_MAX + 1]; struct xdp_features_md *md = cookie; __u32 ifindex; libbpf_nla_parse(tb, NETDEV_CMD_MAX, na, NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL); if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES]) return NL_CONT; ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]); if (ifindex != md->ifindex) return NL_CONT; md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]); if (tb[NETDEV_A_DEV_XDP_ZC_MAX_SEGS]) md->xdp_zc_max_segs = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_XDP_ZC_MAX_SEGS]); return NL_DONE; } int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts) { struct libbpf_nla_req req = { .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), .nh.nlmsg_type = RTM_GETLINK, .nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .ifinfo.ifi_family = AF_PACKET, }; struct xdp_id_md xdp_id = {}; struct xdp_features_md md = { .ifindex = ifindex, }; __u16 id; int err; if (!OPTS_VALID(opts, bpf_xdp_query_opts)) return libbpf_err(-EINVAL); if (xdp_flags & ~XDP_FLAGS_MASK) return libbpf_err(-EINVAL); /* Check whether the single {HW,DRV,SKB} mode is set */ xdp_flags &= XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE; if (xdp_flags & (xdp_flags - 1)) return libbpf_err(-EINVAL); xdp_id.ifindex = ifindex; xdp_id.flags = xdp_flags; err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg, get_xdp_info, &xdp_id); if (err) return libbpf_err(err); OPTS_SET(opts, prog_id, xdp_id.info.prog_id); OPTS_SET(opts, drv_prog_id, xdp_id.info.drv_prog_id); OPTS_SET(opts, hw_prog_id, xdp_id.info.hw_prog_id); OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id); OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode); if (!OPTS_HAS(opts, feature_flags)) return 0; err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id); if (err < 0) { if (err == -ENOENT) { opts->feature_flags = 0; goto skip_feature_flags; } return libbpf_err(err); } memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); req.nh.nlmsg_flags = NLM_F_REQUEST; req.nh.nlmsg_type = id; req.gnl.cmd = NETDEV_CMD_DEV_GET; req.gnl.version = 2; err = nlattr_add(&req, NETDEV_A_DEV_IFINDEX, &ifindex, sizeof(ifindex)); if (err < 0) return libbpf_err(err); err = libbpf_netlink_send_recv(&req, NETLINK_GENERIC, parse_xdp_features, NULL, &md); if (err) return libbpf_err(err); OPTS_SET(opts, feature_flags, md.flags); OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs); skip_feature_flags: return 0; } int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id) { 
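/* Resolve the id of the program attached in the requested mode: query all
 * attach points once, then pick the field that matches the mode flag (or
 * the single attached program when no mode flag is given). */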
LIBBPF_OPTS(bpf_xdp_query_opts, opts); int ret; ret = bpf_xdp_query(ifindex, flags, &opts); if (ret) return libbpf_err(ret); flags &= XDP_FLAGS_MODES; if (opts.attach_mode != XDP_ATTACHED_MULTI && !flags) *prog_id = opts.prog_id; else if (flags & XDP_FLAGS_DRV_MODE) *prog_id = opts.drv_prog_id; else if (flags & XDP_FLAGS_HW_MODE) *prog_id = opts.hw_prog_id; else if (flags & XDP_FLAGS_SKB_MODE) *prog_id = opts.skb_prog_id; else *prog_id = 0; return 0; } typedef int (*qdisc_config_t)(struct libbpf_nla_req *req); static int clsact_config(struct libbpf_nla_req *req) { req->tc.tcm_parent = TC_H_CLSACT; req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0); return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact")); } static int attach_point_to_config(struct bpf_tc_hook *hook, qdisc_config_t *config) { switch (OPTS_GET(hook, attach_point, 0)) { case BPF_TC_INGRESS: case BPF_TC_EGRESS: case BPF_TC_INGRESS | BPF_TC_EGRESS: if (OPTS_GET(hook, parent, 0)) return -EINVAL; *config = &clsact_config; return 0; case BPF_TC_CUSTOM: return -EOPNOTSUPP; default: return -EINVAL; } } static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point, __u32 *parent) { switch (attach_point) { case BPF_TC_INGRESS: case BPF_TC_EGRESS: if (*parent) return -EINVAL; *parent = TC_H_MAKE(TC_H_CLSACT, attach_point == BPF_TC_INGRESS ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS); break; case BPF_TC_CUSTOM: if (!*parent) return -EINVAL; break; default: return -EINVAL; } return 0; } static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags) { qdisc_config_t config; int ret; struct libbpf_nla_req req; ret = attach_point_to_config(hook, &config); if (ret < 0) return ret; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags; req.nh.nlmsg_type = cmd; req.tc.tcm_family = AF_UNSPEC; req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0); ret = config(&req); if (ret < 0) return ret; return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL); } static int tc_qdisc_create_excl(struct bpf_tc_hook *hook) { return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL); } static int tc_qdisc_delete(struct bpf_tc_hook *hook) { return tc_qdisc_modify(hook, RTM_DELQDISC, 0); } int bpf_tc_hook_create(struct bpf_tc_hook *hook) { int ret; if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || OPTS_GET(hook, ifindex, 0) <= 0) return libbpf_err(-EINVAL); ret = tc_qdisc_create_excl(hook); return libbpf_err(ret); } static int __bpf_tc_detach(const struct bpf_tc_hook *hook, const struct bpf_tc_opts *opts, const bool flush); int bpf_tc_hook_destroy(struct bpf_tc_hook *hook) { if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || OPTS_GET(hook, ifindex, 0) <= 0) return libbpf_err(-EINVAL); switch (OPTS_GET(hook, attach_point, 0)) { case BPF_TC_INGRESS: case BPF_TC_EGRESS: return libbpf_err(__bpf_tc_detach(hook, NULL, true)); case BPF_TC_INGRESS | BPF_TC_EGRESS: return libbpf_err(tc_qdisc_delete(hook)); case BPF_TC_CUSTOM: return libbpf_err(-EOPNOTSUPP); default: return libbpf_err(-EINVAL); } } struct bpf_cb_ctx { struct bpf_tc_opts *opts; bool processed; }; static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb, bool unicast) { struct nlattr *tbb[TCA_BPF_MAX + 1]; struct bpf_cb_ctx *info = cookie; if (!info || !info->opts) return -EINVAL; if (unicast && info->processed) return -EINVAL; if (!tb[TCA_OPTIONS]) return NL_CONT; libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL); if (!tbb[TCA_BPF_ID]) return -EINVAL; 
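/* This is a cls_bpf filter with a program attached: report the program id,
 * filter handle and priority back to the caller through opts. */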
OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID])); OPTS_SET(info->opts, handle, tc->tcm_handle); OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16); info->processed = true; return unicast ? NL_NEXT : NL_DONE; } static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, void *cookie) { struct tcmsg *tc = NLMSG_DATA(nh); struct nlattr *tb[TCA_MAX + 1]; libbpf_nla_parse(tb, TCA_MAX, (struct nlattr *)((void *)tc + NLMSG_ALIGN(sizeof(*tc))), NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL); if (!tb[TCA_KIND]) return NL_CONT; return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO); } static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd) { struct bpf_prog_info info; __u32 info_len = sizeof(info); char name[256]; int len, ret; memset(&info, 0, info_len); ret = bpf_prog_get_info_by_fd(fd, &info, &info_len); if (ret < 0) return ret; ret = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd)); if (ret < 0) return ret; len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id); if (len < 0) return -errno; if (len >= sizeof(name)) return -ENAMETOOLONG; return nlattr_add(req, TCA_BPF_NAME, name, len + 1); } int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts) { __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags; int ret, ifindex, attach_point, prog_fd; struct bpf_cb_ctx info = {}; struct libbpf_nla_req req; struct nlattr *nla; if (!hook || !opts || !OPTS_VALID(hook, bpf_tc_hook) || !OPTS_VALID(opts, bpf_tc_opts)) return libbpf_err(-EINVAL); ifindex = OPTS_GET(hook, ifindex, 0); parent = OPTS_GET(hook, parent, 0); attach_point = OPTS_GET(hook, attach_point, 0); handle = OPTS_GET(opts, handle, 0); priority = OPTS_GET(opts, priority, 0); prog_fd = OPTS_GET(opts, prog_fd, 0); prog_id = OPTS_GET(opts, prog_id, 0); flags = OPTS_GET(opts, flags, 0); if (ifindex <= 0 || !prog_fd || prog_id) return libbpf_err(-EINVAL); if (priority > UINT16_MAX) return libbpf_err(-EINVAL); if (flags & ~BPF_TC_F_REPLACE) return libbpf_err(-EINVAL); flags = (flags & BPF_TC_F_REPLACE) ? 
NLM_F_REPLACE : NLM_F_EXCL; protocol = ETH_P_ALL; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_ECHO | flags; req.nh.nlmsg_type = RTM_NEWTFILTER; req.tc.tcm_family = AF_UNSPEC; req.tc.tcm_ifindex = ifindex; req.tc.tcm_handle = handle; req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); ret = tc_get_tcm_parent(attach_point, &parent); if (ret < 0) return libbpf_err(ret); req.tc.tcm_parent = parent; ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); if (ret < 0) return libbpf_err(ret); nla = nlattr_begin_nested(&req, TCA_OPTIONS); if (!nla) return libbpf_err(-EMSGSIZE); ret = tc_add_fd_and_name(&req, prog_fd); if (ret < 0) return libbpf_err(ret); bpf_flags = TCA_BPF_FLAG_ACT_DIRECT; ret = nlattr_add(&req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags)); if (ret < 0) return libbpf_err(ret); nlattr_end_nested(&req, nla); info.opts = opts; ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL, &info); if (ret < 0) return libbpf_err(ret); if (!info.processed) return libbpf_err(-ENOENT); return ret; } static int __bpf_tc_detach(const struct bpf_tc_hook *hook, const struct bpf_tc_opts *opts, const bool flush) { __u32 protocol = 0, handle, priority, parent, prog_id, flags; int ret, ifindex, attach_point, prog_fd; struct libbpf_nla_req req; if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || !OPTS_VALID(opts, bpf_tc_opts)) return -EINVAL; ifindex = OPTS_GET(hook, ifindex, 0); parent = OPTS_GET(hook, parent, 0); attach_point = OPTS_GET(hook, attach_point, 0); handle = OPTS_GET(opts, handle, 0); priority = OPTS_GET(opts, priority, 0); prog_fd = OPTS_GET(opts, prog_fd, 0); prog_id = OPTS_GET(opts, prog_id, 0); flags = OPTS_GET(opts, flags, 0); if (ifindex <= 0 || flags || prog_fd || prog_id) return -EINVAL; if (priority > UINT16_MAX) return -EINVAL; if (!flush) { if (!handle || !priority) return -EINVAL; protocol = ETH_P_ALL; } else { if (handle || priority) return -EINVAL; } memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; req.nh.nlmsg_type = RTM_DELTFILTER; req.tc.tcm_family = AF_UNSPEC; req.tc.tcm_ifindex = ifindex; if (!flush) { req.tc.tcm_handle = handle; req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); } ret = tc_get_tcm_parent(attach_point, &parent); if (ret < 0) return ret; req.tc.tcm_parent = parent; if (!flush) { ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); if (ret < 0) return ret; } return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL); } int bpf_tc_detach(const struct bpf_tc_hook *hook, const struct bpf_tc_opts *opts) { int ret; if (!opts) return libbpf_err(-EINVAL); ret = __bpf_tc_detach(hook, opts, false); return libbpf_err(ret); } int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts) { __u32 protocol, handle, priority, parent, prog_id, flags; int ret, ifindex, attach_point, prog_fd; struct bpf_cb_ctx info = {}; struct libbpf_nla_req req; if (!hook || !opts || !OPTS_VALID(hook, bpf_tc_hook) || !OPTS_VALID(opts, bpf_tc_opts)) return libbpf_err(-EINVAL); ifindex = OPTS_GET(hook, ifindex, 0); parent = OPTS_GET(hook, parent, 0); attach_point = OPTS_GET(hook, attach_point, 0); handle = OPTS_GET(opts, handle, 0); priority = OPTS_GET(opts, priority, 0); prog_fd = OPTS_GET(opts, prog_fd, 0); prog_id = OPTS_GET(opts, prog_id, 0); flags = OPTS_GET(opts, flags, 0); if (ifindex <= 0 || flags || prog_fd || prog_id || !handle || 
!priority) return libbpf_err(-EINVAL); if (priority > UINT16_MAX) return libbpf_err(-EINVAL); protocol = ETH_P_ALL; memset(&req, 0, sizeof(req)); req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); req.nh.nlmsg_flags = NLM_F_REQUEST; req.nh.nlmsg_type = RTM_GETTFILTER; req.tc.tcm_family = AF_UNSPEC; req.tc.tcm_ifindex = ifindex; req.tc.tcm_handle = handle; req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); ret = tc_get_tcm_parent(attach_point, &parent); if (ret < 0) return libbpf_err(ret); req.tc.tcm_parent = parent; ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); if (ret < 0) return libbpf_err(ret); info.opts = opts; ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL, &info); if (ret < 0) return libbpf_err(ret); if (!info.processed) return libbpf_err(-ENOENT); return ret; }
xdp-tools-1.5.4/lib/libbpf/src/hashmap.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Generic non-thread safe hash map implementation. * * Copyright (c) 2019 Facebook */ #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <linux/err.h> #include "hashmap.h" /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 /* prevent accidental re-addition of reallocarray() */ #pragma GCC poison reallocarray /* start with 4 buckets */ #define HASHMAP_MIN_CAP_BITS 2 static void hashmap_add_entry(struct hashmap_entry **pprev, struct hashmap_entry *entry) { entry->next = *pprev; *pprev = entry; } static void hashmap_del_entry(struct hashmap_entry **pprev, struct hashmap_entry *entry) { *pprev = entry->next; entry->next = NULL; } void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, hashmap_equal_fn equal_fn, void *ctx) { map->hash_fn = hash_fn; map->equal_fn = equal_fn; map->ctx = ctx; map->buckets = NULL; map->cap = 0; map->cap_bits = 0; map->sz = 0; } struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, hashmap_equal_fn equal_fn, void *ctx) { struct hashmap *map = malloc(sizeof(struct hashmap)); if (!map) return ERR_PTR(-ENOMEM); hashmap__init(map, hash_fn, equal_fn, ctx); return map; } void hashmap__clear(struct hashmap *map) { struct hashmap_entry *cur, *tmp; size_t bkt; hashmap__for_each_entry_safe(map, cur, tmp, bkt) { free(cur); } free(map->buckets); map->buckets = NULL; map->cap = map->cap_bits = map->sz = 0; } void hashmap__free(struct hashmap *map) { if (IS_ERR_OR_NULL(map)) return; hashmap__clear(map); free(map); } size_t hashmap__size(const struct hashmap *map) { return map->sz; } size_t hashmap__capacity(const struct hashmap *map) { return map->cap; } static bool hashmap_needs_to_grow(struct hashmap *map) { /* grow if empty or more than 75% filled */ return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); } static int hashmap_grow(struct hashmap *map) { struct hashmap_entry **new_buckets; struct hashmap_entry *cur, *tmp; size_t new_cap_bits, new_cap; size_t h, bkt; new_cap_bits = map->cap_bits + 1; if (new_cap_bits < HASHMAP_MIN_CAP_BITS) new_cap_bits = HASHMAP_MIN_CAP_BITS; new_cap = 1UL << new_cap_bits; new_buckets = calloc(new_cap, sizeof(new_buckets[0])); if (!new_buckets) return -ENOMEM; hashmap__for_each_entry_safe(map, cur, tmp, bkt) { h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits); hashmap_add_entry(&new_buckets[h], cur); } map->cap = new_cap; map->cap_bits = new_cap_bits; free(map->buckets); map->buckets = new_buckets; return 0; } static bool hashmap_find_entry(const struct hashmap *map, const long key, size_t hash, struct
hashmap_entry ***pprev, struct hashmap_entry **entry) { struct hashmap_entry *cur, **prev_ptr; if (!map->buckets) return false; for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; cur; prev_ptr = &cur->next, cur = cur->next) { if (map->equal_fn(cur->key, key, map->ctx)) { if (pprev) *pprev = prev_ptr; *entry = cur; return true; } } return false; } int hashmap_insert(struct hashmap *map, long key, long value, enum hashmap_insert_strategy strategy, long *old_key, long *old_value) { struct hashmap_entry *entry; size_t h; int err; if (old_key) *old_key = 0; if (old_value) *old_value = 0; h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); if (strategy != HASHMAP_APPEND && hashmap_find_entry(map, key, h, NULL, &entry)) { if (old_key) *old_key = entry->key; if (old_value) *old_value = entry->value; if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) { entry->key = key; entry->value = value; return 0; } else if (strategy == HASHMAP_ADD) { return -EEXIST; } } if (strategy == HASHMAP_UPDATE) return -ENOENT; if (hashmap_needs_to_grow(map)) { err = hashmap_grow(map); if (err) return err; h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); } entry = malloc(sizeof(struct hashmap_entry)); if (!entry) return -ENOMEM; entry->key = key; entry->value = value; hashmap_add_entry(&map->buckets[h], entry); map->sz++; return 0; } bool hashmap_find(const struct hashmap *map, long key, long *value) { struct hashmap_entry *entry; size_t h; h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); if (!hashmap_find_entry(map, key, h, NULL, &entry)) return false; if (value) *value = entry->value; return true; } bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value) { struct hashmap_entry **pprev, *entry; size_t h; h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); if (!hashmap_find_entry(map, key, h, &pprev, &entry)) return false; if (old_key) *old_key = entry->key; if (old_value) *old_value = entry->value; hashmap_del_entry(pprev, entry); free(entry); map->sz--; return true; }
xdp-tools-1.5.4/lib/libbpf/src/bpf.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Common BPF ELF operations. * * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> * Copyright (C) 2015 Huawei Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; * version 2.1 of the License (not later!) * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see <http://www.gnu.org/licenses> */ #ifndef __LIBBPF_BPF_H #define __LIBBPF_BPF_H #include <linux/bpf.h> #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include "libbpf_common.h" #include "libbpf_legacy.h" #ifdef __cplusplus extern "C" { #endif LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes); struct bpf_map_create_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; __u32 inner_map_fd; __u32 map_flags; __u64 map_extra; __u32 numa_node; __u32 map_ifindex; __s32 value_type_btf_obj_fd; __u32 token_fd; size_t :0; }; #define bpf_map_create_opts__last_field token_fd LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries, const struct bpf_map_create_opts *opts); struct bpf_prog_load_opts { size_t sz; /* size of this struct for forward/backward compatibility */ /* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns * -EAGAIN. This field determines how many attempts libbpf has to * make. If not specified, libbpf will use default value of 5. */ int attempts; enum bpf_attach_type expected_attach_type; __u32 prog_btf_fd; __u32 prog_flags; __u32 prog_ifindex; __u32 kern_version; __u32 attach_btf_id; __u32 attach_prog_fd; __u32 attach_btf_obj_fd; const int *fd_array; /* .BTF.ext func info data */ const void *func_info; __u32 func_info_cnt; __u32 func_info_rec_size; /* .BTF.ext line info data */ const void *line_info; __u32 line_info_cnt; __u32 line_info_rec_size; /* verifier log options */ __u32 log_level; __u32 log_size; char *log_buf; /* output: actual total log contents size (including terminating zero). * It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). * If kernel doesn't support this feature, log_size is left unchanged. */ __u32 log_true_size; __u32 token_fd; size_t :0; }; #define bpf_prog_load_opts__last_field token_fd LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type, const char *prog_name, const char *license, const struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *opts); /* Flags to direct loading requirements */ #define MAPS_RELAX_COMPAT 0x01 /* Recommended log buffer size */ #define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ struct bpf_btf_load_opts { size_t sz; /* size of this struct for forward/backward compatibility */ /* kernel log options */ char *log_buf; __u32 log_level; __u32 log_size; /* output: actual total log contents size (including terminating zero). * It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). * If kernel doesn't support this feature, log_size is left unchanged.
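 *
 * (A common calling pattern, sketched here rather than mandated: call
 * bpf_btf_load() first with log_level 0 and no buffer, and only on
 * failure retry with log_level 1 and a log_buf to capture the kernel's
 * explanation of what it rejected.)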
*/ __u32 log_true_size; __u32 btf_flags; __u32 token_fd; size_t :0; }; #define bpf_btf_load_opts__last_field token_fd LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts); LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value); LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags); LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value); LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags); LIBBPF_API int bpf_map_delete_elem(int fd, const void *key); LIBBPF_API int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags); LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key); LIBBPF_API int bpf_map_freeze(int fd); struct bpf_map_batch_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u64 elem_flags; __u64 flags; }; #define bpf_map_batch_opts__last_field flags /** * @brief **bpf_map_delete_batch()** allows for batch deletion of multiple * elements in a BPF map. * * @param fd BPF map file descriptor * @param keys pointer to an array of *count* keys * @param count input and output parameter; on input **count** represents the * number of elements in the map to delete in batch; * on output, if a non-**EFAULT** error is returned, **count** represents the * number of elements that were deleted before the error occurred. * If **EFAULT** is returned, **count** should not be trusted to be correct. * @param opts options for configuring the way the batch deletion works * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys, __u32 *count, const struct bpf_map_batch_opts *opts); /** * @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements. * * The parameter *in_batch* is the address of the first element in the batch to * read. *out_batch* is an output parameter that should be passed as *in_batch* * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for * *in_batch* to indicate that the batched lookup starts from the beginning of * the map. Both *in_batch* and *out_batch* must point to memory large enough to * hold a single key, except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH, * LRU_HASH, LRU_PERCPU_HASH}**, for which the memory size must be at * least 4 bytes wide regardless of key size. * * The *keys* and *values* are output parameters which must point to memory large enough to * hold *count* items based on the key and value size of the map *map_fd*. The *keys* * buffer must be *key_size* * *count* bytes. The *values* buffer must be * *value_size* * *count* bytes. * * @param fd BPF map file descriptor * @param in_batch address of the first element in batch to read, can pass NULL to * indicate that the batched lookup starts from the beginning of the map. * @param out_batch output parameter that should be passed to next call as *in_batch* * @param keys pointer to an array large enough for *count* keys * @param values pointer to an array large enough for *count* values * @param count input and output parameter; on input it's the number of elements * in the map to read in batch; on output it's the number of elements that were * successfully read.
* If a non-EFAULT error is returned, count will be set as the number of elements * that were read before the error occurred. * If EFAULT is returned, **count** should not be trusted to be correct. * @param opts options for configuring the way the batch lookup works * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, void *values, __u32 *count, const struct bpf_map_batch_opts *opts); /** * @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion * of BPF map elements where each element is deleted after being retrieved. * * @param fd BPF map file descriptor * @param in_batch address of the first element in batch to read, can pass NULL to * get address of the first element in *out_batch*. If not NULL, must be large * enough to hold a key. For **BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, * LRU_PERCPU_HASH}**, the memory size must be at least 4 bytes wide regardless * of key size. * @param out_batch output parameter that should be passed to next call as *in_batch* * @param keys pointer to an array of *count* keys * @param values pointer to an array large enough for *count* values * @param count input and output parameter; on input it's the number of elements * in the map to read and delete in batch; on output it represents the number of * elements that were successfully read and deleted * If a non-**EFAULT** error code is returned and if the output **count** value * is not equal to the input **count** value, up to **count** elements may * have been deleted. * if **EFAULT** is returned up to *count* elements may have been deleted without * being returned via the *keys* and *values* output parameters. * @param opts options for configuring the way the batch lookup and delete works * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, void *keys, void *values, __u32 *count, const struct bpf_map_batch_opts *opts); /** * @brief **bpf_map_update_batch()** updates multiple elements in a map * by specifying keys and their corresponding values. * * The *keys* and *values* parameters must point to memory large enough * to hold *count* items based on the key and value size of the map. * * The *opts* parameter can be used to control how *bpf_map_update_batch()* * should handle keys that either do or do not already exist in the map. * In particular the *flags* parameter of *bpf_map_batch_opts* can be * one of the following: * * Note that *count* is an input and output parameter, where on output it * represents how many elements were successfully updated. Also note that if * **EFAULT** then *count* should not be trusted to be correct. * * **BPF_ANY** * Create new elements or update existing. * * **BPF_NOEXIST** * Create new elements only if they do not exist. * * **BPF_EXIST** * Update existing elements. * * **BPF_F_LOCK** * Update spin_lock-ed map elements. This must be * specified if the map value contains a spinlock. 
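 *
 * As a usage sketch (illustrative, not upstream documentation), assuming
 * *map_fd* refers to a **BPF_MAP_TYPE_HASH** with 4-byte keys and 8-byte
 * values:
 *
 *	__u32 keys[64];
 *	__u64 values[64];
 *	__u32 count = 64;
 *	int err;
 *	LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = BPF_ANY);
 *
 *	fill keys and values ...
 *	err = bpf_map_update_batch(map_fd, keys, values, &count, &opts);
 *	on return, count holds the number of elements actually updated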
* * @param fd BPF map file descriptor * @param keys pointer to an array of *count* keys * @param values pointer to an array of *count* values * @param count input and output parameter; on input it's the number of elements * in the map to update in batch; on output if a non-EFAULT error is returned, * **count** represents the number of updated elements if the output **count** * value is not equal to the input **count** value. * If EFAULT is returned, **count** should not be trusted to be correct. * @param opts options for configuring the way the batch update works * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count, const struct bpf_map_batch_opts *opts); struct bpf_obj_pin_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 file_flags; int path_fd; size_t :0; }; #define bpf_obj_pin_opts__last_field path_fd LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); LIBBPF_API int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts); struct bpf_obj_get_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 file_flags; int path_fd; size_t :0; }; #define bpf_obj_get_opts__last_field path_fd LIBBPF_API int bpf_obj_get(const char *pathname); LIBBPF_API int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts); LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type); struct bpf_prog_attach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; union { int replace_prog_fd; int replace_fd; }; int relative_fd; __u32 relative_id; __u64 expected_revision; size_t :0; }; #define bpf_prog_attach_opts__last_field expected_revision struct bpf_prog_detach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; int relative_fd; __u32 relative_id; __u64 expected_revision; size_t :0; }; #define bpf_prog_detach_opts__last_field expected_revision /** * @brief **bpf_prog_attach_opts()** attaches the BPF program corresponding to * *prog_fd* to a *target* which can represent a file descriptor or netdevice * ifindex. * * @param prog_fd BPF program file descriptor * @param target attach location file descriptor or ifindex * @param type attach type for the BPF program * @param opts options for configuring the attachment * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type, const struct bpf_prog_attach_opts *opts); /** * @brief **bpf_prog_detach_opts()** detaches the BPF program corresponding to * *prog_fd* from a *target* which can represent a file descriptor or netdevice * ifindex. 
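 *
 * A minimal sketch (illustrative only), detaching a TCX ingress program,
 * assuming a kernel with TCX support and that *prog_fd* and *ifindex* were
 * obtained earlier:
 *
 *	LIBBPF_OPTS(bpf_prog_detach_opts, opts);
 *	int err = bpf_prog_detach_opts(prog_fd, ifindex, BPF_TCX_INGRESS, &opts);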
* * @param prog_fd BPF program file descriptor * @param target detach location file descriptor or ifindex * @param type detach type for the BPF program * @param opts options for configuring the detachment * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type, const struct bpf_prog_detach_opts *opts); union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */ struct bpf_link_create_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; union bpf_iter_link_info *iter_info; __u32 iter_info_len; __u32 target_btf_id; union { struct { __u64 bpf_cookie; } perf_event; struct { __u32 flags; __u32 cnt; const char **syms; const unsigned long *addrs; const __u64 *cookies; } kprobe_multi; struct { __u32 flags; __u32 cnt; const char *path; const unsigned long *offsets; const unsigned long *ref_ctr_offsets; const __u64 *cookies; __u32 pid; } uprobe_multi; struct { __u64 cookie; } tracing; struct { __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; } netfilter; struct { __u32 relative_fd; __u32 relative_id; __u64 expected_revision; } tcx; struct { __u32 relative_fd; __u32 relative_id; __u64 expected_revision; } netkit; }; size_t :0; }; #define bpf_link_create_opts__last_field uprobe_multi.pid LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, const struct bpf_link_create_opts *opts); LIBBPF_API int bpf_link_detach(int link_fd); struct bpf_link_update_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; /* extra flags */ __u32 old_prog_fd; /* expected old program FD */ __u32 old_map_fd; /* expected old map FD */ }; #define bpf_link_update_opts__last_field old_map_fd LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd, const struct bpf_link_update_opts *opts); LIBBPF_API int bpf_iter_create(int link_fd); struct bpf_prog_test_run_attr { int prog_fd; int repeat; const void *data_in; __u32 data_size_in; void *data_out; /* optional */ __u32 data_size_out; /* in: max length of data_out * out: length of data_out */ __u32 retval; /* out: return code of the BPF program */ __u32 duration; /* out: average per repetition in ns */ const void *ctx_in; /* optional */ __u32 ctx_size_in; void *ctx_out; /* optional */ __u32 ctx_size_out; /* in: max length of ctx_out * out: length of cxt_out */ }; LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id); LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id); struct bpf_get_fd_by_id_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 open_flags; /* permissions requested for the operation on fd */ size_t :0; }; #define bpf_get_fd_by_id_opts__last_field open_flags LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id); LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_link_get_fd_by_id(__u32 id); LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); 
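/* Usage sketch (illustrative, not part of the API surface): iterating over
 * all loaded programs with the get_next_id/get_fd_by_id pattern, requesting
 * read-only FDs via BPF_F_RDONLY (assumes <unistd.h> for close()):
 *
 *	__u32 id = 0;
 *	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts, .open_flags = BPF_F_RDONLY);
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		int fd = bpf_prog_get_fd_by_id_opts(id, &opts);
 *
 *		if (fd < 0)
 *			continue;
 *		inspect the program through fd, then ...
 *		close(fd);
 *	}
 */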
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); /** * @brief **bpf_prog_get_info_by_fd()** obtains information about the BPF * program corresponding to *prog_fd*. * * Populates up to *info_len* bytes of *info* and updates *info_len* with the * actual number of bytes written to *info*. Note that *info* should be * zero-initialized or initialized as expected by the requested *info* * type. Failing to (zero-)initialize *info* under certain circumstances can * result in this helper returning an error. * * @param prog_fd BPF program file descriptor * @param info pointer to **struct bpf_prog_info** that will be populated with * BPF program information * @param info_len pointer to the size of *info*; on success updated with the * number of bytes written to *info* * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len); /** * @brief **bpf_map_get_info_by_fd()** obtains information about the BPF * map corresponding to *map_fd*. * * Populates up to *info_len* bytes of *info* and updates *info_len* with the * actual number of bytes written to *info*. Note that *info* should be * zero-initialized or initialized as expected by the requested *info* * type. Failing to (zero-)initialize *info* under certain circumstances can * result in this helper returning an error. * * @param map_fd BPF map file descriptor * @param info pointer to **struct bpf_map_info** that will be populated with * BPF map information * @param info_len pointer to the size of *info*; on success updated with the * number of bytes written to *info* * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len); /** * @brief **bpf_btf_get_info_by_fd()** obtains information about the * BTF object corresponding to *btf_fd*. * * Populates up to *info_len* bytes of *info* and updates *info_len* with the * actual number of bytes written to *info*. Note that *info* should be * zero-initialized or initialized as expected by the requested *info* * type. Failing to (zero-)initialize *info* under certain circumstances can * result in this helper returning an error. * * @param btf_fd BTF object file descriptor * @param info pointer to **struct bpf_btf_info** that will be populated with * BTF object information * @param info_len pointer to the size of *info*; on success updated with the * number of bytes written to *info* * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len); /** * @brief **bpf_link_get_info_by_fd()** obtains information about the BPF * link corresponding to *link_fd*. * * Populates up to *info_len* bytes of *info* and updates *info_len* with the * actual number of bytes written to *info*. Note that *info* should be * zero-initialized or initialized as expected by the requested *info* * type. Failing to (zero-)initialize *info* under certain circumstances can * result in this helper returning an error.
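 *
 * For illustration (a sketch, not upstream documentation), note the
 * zero-initialized info buffer:
 *
 *	struct bpf_link_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	if (!bpf_link_get_info_by_fd(link_fd, &info, &len))
 *		info.type, info.id and info.prog_id are now valid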
* * @param link_fd BPF link file descriptor * @param info pointer to **struct bpf_link_info** that will be populated with * BPF link information * @param info_len pointer to the size of *info*; on success updated with the * number of bytes written to *info* * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len); struct bpf_prog_query_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 query_flags; __u32 attach_flags; /* output argument */ __u32 *prog_ids; union { /* input+output argument */ __u32 prog_cnt; __u32 count; }; __u32 *prog_attach_flags; __u32 *link_ids; __u32 *link_attach_flags; __u64 revision; size_t :0; }; #define bpf_prog_query_opts__last_field revision /** * @brief **bpf_prog_query_opts()** queries the BPF programs and BPF links * which are attached to *target* which can represent a file descriptor or * netdevice ifindex. * * @param target query location file descriptor or ifindex * @param type attach type for the BPF program * @param opts options for configuring the query * @return 0, on success; negative error code, otherwise (errno is also set to * the error code) */ LIBBPF_API int bpf_prog_query_opts(int target, enum bpf_attach_type type, struct bpf_prog_query_opts *opts); LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt); struct bpf_raw_tp_opts { size_t sz; /* size of this struct for forward/backward compatibility */ const char *tp_name; __u64 cookie; size_t :0; }; #define bpf_raw_tp_opts__last_field cookie LIBBPF_API int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts); LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); #ifdef __cplusplus /* forward-declaring enums in C++ isn't compatible with pure C enums, so * instead define bpf_enable_stats() as accepting int as an input */ LIBBPF_API int bpf_enable_stats(int type); #else enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); #endif struct bpf_prog_bind_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; }; #define bpf_prog_bind_opts__last_field flags LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd, const struct bpf_prog_bind_opts *opts); struct bpf_test_run_opts { size_t sz; /* size of this struct for forward/backward compatibility */ const void *data_in; /* optional */ void *data_out; /* optional */ __u32 data_size_in; __u32 data_size_out; /* in: max length of data_out * out: length of data_out */ const void *ctx_in; /* optional */ void *ctx_out; /* optional */ __u32 ctx_size_in; __u32 ctx_size_out; /* in: max length of ctx_out * out: length of cxt_out */ __u32 retval; /* out: return code of the BPF program */ int repeat; __u32 duration; /* out: average per repetition in ns */ __u32 flags; __u32 cpu; __u32 batch_size; }; #define bpf_test_run_opts__last_field batch_size LIBBPF_API int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts); struct bpf_token_create_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; size_t :0; }; #define bpf_token_create_opts__last_field flags /** * 
@brief **bpf_token_create()** creates a new instance of BPF token derived * from specified BPF FS mount point. * * BPF token created with this API can be passed to bpf() syscall for * commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc. * * @param bpffs_fd FD for BPF FS instance from which to derive a BPF token * instance. * @param opts optional BPF token creation options, can be NULL * * @return BPF token FD > 0, on success; negative error code, otherwise (errno * is also set to the error code) */ LIBBPF_API int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_BPF_H */ xdp-tools-1.5.4/lib/libbpf/src/nlattr.c0000644000175100001660000001163014706536574017302 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * NETLINK Netlink attributes * * Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch> */ #include <errno.h> #include <string.h> #include <stdio.h> #include <linux/rtnetlink.h> #include "nlattr.h" #include "libbpf_internal.h" static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { [LIBBPF_NLA_U8] = sizeof(uint8_t), [LIBBPF_NLA_U16] = sizeof(uint16_t), [LIBBPF_NLA_U32] = sizeof(uint32_t), [LIBBPF_NLA_U64] = sizeof(uint64_t), [LIBBPF_NLA_STRING] = 1, [LIBBPF_NLA_FLAG] = 0, }; static struct nlattr *nla_next(const struct nlattr *nla, int *remaining) { int totlen = NLA_ALIGN(nla->nla_len); *remaining -= totlen; return (struct nlattr *)((void *)nla + totlen); } static int nla_ok(const struct nlattr *nla, int remaining) { return remaining >= (int)sizeof(*nla) && nla->nla_len >= sizeof(*nla) && nla->nla_len <= remaining; } static int nla_type(const struct nlattr *nla) { return nla->nla_type & NLA_TYPE_MASK; } static int validate_nla(struct nlattr *nla, int maxtype, struct libbpf_nla_policy *policy) { struct libbpf_nla_policy *pt; unsigned int minlen = 0; int type = nla_type(nla); if (type < 0 || type > maxtype) return 0; pt = &policy[type]; if (pt->type > LIBBPF_NLA_TYPE_MAX) return 0; if (pt->minlen) minlen = pt->minlen; else if (pt->type != LIBBPF_NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (libbpf_nla_len(nla) < minlen) return -1; if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) return -1; if (pt->type == LIBBPF_NLA_STRING) { char *data = libbpf_nla_data(nla); if (data[libbpf_nla_len(nla) - 1] != '\0') return -1; } return 0; } static inline int nlmsg_len(const struct nlmsghdr *nlh) { return nlh->nlmsg_len - NLMSG_HDRLEN; } /** * Create attribute index based on a stream of attributes. * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg head Head of attribute stream. * @arg len Length of attribute stream. * @arg policy Attribute validation policy. * * Iterates over the stream of attributes and stores a pointer to each * attribute in the index array using the attribute type as index to * the array. Attribute with a type greater than the maximum type * specified will be silently ignored in order to maintain backwards * compatibility. If \a policy is not NULL, the attribute will be * validated using the specified policy. * * @see nla_validate * @return 0 on success or a negative error code.
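 *
 * Illustrative sketch (MY_ATTR_MAX and MY_ATTR_FOO are hypothetical
 * attribute types, not defined by this library), with *head* and *len*
 * delimiting the attribute stream of a received netlink message:
 *
 *	struct nlattr *tb[MY_ATTR_MAX + 1];
 *
 *	if (libbpf_nla_parse(tb, MY_ATTR_MAX, head, len, NULL) < 0)
 *		return -EINVAL;
 *	if (tb[MY_ATTR_FOO])
 *		foo = *(uint32_t *)libbpf_nla_data(tb[MY_ATTR_FOO]);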
*/ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct libbpf_nla_policy *policy) { struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); libbpf_nla_for_each_attr(nla, head, len, rem) { int type = nla_type(nla); if (type > maxtype) continue; if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } if (tb[type]) pr_warn("Attribute of type %#x found multiple times in message, " "previous attribute is being ignored.\n", type); tb[type] = nla; } err = 0; errout: return err; } /** * Create attribute index based on nested attribute * @arg tb Index array to be filled (maxtype+1 elements). * @arg maxtype Maximum attribute type expected and accepted. * @arg nla Nested Attribute. * @arg policy Attribute validation policy. * * Feeds the stream of attributes nested into the specified attribute * to libbpf_nla_parse(). * * @see libbpf_nla_parse * @return 0 on success or a negative error code. */ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, struct libbpf_nla_policy *policy) { return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla), libbpf_nla_len(nla), policy); } /* dump netlink extended ack error message */ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) { struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING }, [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 }, }; struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr; struct nlmsgerr *err; char *errmsg = NULL; int hlen, alen; /* no TLVs, nothing to do here */ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) return 0; err = (struct nlmsgerr *)NLMSG_DATA(nlh); hlen = sizeof(*err); /* if NLM_F_CAPPED is set then the inner err msg was capped */ if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) hlen += nlmsg_len(&err->msg); attr = (struct nlattr *) ((void *) err + hlen); alen = (void *)nlh + nlh->nlmsg_len - (void *)attr; if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { pr_warn("Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); pr_warn("Kernel error message: %s\n", errmsg); return 0; } xdp-tools-1.5.4/lib/libbpf/src/bpf_helper_defs.h0000644000175100001660000052710614706536574021124 0ustar runnerdocker/* This is auto-generated file. See bpf_doc.py for details. */ /* Forward declarations of BPF structs */ struct bpf_fib_lookup; struct bpf_sk_lookup; struct bpf_perf_event_data; struct bpf_perf_event_value; struct bpf_pidns_info; struct bpf_redir_neigh; struct bpf_sock; struct bpf_sock_addr; struct bpf_sock_ops; struct bpf_sock_tuple; struct bpf_spin_lock; struct bpf_sysctl; struct bpf_tcp_sock; struct bpf_tunnel_key; struct bpf_xfrm_state; struct linux_binprm; struct pt_regs; struct sk_reuseport_md; struct sockaddr; struct tcphdr; struct seq_file; struct tcp6_sock; struct tcp_sock; struct tcp_timewait_sock; struct tcp_request_sock; struct udp6_sock; struct unix_sock; struct task_struct; struct cgroup; struct __sk_buff; struct sk_msg_md; struct xdp_md; struct path; struct btf_ptr; struct inode; struct socket; struct file; struct bpf_timer; struct mptcp_sock; struct bpf_dynptr; struct iphdr; struct ipv6hdr; #ifndef __bpf_fastcall #if __has_attribute(bpf_fastcall) #define __bpf_fastcall __attribute__((bpf_fastcall)) #else #define __bpf_fastcall #endif #endif /* * bpf_map_lookup_elem * * Perform a lookup in *map* for an entry associated to *key*. 
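 *
 * A minimal sketch (illustrative), from BPF program context, assuming a
 * map *my_map* with __u32 keys and __u64 values defined elsewhere in the
 * object; the returned pointer must always be NULL-checked:
 *
 * ::
 *
 *	__u32 key = 0;
 *	__u64 *value = bpf_map_lookup_elem(&my_map, &key);
 *
 *	if (value)
 *		__sync_fetch_and_add(value, 1);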
* * Returns * Map value associated to *key*, or **NULL** if no entry was * found. */ static void *(* const bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1; /* * bpf_map_update_elem * * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * Flag value **BPF_NOEXIST** cannot be used for maps of types * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all * elements always exist), the helper would return an error. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; /* * bpf_map_delete_elem * * Delete entry with *key* from *map*. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_map_delete_elem)(void *map, const void *key) = (void *) 3; /* * bpf_probe_read * * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. * * Generally, use **bpf_probe_read_user**\ () or * **bpf_probe_read_kernel**\ () instead. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4; /* * bpf_ktime_get_ns * * Return the time elapsed since system boot, in nanoseconds. * Does not include time the system was suspended. * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) * * Returns * Current *ktime*. */ static __u64 (* const bpf_ktime_get_ns)(void) = (void *) 5; /* * bpf_trace_printk * * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) * to file *\/sys/kernel/tracing/trace* from TraceFS, if * available. It can take up to three additional **u64** * arguments (as with all eBPF helpers, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. * Lines are discarded while *\/sys/kernel/tracing/trace* is * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * * :: * * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> * * In the above: * * * ``telnet`` is the name of the current task. * * ``470`` is the PID of the current task. * * ``001`` is the CPU number on which the task is * running. * * In ``.N..``, each character refers to a set of * options (whether irqs are enabled, scheduling * options, whether hard/softirqs are running, level of * preempt_disabled respectively). **N** means that * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** * are set. * * ``419421.045894`` is a timestamp. * * ``0x00000001`` is a fake value used by BPF for the * instruction pointer register. * * ``<formatted msg>`` is the message formatted with * *fmt*. * * The conversion specifiers supported by *fmt* are similar, but * more limited than for printk(). They are **%d**, **%i**, * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size * of field, padding with zeroes, etc.)
is available, and the * helper will return **-EINVAL** (but print nothing) if it * encounters an unknown specifier. * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values * to user space, perf events should be preferred. * * Returns * The number of bytes written to the buffer, or a negative error * in case of failure. */ static long (* const bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6; /* * bpf_get_prandom_u32 * * Get a pseudo-random number. * * From a security point of view, this helper uses its own * pseudo-random internal state, and cannot be used to infer the * seed of other random functions in the kernel. However, it is * essential to note that the generator used by the helper is not * cryptographically secure. * * Returns * A random 32-bit unsigned value. */ static __u32 (* const bpf_get_prandom_u32)(void) = (void *) 7; /* * bpf_get_smp_processor_id * * Get the SMP (symmetric multiprocessing) processor id. Note that * all programs run with migration disabled, which means that the * SMP processor id is stable during all the execution of the * program. * * Returns * The SMP id of the processor running the program. */ static __bpf_fastcall __u32 (* const bpf_get_smp_processor_id)(void) = (void *) 8; /* * bpf_skb_store_bytes * * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the * checksum for the packet after storing the bytes) and * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9; /* * bpf_l3_csum_replace * * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper * must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in *size*. * Alternatively, it is possible to store the difference between * the previous and the new values of the header field in *to*, by * setting *from* and *size* to 0. For both methods, *offset* * indicates the location of the IP checksum within the packet. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. 
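 *
 * An illustrative sketch (IP_DST_OFF and IP_CSUM_OFF are hypothetical
 * byte offsets of the IPv4 destination address and header checksum,
 * computed by the program; bpf_htonl() is from bpf_endian.h):
 *
 * ::
 *
 *	__be32 old_ip, new_ip = bpf_htonl(0x0a000002);	(10.0.0.2)
 *
 *	bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, 4);
 *	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, 4, 0);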
* * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10; /* * bpf_l4_csum_replace * * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the * helper must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored on the lowest * four bits of *flags*. Alternatively, it is possible to store * the difference between the previous and the new values of the * header field in *to*, by setting *from* and the four lowest * bits of *flags* to 0. For both methods, *offset* indicates the * location of the IP checksum within the packet. In addition to * the size of the field, *flags* can be added (bitwise OR) actual * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11; /* * bpf_tail_call * * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack * frame is used (but values on stack and in registers for the * caller are not accessible to the callee). This mechanism allows * for program chaining, either for raising the maximum number of * available eBPF instructions, or to execute given programs in * conditional blocks. For security reasons, there is an upper * limit to the number of successive tail calls that can be * performed. * * Upon call of this helper, the program attempts to jump into a * program referenced at index *index* in *prog_array_map*, a * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes * *ctx*, a pointer to the context. * * If the call succeeds, the kernel immediately runs the first * instruction of the new program. This is not a function call, * and it never returns to the previous program. If the call * fails, then the helper has no effect, and the caller continues * to run its subsequent instructions. A call can fail if the * destination program for the jump does not exist (i.e. *index* * is superior to the number of entries in *prog_array_map*), or * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), * which is currently set to 33. * * Returns * 0 on success, or a negative error in case of failure. 
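 *
 * Typical usage from XDP (illustrative sketch; *jmp_table* is a
 * **BPF_MAP_TYPE_PROG_ARRAY** assumed to be populated from user space):
 *
 * ::
 *
 *	bpf_tail_call(ctx, &jmp_table, proto_index);
 *	only reached if the tail call failed, e.g. an empty slot
 *	return XDP_PASS;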
*/ static long (* const bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12; /* * bpf_clone_redirect * * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress * interfaces can be used for redirection. The **BPF_F_INGRESS** * value in *flags* is used to make the distinction (ingress path * is selected if the flag is present, egress path otherwise). * This is the only flag supported for now. * * In comparison with **bpf_redirect**\ () helper, * **bpf_clone_redirect**\ () has the associated cost of * duplicating the packet buffer, but this can be executed out of * the eBPF program. Conversely, **bpf_redirect**\ () is more * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. Positive * error indicates a potential drop or congestion in the target * device. The particular positive error codes are not defined. */ static long (* const bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13; /* * bpf_get_current_pid_tgid * * Get the current pid and tgid. * * Returns * A 64-bit integer containing the current tgid and pid, and * created as such: * *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. */ static __u64 (* const bpf_get_current_pid_tgid)(void) = (void *) 14; /* * bpf_get_current_uid_gid * * Get the current uid and gid. * * Returns * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. */ static __u64 (* const bpf_get_current_uid_gid)(void) = (void *) 15; /* * bpf_get_current_comm * * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of * the executable (excluding the path) for the current task. The * *size_of_buf* must be strictly positive. On success, the * helper makes sure that the *buf* is NUL-terminated. On failure, * it is filled with zeroes. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16; /* * bpf_get_cgroup_classid * * Retrieve the classid for the current task, i.e. for the net_cls * cgroup to which *skb* belongs. * * This helper can be used on TC egress path, but not on ingress. * * The net_cls cgroup provides an interface to tag network packets * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can * use a mixture of them, but note that the net_cls cgroup is for * cgroup v1 only. This makes it incompatible with BPF programs * run on cgroups, which is a cgroup-v2-only feature (a socket can * only hold data for one version of cgroups at a time). 
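 *
 * Illustrative sketch, accounting egress bytes per net_cls classid from a
 * TC egress program (*bytes_per_class* is an assumed hash map with __u32
 * keys and __u64 values):
 *
 * ::
 *
 *	__u32 classid = bpf_get_cgroup_classid(skb);
 *	__u64 *bytes = bpf_map_lookup_elem(&bytes_per_class, &classid);
 *
 *	if (bytes)
 *		__sync_fetch_and_add(bytes, skb->len);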
* * This helper is only available is the kernel was compiled with * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to * "**y**" or to "**m**". * * Returns * The classid, or 0 for the default unconfigured classid. */ static __u32 (* const bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17; /* * bpf_skb_vlan_push * * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update * the checksum. Note that if *vlan_proto* is different from * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18; /* * bpf_skb_vlan_pop * * Pop a VLAN header from the packet associated to *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19; /* * bpf_skb_get_tunnel_key * * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be * filled with tunnel metadata for the packet associated to *skb*. * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which * indicates that the tunnel is based on IPv6 protocol instead of * IPv4. * * The **struct bpf_tunnel_key** is an object that generalizes the * principal parameters used by various tunneling protocols into a * single struct. This way, it can be used to easily make a * decision based on the contents of the encapsulation header, * "summarized" in this struct. In particular, it holds the IP * address of the remote end (IPv4 or IPv6, depending on the case) * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, * this struct exposes the *key*\ **->tunnel_id**, which is * generally mapped to a VNI (Virtual Network Identifier), making * it programmable together with the **bpf_skb_set_tunnel_key**\ * () helper. * * Let's imagine that the following code is part of a program * attached to the TC ingress interface, on one end of a GRE * tunnel, and is supposed to filter out all messages coming from * remote ends with IPv4 address other than 10.0.0.1: * * :: * * int ret; * struct bpf_tunnel_key key = {}; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices * that can operate in "collect metadata" mode: instead of having * one network device per specific configuration, the "collect * metadata" mode only requires a single device where the * configuration can be extracted from this helper. * * This can be used together with various tunnels such as VXLan, * Geneve, GRE or IP in IP (IPIP). 
* * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20; /* * bpf_skb_set_tunnel_key * * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The * *flags* can be set to a combination of the following values: * * **BPF_F_TUNINFO_IPV6** * Indicate that the tunnel is based on IPv6 protocol * instead of IPv4. * **BPF_F_ZERO_CSUM_TX** * For IPv4 packets, add a flag to tunnel metadata * indicating that checksum computation should be skipped * and checksum set to zeroes. * **BPF_F_DONT_FRAGMENT** * Add a flag to tunnel metadata indicating that the * packet should not be fragmented. * **BPF_F_SEQ_NUMBER** * Add a flag to tunnel metadata indicating that a * sequence number should be added to tunnel header before * sending the packet. This flag was added for GRE * encapsulation, but might be used with other protocols * as well in the future. * **BPF_F_NO_TUNNEL_KEY** * Add a flag to tunnel metadata indicating that no tunnel * key should be set in the resulting tunnel header. * * Here is a typical usage on the transmit path: * * :: * * struct bpf_tunnel_key key; * populate key ... * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); * * See also the description of the **bpf_skb_get_tunnel_key**\ () * helper for additional information. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21; /* * bpf_perf_event_read * * Read the value of a perf event counter. This helper relies on a * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of * the perf event counter is selected when *map* is updated with * perf event file descriptors. The *map* is an array whose size * is the number of available CPUs, and each cell contains a value * relative to one CPU. The value to retrieve is indicated by * *flags*, that contains the index of the CPU to look up, masked * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * Note that before Linux 4.13, only hardware perf event can be * retrieved. * * Also, be aware that the newer helper * **bpf_perf_event_read_value**\ () is recommended over * **bpf_perf_event_read**\ () in general. The latter has some ABI * quirks where error and counter value are used as a return code * (which is wrong to do since ranges may overlap). This issue is * fixed with **bpf_perf_event_read_value**\ (), which at the same * time provides more features over the **bpf_perf_event_read**\ * () interface. Please refer to the description of * **bpf_perf_event_read_value**\ () for details. * * Returns * The value of the perf event counter read from the map, or a * negative error code in case of failure. */ static __u64 (* const bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22; /* * bpf_redirect * * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ * (), except that the packet is not cloned, which provides * increased performance. * * Except for XDP, both ingress and egress interfaces can be used * for redirection. 
The **BPF_F_INGRESS** value in *flags* is used * to make the distinction (ingress path is selected if the flag * is present, egress path otherwise). Currently, XDP only * supports redirection to the egress interface, and accepts no * flag at all. * * The same effect can also be attained with the more generic * **bpf_redirect_map**\ (), which uses a BPF map to store the * redirect target instead of providing it directly to the helper. * * Returns * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on * error. */ static long (* const bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23; /* * bpf_get_route_realm * * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. * * Retrieving this identifier works with the clsact TC egress hook * (see also **tc-bpf(8)**), or alternatively on conventional * classful egress qdiscs, but not on TC ingress path. In case of * clsact TC egress hook, this has the advantage that, internally, * the destination entry has not been dropped yet in the transmit * path. Therefore, the destination entry does not need to be * artificially held via **netif_keep_dst**\ () for a classful * qdisc until the *skb* is freed. * * This helper is available only if the kernel was compiled with * **CONFIG_IP_ROUTE_CLASSID** configuration option. * * Returns * The realm of the route for the packet associated to *skb*, or 0 * if none was found. */ static __u32 (* const bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24; /* * bpf_perf_event_output * * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*. * * The context of the program *ctx* needs also be passed to the * helper. * * On user space, a program willing to read the values needs to * call **perf_event_open**\ () on the perf event (either for * one or for all CPUs) and to store the file descriptor into the * *map*. This must be done before the eBPF program can send data * into it. An example is available in file * *samples/bpf/trace_output_user.c* in the Linux kernel source * tree (the eBPF program counterpart is in * *samples/bpf/trace_output_kern.c*). * * **bpf_perf_event_output**\ () achieves better performance * than **bpf_trace_printk**\ () for sharing data with user * space, and is much better suitable for streaming data from eBPF * programs. * * Note that this helper is not restricted to tracing use cases * and can be used with programs attached to TC or XDP as well, * where it allows for passing data to user space listeners. 
Data * can be: * * * Only custom structs, * * Only the packet payload, or * * A combination of both. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25; /* * bpf_skb_load_bytes * * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from * the packet associated to *skb*, into the buffer pointed by * *to*. * * Since Linux 4.7, usage of this helper has mostly been replaced * by "direct packet access", enabling packet data to be * manipulated with *skb*\ **->data** and *skb*\ **->data_end** * pointing respectively to the first byte of packet data and to * the byte after the last byte of packet data. However, it * remains useful if one wishes to read large quantities of data * at once from a packet into the eBPF stack. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26; /* * bpf_get_stackid * * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context * on which the tracing program is executed, and a pointer to a * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * a combination of the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_FAST_STACK_CMP** * Compare stacks by hash only. * **BPF_F_REUSE_STACKID** * If two different stacks hash into the same *stackid*, * discard the old one. * * The stack id retrieved is a 32 bit long integer handle which * can be further combined with other data (including other stack * ids) and used as a key into maps. This can be useful for * generating a variety of graphs (such as flame graphs or off-cpu * graphs). * * For walking a stack, this helper is an improvement over * **bpf_probe_read**\ (), which can be used with unrolled loops * but is not efficient and consumes a lot of eBPF instructions. * Instead, **bpf_get_stackid**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * * Returns * The positive or null stack id on success, or a negative error * in case of failure. */ static long (* const bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27; /* * bpf_csum_diff * * Compute a checksum difference, from the raw buffer pointed by * *from*, of length *from_size* (that must be a multiple of 4), * towards the raw buffer pointed by *to*, of size *to_size* * (same remark). An optional *seed* can be added to the value * (this can be cascaded, the seed may come from a previous call * to the helper). * * This is flexible enough to be used in several ways: * * * With *from_size* == 0, *to_size* > 0 and *seed* set to * checksum, it can be used when pushing new data. * * With *from_size* > 0, *to_size* == 0 and *seed* set to * checksum, it can be used when removing data from a packet.
* * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it * can be used to compute a diff. Note that *from_size* and * *to_size* do not need to be equal. * * This helper can be used in combination with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to * which one can feed in the difference computed with * **bpf_csum_diff**\ (). * * Returns * The checksum result, or a negative error code in case of * failure. */ static __s64 (* const bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28; /* * bpf_skb_get_tunnel_opt * * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* * of *size*. * * This helper can be used with encapsulation devices that can * operate in "collect metadata" mode (please refer to the related * note in the description of **bpf_skb_get_tunnel_key**\ () for * more details). A particular example where this can be used is * in combination with the Geneve encapsulation protocol, where it * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) * and retrieving arbitrary TLVs (Type-Length-Value headers) from * the eBPF program. This allows for full customization of these * headers. * * Returns * The size of the option data retrieved. */ static long (* const bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29; /* * bpf_skb_set_tunnel_opt * * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. * * See also the description of the **bpf_skb_get_tunnel_opt**\ () * helper for additional information. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30; /* * bpf_skb_change_proto * * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to * IPv4. The helper takes care of the groundwork for the * transition, including resizing the socket buffer. The eBPF * program is expected to fill the new headers, if any, via * **skb_store_bytes**\ () and to recompute the checksums with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ * (). The main case for this helper is to perform NAT64 * operations out of an eBPF program. * * Internally, the GSO type is marked as dodgy so that headers are * checked and segments are recalculated by the GSO/GRO engine. * The size for GSO target is adapted as well. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31; /* * bpf_skb_change_type * * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except * the eBPF program does not have a write access to *skb*\ * **->pkt_type** beside this helper. Using a helper here allows * for graceful handling of errors. 
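 *
 * A minimal sketch (illustrative), from a TC ingress program:
 *
 * ::
 *
 *	if (bpf_skb_change_type(skb, PACKET_HOST) < 0)
 *		return TC_ACT_SHOT;
 *	return TC_ACT_OK;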
* * The major use case is to change incoming *skb*s to * **PACKET_HOST** in a programmatic way instead of having to * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for * example. * * Note that *type* only allows certain values. At this time, they * are: * * **PACKET_HOST** * Packet is for us. * **PACKET_BROADCAST** * Send packet to all. * **PACKET_MULTICAST** * Send packet to group. * **PACKET_OTHERHOST** * Send packet to someone else. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32; /* * bpf_skb_under_cgroup * * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * * Returns * The return value depends on the result of the test, and can be: * * * 0, if the *skb* failed the cgroup2 descendant test. * * 1, if the *skb* succeeded the cgroup2 descendant test. * * A negative error code, if an error occurred. */ static long (* const bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33; /* * bpf_get_hash_recalc * * Retrieve the hash of the packet, *skb*\ **->hash**. If it is * not set, in particular if the hash was cleared due to mangling, * recompute this hash. Later accesses to the hash can be done * directly with *skb*\ **->hash**. * * Calling **bpf_set_hash_invalid**\ (), changing a packet * prototype with **bpf_skb_change_proto**\ (), or calling * **bpf_skb_store_bytes**\ () with the * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear * the hash and to trigger a new computation for the next call to * **bpf_get_hash_recalc**\ (). * * Returns * The 32-bit hash. */ static __u32 (* const bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34; /* * bpf_get_current_task * * Get the current task. * * Returns * A pointer to the current task struct. */ static __u64 (* const bpf_get_current_task)(void) = (void *) 35; /* * bpf_probe_write_user * * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in * user context, and *dst* must be a valid user space address. * * This helper should not be used to implement any kind of * security mechanism because of TOC-TOU attacks, but rather to * debug, divert, and manipulate execution of semi-cooperative * processes. * * Keep in mind that this feature is meant for experiments, and it * has a risk of crashing the system and running programs. * Therefore, when an eBPF program using this helper is attached, * a warning including PID and process name is printed to kernel * logs. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36; /* * bpf_current_task_under_cgroup * * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * * Returns * The return value depends on the result of the test, and can be: * * * 1, if current task belongs to the cgroup2. * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. */ static long (* const bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37; /* * bpf_skb_change_tail * * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must * be left at zero. 
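 *
 * Illustrative sketch, trimming a reply down to headers only (*hdr_len* is
 * assumed to have been computed from the packet headers):
 *
 * ::
 *
 *	if (bpf_skb_change_tail(skb, hdr_len, 0) < 0)
 *		return TC_ACT_SHOT;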
 *
 * 	The basic idea is that the helper performs the needed work to
 * 	change the size of the packet, then the eBPF program rewrites
 * 	the rest via helpers like **bpf_skb_store_bytes**\ (),
 * 	**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 * 	and others. This helper is a slow path utility intended for
 * 	replies with control messages. And because it is targeted for
 * 	slow path, the helper itself can afford to be slow: it
 * 	implicitly linearizes, unclones and drops offloads from the
 * 	*skb*.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38;

/*
 * bpf_skb_pull_data
 *
 * 	Pull in non-linear data in case the *skb* is non-linear and not
 * 	all of *len* are part of the linear section. Make *len* bytes
 * 	from *skb* readable and writable. If a zero value is passed for
 * 	*len*, then all bytes in the linear part of *skb* will be made
 * 	readable and writable.
 *
 * 	This helper is only needed for reading and writing with direct
 * 	packet access.
 *
 * 	For direct packet access, testing that offsets to access
 * 	are within packet boundaries (test on *skb*\ **->data_end**) is
 * 	susceptible to fail if offsets are invalid, or if the requested
 * 	data is in non-linear parts of the *skb*. On failure the
 * 	program can just bail out, or in the case of a non-linear
 * 	buffer, use a helper to make the data available. The
 * 	**bpf_skb_load_bytes**\ () helper is a first solution to access
 * 	the data. Another one consists in using **bpf_skb_pull_data**
 * 	to pull in once the non-linear parts, then retesting and
 * 	eventually accessing the data.
 *
 * 	At the same time, this also makes sure the *skb* is uncloned,
 * 	which is a necessary condition for direct write. As this needs
 * 	to be an invariant for the write part only, the verifier
 * 	detects writes and adds a prologue that is calling
 * 	**bpf_skb_pull_data()** to effectively unclone the *skb* from
 * 	the very beginning in case it is indeed cloned.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39;

/*
 * bpf_csum_update
 *
 * 	Add the checksum *csum* into *skb*\ **->csum** in case the
 * 	driver has supplied a checksum for the entire packet into that
 * 	field. Return an error otherwise. This helper is intended to be
 * 	used in combination with **bpf_csum_diff**\ (), in particular
 * 	when the checksum needs to be updated after data has been
 * 	written into the packet through direct packet access.
 *
 * Returns
 * 	The checksum on success, or a negative error code in case of
 * 	failure.
 */
static __s64 (* const bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40;

/*
 * bpf_set_hash_invalid
 *
 * 	Invalidate the current *skb*\ **->hash**.
 * 	It can be used after
 * 	mangling on headers through direct packet access, in order to
 * 	indicate that the hash is outdated and to trigger a
 * 	recalculation the next time the kernel tries to access this
 * 	hash or when the **bpf_get_hash_recalc**\ () helper is called.
 *
 * Returns
 * 	void.
 */
static void (* const bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41;

/*
 * bpf_get_numa_node_id
 *
 * 	Return the id of the current NUMA node. The primary use case
 * 	for this helper is the selection of sockets for the local NUMA
 * 	node, when the program is attached to sockets using the
 * 	**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
 * 	but the helper is also available to other eBPF program types,
 * 	similarly to **bpf_get_smp_processor_id**\ ().
 *
 * Returns
 * 	The id of current NUMA node.
 */
static long (* const bpf_get_numa_node_id)(void) = (void *) 42;

/*
 * bpf_skb_change_head
 *
 * 	Grows headroom of packet associated to *skb* and adjusts the
 * 	offset of the MAC header accordingly, adding *len* bytes of
 * 	space. It automatically extends and reallocates memory as
 * 	required.
 *
 * 	This helper can be used on a layer 3 *skb* to push a MAC header
 * 	for redirection into a layer 2 device.
 *
 * 	All values for *flags* are reserved for future usage, and must
 * 	be left at zero.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43;

/*
 * bpf_xdp_adjust_head
 *
 * 	Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
 * 	it is possible to use a negative value for *delta*. This helper
 * 	can be used to prepare the packet for pushing or popping
 * 	headers.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44;

/*
 * bpf_probe_read_str
 *
 * 	Copy a NUL terminated string from an unsafe kernel address
 * 	*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
 * 	more details.
 *
 * 	Generally, use **bpf_probe_read_user_str**\ () or
 * 	**bpf_probe_read_kernel_str**\ () instead.
 *
 * Returns
 * 	On success, the strictly positive length of the string,
 * 	including the trailing NUL character. On error, a negative
 * 	value.
 */
static long (* const bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45;

/*
 * bpf_get_socket_cookie
 *
 * 	If the **struct sk_buff** pointed by *skb* has a known socket,
 * 	retrieve the cookie (generated by the kernel) of this socket.
 * 	If no cookie has been set yet, generate a new cookie. Once
 * 	generated, the socket cookie remains stable for the life of the
 * 	socket. This helper can be useful for monitoring per socket
 * 	networking traffic statistics as it provides a global socket
 * 	identifier that can be assumed unique.
 *
 * Returns
 * 	An 8-byte long unique number on success, or 0 if the socket
 * 	field is missing inside *skb*.
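 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a cgroup/skb program could use the
 * 	cookie as a stable per-socket key into a user-defined hash map;
 * 	the map name ``flow_bytes`` below is hypothetical:
 *
 * 	::
 *
 * 		struct {
 * 			__uint(type, BPF_MAP_TYPE_HASH);
 * 			__uint(max_entries, 1024);
 * 			__type(key, __u64);
 * 			__type(value, __u64);
 * 		} flow_bytes SEC(".maps");
 *
 * 		SEC("cgroup_skb/egress")
 * 		int count_bytes(struct __sk_buff *skb)
 * 		{
 * 			__u64 init = 0, *val;
 * 			__u64 cookie = bpf_get_socket_cookie(skb);
 *
 * 			if (!cookie)
 * 				return 1; // no socket attached, allow packet
 * 			val = bpf_map_lookup_elem(&flow_bytes, &cookie);
 * 			if (!val) {
 * 				bpf_map_update_elem(&flow_bytes, &cookie, &init, BPF_NOEXIST);
 * 				val = bpf_map_lookup_elem(&flow_bytes, &cookie);
 * 			}
 * 			if (val)
 * 				__sync_fetch_and_add(val, skb->len);
 * 			return 1;
 * 		}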
 */
static __u64 (* const bpf_get_socket_cookie)(void *ctx) = (void *) 46;

/*
 * bpf_get_socket_uid
 *
 * 	Get the owner UID of the socket associated to *skb*.
 *
 * Returns
 * 	The owner UID of the socket associated to *skb*. If the socket
 * 	is **NULL**, or if it is not a full socket (i.e. if it is a
 * 	time-wait or a request socket instead), **overflowuid** value
 * 	is returned (note that **overflowuid** might also be the actual
 * 	UID value for the socket).
 */
static __u32 (* const bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47;

/*
 * bpf_set_hash
 *
 * 	Set the full hash for *skb* (set the field *skb*\ **->hash**)
 * 	to value *hash*.
 *
 * Returns
 * 	0
 */
static long (* const bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48;

/*
 * bpf_setsockopt
 *
 * 	Emulate a call to **setsockopt()** on the socket associated to
 * 	*bpf_socket*, which must be a full socket. The *level* at
 * 	which the option resides and the name *optname* of the option
 * 	must be specified, see **setsockopt(2)** for more information.
 * 	The option value of length *optlen* is pointed by *optval*.
 *
 * 	*bpf_socket* should be one of the following:
 *
 * 	* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
 * 	* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
 * 	  **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
 *
 * 	This helper actually implements a subset of **setsockopt()**.
 * 	It supports the following *level*\ s:
 *
 * 	* **SOL_SOCKET**, which supports the following *optname*\ s:
 * 	  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
 * 	  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
 * 	  **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
 * 	  **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
 * 	* **IPPROTO_TCP**, which supports the following *optname*\ s:
 * 	  **TCP_CONGESTION**, **TCP_BPF_IW**,
 * 	  **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
 * 	  **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
 * 	  **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
 * 	  **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
 * 	  **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
 * 	  **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
 * 	* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 * 	* **IPPROTO_IPV6**, which supports the following *optname*\ s:
 * 	  **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49;

/*
 * bpf_skb_adjust_room
 *
 * 	Grow or shrink the room for data in the packet associated to
 * 	*skb* by *len_diff*, and according to the selected *mode*.
 *
 * 	By default, the helper will reset any offloaded checksum
 * 	indicator of the skb to CHECKSUM_NONE. This can be avoided
 * 	by the following flag:
 *
 * 	* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
 * 	  checksum data of the skb to CHECKSUM_NONE.
 *
 * 	There are two supported modes at this time:
 *
 * 	* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
 * 	  (room space is added or removed between the layer 2 and
 * 	  layer 3 headers).
 *
 * 	* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
 * 	  (room space is added or removed between the layer 3 and
 * 	  layer 4 headers).
 *
 * 	The following flags are supported at this time:
 *
 * 	* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
 * 	  Adjusting mss in this way is not allowed for datagrams.
 *
 * 	* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
 * 	  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
 * 	  Any new space is reserved to hold a tunnel header.
 * 	  Configure skb offsets and other fields accordingly.
 *
 * 	* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
 * 	  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
 * 	  Use with ENCAP_L3 flags to further specify the tunnel type.
 *
 * 	* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
 * 	  Use with ENCAP_L3/L4 flags to further specify the tunnel
 * 	  type; *len* is the length of the inner MAC header.
 *
 * 	* **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
 * 	  Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
 * 	  L2 type as Ethernet.
 *
 * 	* **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
 * 	  **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
 * 	  Indicate the new IP header version after decapsulating the outer
 * 	  IP header. Used when the inner and outer IP versions are different.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50;

/*
 * bpf_redirect_map
 *
 * 	Redirect the packet to the endpoint referenced by *map* at
 * 	index *key*. Depending on its type, this *map* can contain
 * 	references to net devices (for forwarding packets through other
 * 	ports), or to CPUs (for redirecting XDP frames to another CPU;
 * 	but this is only implemented for native XDP (with driver
 * 	support) as of this writing).
 *
 * 	The lower two bits of *flags* are used as the return code if
 * 	the map lookup fails. This is so that the return value can be
 * 	one of the XDP program return codes up to **XDP_TX**, as chosen
 * 	by the caller. The higher bits of *flags* can be set to
 * 	BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
 *
 * 	With BPF_F_BROADCAST the packet will be broadcast to all the
 * 	interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
 * 	interface will be excluded from the broadcast.
 *
 * 	See also **bpf_redirect**\ (), which only supports redirecting
 * 	to an ifindex, but doesn't require a map to do so.
 *
 * Returns
 * 	**XDP_REDIRECT** on success, or the value of the two lower bits
 * 	of the *flags* argument on error.
 */
static long (* const bpf_redirect_map)(void *map, __u64 key, __u64 flags) = (void *) 51;

/*
 * bpf_sk_redirect_map
 *
 * 	Redirect the packet to the socket referenced by *map* (of type
 * 	**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
 * 	egress interfaces can be used for redirection. The
 * 	**BPF_F_INGRESS** value in *flags* is used to make the
 * 	distinction (ingress path is selected if the flag is present,
 * 	egress path otherwise). This is the only flag supported for now.
 *
 * Returns
 * 	**SK_PASS** on success, or **SK_DROP** on error.
 */
static long (* const bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52;

/*
 * bpf_sock_map_update
 *
 * 	Add an entry to, or update a *map* referencing sockets. The
 * 	*skops* is used as a new value for the entry associated to
 * 	*key*. *flags* is one of:
 *
 * 	**BPF_NOEXIST**
 * 		The entry for *key* must not exist in the map.
 * 	**BPF_EXIST**
 * 		The entry for *key* must already exist in the map.
 * 	**BPF_ANY**
 * 		No condition on the existence of the entry for *key*.
 *
 * 	If the *map* has eBPF programs (parser and verdict), those will
 * 	be inherited by the socket being added. If the socket is
 * 	already attached to eBPF programs, this results in an error.
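 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a sock_ops program could insert newly
 * 	established sockets into a user-defined sockmap, keyed here by
 * 	local port (the map name ``sock_map`` is hypothetical):
 *
 * 	::
 *
 * 		struct {
 * 			__uint(type, BPF_MAP_TYPE_SOCKMAP);
 * 			__uint(max_entries, 64);
 * 			__type(key, __u32);
 * 			__type(value, __u64);
 * 		} sock_map SEC(".maps");
 *
 * 		SEC("sockops")
 * 		int add_established(struct bpf_sock_ops *skops)
 * 		{
 * 			__u32 key = skops->local_port;
 *
 * 			if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 * 				bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
 * 			return 0;
 * 		}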
* * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53; /* * bpf_xdp_adjust_meta * * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this * operation modifies the address stored in *xdp_md*\ **->data**, * so the latter must be loaded only after the helper has been * called. * * The use of *xdp_md*\ **->data_meta** is optional and programs * are not required to use it. The rationale is that when the * packet is processed with XDP (e.g. as DoS filter), it is * possible to push further meta data along with it before passing * to the stack, and to give the guarantee that an ingress eBPF * program attached as a TC classifier on the same device can pick * this up for further post-processing. Since TC works with socket * buffers, it remains possible to set from XDP the **mark** or * **priority** pointers, or other pointers for the socket buffer. * Having this scratch space generic and programmable allows for * more flexibility as the user is free to store whatever meta * data they need. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54; /* * bpf_perf_event_read_value * * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event * counter is selected when *map* is updated with perf event file * descriptors. The *map* is an array whose size is the number of * available CPUs, and each cell contains a value relative to one * CPU. The value to retrieve is indicated by *flags*, that * contains the index of the CPU to look up, masked with * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * This helper behaves in a way close to * **bpf_perf_event_read**\ () helper, save that instead of * just returning the value observed, it fills the *buf* * structure. This allows for additional data to be retrieved: in * particular, the enabled and running times (in *buf*\ * **->enabled** and *buf*\ **->running**, respectively) are * copied. In general, **bpf_perf_event_read_value**\ () is * recommended over **bpf_perf_event_read**\ (), which has some * ABI issues and provides fewer functionalities. * * These values are interesting, because hardware PMU (Performance * Monitoring Unit) counters are limited resources. When there are * more PMU based perf events opened than available counters, * kernel will multiplex these events so each event gets certain * percentage (but not all) of the PMU time. In case that * multiplexing happens, the number of samples or counter value * will not reflect the case compared to when no multiplexing * occurs. This makes comparison between different runs difficult. * Typically, the counter value should be normalized before * comparing to other experiments. The usual normalization is done * as follows. 
* * :: * * normalized_counter = counter * t_enabled / t_running * * Where t_enabled is the time enabled for event and t_running is * the time running for event since last normalization. The * enabled and running times are accumulated since the perf event * open. To achieve scaling factor between two invocations of an * eBPF program, users can use CPU id as the key (which is * typical for perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55; /* * bpf_perf_prog_read_value * * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see * description of helper **bpf_perf_event_read_value**\ () for * more details). * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56; /* * bpf_getsockopt * * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **getsockopt(2)** for more information. * The retrieved value is stored in the structure pointed by * *opval* and of length *optlen*. * * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **getsockopt()**. * It supports the same set of *optname*\ s that is supported by * the **bpf_setsockopt**\ () helper. The exceptions are * **TCP_BPF_*** is **bpf_setsockopt**\ () only and * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57; /* * bpf_override_return * * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required * value. * * This helper has security implications, and thus is subject to * restrictions. It is only available if the kernel was compiled * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. * * Returns * 0 */ static long (* const bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58; /* * bpf_sock_ops_cb_flags_set * * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to * *argval*. 
* * The primary use of this field is to determine if there should * be calls to eBPF programs of type * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP * code. A program of the same type can change its value, per * connection and as necessary, when the connection is * established. This field is directly accessible for reading, but * this helper must be used for updates in order to return an * error if an eBPF program tries to set a callback that is not * supported in the current kernel. * * *argval* is a flag array which can combine these flags: * * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) * * Therefore, this function can be used to clear a callback flag by * setting the appropriate bit to zero. e.g. to disable the RTO * callback: * * **bpf_sock_ops_cb_flags_set(bpf_sock,** * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** * * Here are some examples of where one could call such eBPF * program: * * * When RTO fires. * * When a packet is retransmitted. * * When the connection terminates. * * When a packet is sent. * * When a packet is received. * * Returns * Code **-EINVAL** if the socket is not a full TCP socket; * otherwise, a positive number containing the bits that could not * be set is returned (which comes down to 0 if all bits were set * as required). */ static long (* const bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59; /* * bpf_msg_redirect_map * * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * * Returns * **SK_PASS** on success, or **SK_DROP** on error. */ static long (* const bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60; /* * bpf_msg_apply_bytes * * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. * * For example, this helper can be used in the following cases: * * * A single **sendmsg**\ () or **sendfile**\ () system call * contains multiple logical messages that the eBPF program is * supposed to read and for which it should apply a verdict. * * An eBPF program only cares to read the first *bytes* of a * *msg*. If the message has a large payload, then setting up * and calling the eBPF program repeatedly for all bytes, even * though the verdict is already known, would create unnecessary * overhead. * * When called from within an eBPF program, the helper sets a * counter internal to the BPF infrastructure, that is used to * apply the last verdict to the next *bytes*. If *bytes* is * smaller than the current data being processed from a * **sendmsg**\ () or **sendfile**\ () system call, the first * *bytes* will be sent and the eBPF program will be re-run with * the pointer for start of data pointing to byte number *bytes* * **+ 1**. 
If *bytes* is larger than the current data being * processed, then the eBPF verdict will be applied to multiple * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are * consumed. * * Note that if a socket closes with the internal counter holding * a non-zero value, this is not a problem because data is not * being buffered for *bytes* and is sent as it is received. * * Returns * 0 */ static long (* const bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61; /* * bpf_msg_cork_bytes * * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been * accumulated. * * This can be used when one needs a specific number of bytes * before a verdict can be assigned, even if the data spans * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme * case would be a user calling **sendmsg**\ () repeatedly with * 1-byte long message segments. Obviously, this is bad for * performance, but it is still valid. If the eBPF program needs * *bytes* bytes to validate a header, this helper can be used to * prevent the eBPF program to be called again until *bytes* have * been accumulated. * * Returns * 0 */ static long (* const bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62; /* * bpf_msg_pull_data * * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ * **->data_end** to *start* and *end* bytes offsets into *msg*, * respectively. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it can only parse data that the (**data**, **data_end**) * pointers have already consumed. For **sendmsg**\ () hooks this * is likely the first scatterlist element. But for calls relying * on the **sendpage** handler (e.g. **sendfile**\ ()) this will * be the range (**0**, **0**) because the data is shared with * user space and by default the objective is to avoid allowing * user space to modify data while (or after) eBPF verdict is * being decided. This helper can be used to pull in data and to * set the start and end pointer to given values. Data will be * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63; /* * bpf_bind * * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing * connection from the desired IP address, which can be useful for * example when all processes inside a cgroup should use one * single IP address on a host that has multiple IP configured. * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or * **AF_INET6**). It's advised to pass zero port (**sin_port** * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like * behavior and lets the kernel efficiently pick up an unused * port as long as 4-tuple is unique. 
 * 	Passing non-zero port might lead to degraded performance.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64;

/*
 * bpf_xdp_adjust_tail
 *
 * 	Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
 * 	possible to both shrink and grow the packet tail.
 * 	Shrinking is done via *delta* being a negative integer.
 *
 * 	A call to this helper is susceptible to change the underlying
 * 	packet buffer. Therefore, at load time, all checks on pointers
 * 	previously done by the verifier are invalidated and must be
 * 	performed again, if the helper is used in combination with
 * 	direct packet access.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65;

/*
 * bpf_skb_get_xfrm_state
 *
 * 	Retrieve the XFRM state (IP transform framework, see also
 * 	**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
 *
 * 	The retrieved value is stored in the **struct bpf_xfrm_state**
 * 	pointed by *xfrm_state* and of length *size*.
 *
 * 	All values for *flags* are reserved for future usage, and must
 * 	be left at zero.
 *
 * 	This helper is available only if the kernel was compiled with
 * 	**CONFIG_XFRM** configuration option.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66;

/*
 * bpf_get_stack
 *
 * 	Return a user or a kernel stack in bpf program provided buffer.
 * 	To achieve this, the helper needs *ctx*, which is a pointer
 * 	to the context on which the tracing program is executed.
 * 	To store the stacktrace, the bpf program provides *buf* with
 * 	a nonnegative *size*.
 *
 * 	The last argument, *flags*, holds the number of stack frames to
 * 	skip (from 0 to 255), masked with
 * 	**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 	the following flags:
 *
 * 	**BPF_F_USER_STACK**
 * 		Collect a user space stack instead of a kernel stack.
 * 	**BPF_F_USER_BUILD_ID**
 * 		Collect (build_id, file_offset) instead of ips for user
 * 		stack, only valid if **BPF_F_USER_STACK** is also
 * 		specified.
 *
 * 		*file_offset* is an offset relative to the beginning
 * 		of the executable or shared object file backing the vma
 * 		which the *ip* falls in. It is *not* an offset relative
 * 		to that object's base address. Accordingly, it must be
 * 		adjusted by adding (sh_addr - sh_offset), where
 * 		sh_{addr,offset} correspond to the executable section
 * 		containing *file_offset* in the object, for comparisons
 * 		to symbols' st_value to be valid.
 *
 * 	**bpf_get_stack**\ () can collect up to
 * 	**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
 * 	to a sufficiently large buffer size. Note that
 * 	this limit can be controlled with the **sysctl** program, and
 * 	that it should be manually increased in order to profile long
 * 	user stacks (such as stacks for Java programs). To do so, use:
 *
 * 	::
 *
 * 		# sysctl kernel.perf_event_max_stack=<new value>
 *
 * Returns
 * 	The non-negative copied *buf* length equal to or less than
 * 	*size* on success, or a negative error in case of failure.
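 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a kprobe program could capture a kernel
 * 	stack into a per-CPU scratch buffer (the map name
 * 	``stack_scratch`` and the probed function are hypothetical):
 *
 * 	::
 *
 * 		struct {
 * 			__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 * 			__uint(max_entries, 1);
 * 			__type(key, __u32);
 * 			__type(value, __u64[127]);
 * 		} stack_scratch SEC(".maps");
 *
 * 		SEC("kprobe/submit_bio")
 * 		int dump_stack(struct pt_regs *ctx)
 * 		{
 * 			__u32 zero = 0;
 * 			__u64 *buf = bpf_map_lookup_elem(&stack_scratch, &zero);
 * 			long len;
 *
 * 			if (!buf)
 * 				return 0;
 * 			// Collect kernel addresses, skipping one stack frame.
 * 			len = bpf_get_stack(ctx, buf, 127 * sizeof(__u64),
 * 					    1 & BPF_F_SKIP_FIELD_MASK);
 * 			// On success, len bytes of instruction pointers are in buf.
 * 			return 0;
 * 		}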
 */
static long (* const bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67;

/*
 * bpf_skb_load_bytes_relative
 *
 * 	This helper is similar to **bpf_skb_load_bytes**\ () in that
 * 	it provides an easy way to load *len* bytes from *offset*
 * 	from the packet associated to *skb*, into the buffer pointed
 * 	by *to*. The difference to **bpf_skb_load_bytes**\ () is that
 * 	a fifth argument *start_header* exists in order to select a
 * 	base offset to start from. *start_header* can be one of:
 *
 * 	**BPF_HDR_START_MAC**
 * 		Base offset to load data from is *skb*'s mac header.
 * 	**BPF_HDR_START_NET**
 * 		Base offset to load data from is *skb*'s network header.
 *
 * 	In general, "direct packet access" is the preferred method to
 * 	access packet data, however, this helper is in particular useful
 * 	in socket filters where *skb*\ **->data** does not always point
 * 	to the start of the mac header and where "direct packet access"
 * 	is not available.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68;

/*
 * bpf_fib_lookup
 *
 * 	Do FIB lookup in kernel tables using parameters in *params*.
 * 	If lookup is successful and result shows packet is to be
 * 	forwarded, the neighbor tables are searched for the nexthop.
 * 	If successful (i.e., FIB lookup shows forwarding and nexthop
 * 	is resolved), the nexthop address is returned in ipv4_dst
 * 	or ipv6_dst based on family, smac is set to mac address of
 * 	egress device, dmac is set to nexthop mac address, rt_metric
 * 	is set to metric from route (IPv4/IPv6 only), and ifindex
 * 	is set to the device index of the nexthop from the FIB lookup.
 *
 * 	*plen* argument is the size of the passed in struct.
 * 	*flags* argument can be a combination of one or more of the
 * 	following values:
 *
 * 	**BPF_FIB_LOOKUP_DIRECT**
 * 		Do a direct table lookup vs full lookup using FIB
 * 		rules.
 * 	**BPF_FIB_LOOKUP_TBID**
 * 		Used with BPF_FIB_LOOKUP_DIRECT.
 * 		Use the routing table ID present in *params*->tbid
 * 		for the fib lookup.
 * 	**BPF_FIB_LOOKUP_OUTPUT**
 * 		Perform lookup from an egress perspective (default is
 * 		ingress).
 * 	**BPF_FIB_LOOKUP_SKIP_NEIGH**
 * 		Skip the neighbour table lookup. *params*->dmac
 * 		and *params*->smac will not be set as output. A common
 * 		use case is to call **bpf_redirect_neigh**\ () after
 * 		doing **bpf_fib_lookup**\ ().
 * 	**BPF_FIB_LOOKUP_SRC**
 * 		Derive and set source IP addr in *params*->ipv{4,6}_src
 * 		for the nexthop. If the src addr cannot be derived,
 * 		**BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
 * 		case, *params*->dmac and *params*->smac are not set either.
 * 	**BPF_FIB_LOOKUP_MARK**
 * 		Use the mark present in *params*->mark for the fib lookup.
 * 		This option should not be used with BPF_FIB_LOOKUP_DIRECT,
 * 		as it only has meaning for full lookups.
 *
 * 	*ctx* is either **struct xdp_md** for XDP programs or
 * 	**struct sk_buff** for tc cls_act programs.
 *
 * Returns
 * 	* < 0 if any input argument is invalid
 * 	* 0 on success (packet is forwarded, nexthop neighbor exists)
 * 	* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
 * 	  packet is not forwarded or needs assist from full stack
 *
 * 	If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
 * 	was exceeded and output params->mtu_result contains the MTU.
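 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a minimal IPv4-only XDP forwarder could
 * 	combine this helper with **bpf_redirect**\ (). The sketch
 * 	assumes the usual headers (linux/if_ether.h, linux/ip.h,
 * 	bpf/bpf_endian.h) are included and AF_INET is defined, and it
 * 	omits the TTL check/decrement a real forwarder would need:
 *
 * 	::
 *
 * 		SEC("xdp")
 * 		int xdp_fwd(struct xdp_md *ctx)
 * 		{
 * 			void *data_end = (void *)(long)ctx->data_end;
 * 			void *data = (void *)(long)ctx->data;
 * 			struct bpf_fib_lookup params = {};
 * 			struct ethhdr *eth = data;
 * 			struct iphdr *iph = data + sizeof(*eth);
 * 			long rc;
 *
 * 			if (data + sizeof(*eth) + sizeof(*iph) > data_end)
 * 				return XDP_PASS;
 * 			if (eth->h_proto != bpf_htons(ETH_P_IP))
 * 				return XDP_PASS;
 *
 * 			params.family = AF_INET;
 * 			params.ipv4_src = iph->saddr;
 * 			params.ipv4_dst = iph->daddr;
 * 			params.ifindex = ctx->ingress_ifindex;
 *
 * 			rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
 * 			if (rc != BPF_FIB_LKUP_RET_SUCCESS)
 * 				return XDP_PASS;
 *
 * 			// Rewrite MACs from the lookup result, then redirect.
 * 			__builtin_memcpy(eth->h_dest, params.dmac, ETH_ALEN);
 * 			__builtin_memcpy(eth->h_source, params.smac, ETH_ALEN);
 * 			return bpf_redirect(params.ifindex, 0);
 * 		}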
*/ static long (* const bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69; /* * bpf_sock_hash_update * * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70; /* * bpf_msg_redirect_hash * * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * * Returns * **SK_PASS** on success, or **SK_DROP** on error. */ static long (* const bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71; /* * bpf_sk_redirect_hash * * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress otherwise). This is the only flag supported for now. * * Returns * **SK_PASS** on success, or **SK_DROP** on error. */ static long (* const bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72; /* * bpf_lwt_push_encap * * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at * address *hdr*, with *len* its size in bytes. *type* indicates * the protocol of the header and can be one of: * * **BPF_LWT_ENCAP_SEG6** * IPv6 encapsulation with Segment Routing Header * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, * the IPv6 header is computed by the kernel. * **BPF_LWT_ENCAP_SEG6_INLINE** * Only works if *skb* contains an IPv6 packet. Insert a * Segment Routing Header (**struct ipv6_sr_hdr**) inside * the IPv6 header. * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more * additional headers, up to **LWT_BPF_MAX_HEADROOM** * total bytes in all prepended headers. Please note that * if **skb_is_gso**\ (*skb*) is true, no more than two * headers can be prepended, and the inner header, if * present, should be either GRE or UDP/GUE. 
* * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and * **BPF_PROG_TYPE_LWT_XMIT**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73; /* * bpf_lwt_seg6_store_bytes * * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74; /* * bpf_lwt_seg6_adjust_srh * * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to * *skb*, at position *offset* by *delta* bytes. Only offsets * after the segments are accepted. *delta* can be as well * positive (growing) as negative (shrinking). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75; /* * bpf_lwt_seg6_action * * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter * contained at address *param*, and of length *param_len* bytes. * *action* can be one of: * * **SEG6_LOCAL_ACTION_END_X** * End.X action: Endpoint with Layer-3 cross-connect. * Type of *param*: **struct in6_addr**. * **SEG6_LOCAL_ACTION_END_T** * End.T action: Endpoint with specific IPv6 table lookup. * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. * Type of *param*: **struct ipv6_sr_hdr**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * Returns * 0 on success, or a negative error in case of failure. 
 */
static long (* const bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76;

/*
 * bpf_rc_repeat
 *
 * 	This helper is used in programs implementing IR decoding, to
 * 	report a successfully decoded repeat key message. This delays
 * 	the generation of a key up event for previously generated
 * 	key down event.
 *
 * 	Some IR protocols like NEC have a special IR message for
 * 	repeating the last button, for when a button is held down.
 *
 * 	The *ctx* should point to the lirc sample as passed into
 * 	the program.
 *
 * 	This helper is only available if the kernel was compiled with
 * 	the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 	"**y**".
 *
 * Returns
 * 	0
 */
static long (* const bpf_rc_repeat)(void *ctx) = (void *) 77;

/*
 * bpf_rc_keydown
 *
 * 	This helper is used in programs implementing IR decoding, to
 * 	report a successfully decoded key press with *scancode*,
 * 	*toggle* value in the given *protocol*. The scancode will be
 * 	translated to a keycode using the rc keymap, and reported as
 * 	an input key down event. After a period a key up event is
 * 	generated. This period can be extended by calling either
 * 	**bpf_rc_keydown**\ () again with the same values, or calling
 * 	**bpf_rc_repeat**\ ().
 *
 * 	Some protocols include a toggle bit, in case the button was
 * 	released and pressed again between consecutive scancodes.
 *
 * 	The *ctx* should point to the lirc sample as passed into
 * 	the program.
 *
 * 	The *protocol* is the decoded protocol number (see
 * 	**enum rc_proto** for some predefined values).
 *
 * 	This helper is only available if the kernel was compiled with
 * 	the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 	"**y**".
 *
 * Returns
 * 	0
 */
static long (* const bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78;

/*
 * bpf_skb_cgroup_id
 *
 * 	Return the cgroup v2 id of the socket associated with the *skb*.
 * 	This is roughly similar to the **bpf_get_cgroup_classid**\ ()
 * 	helper for cgroup v1 by providing a tag (or identifier) that
 * 	can be matched on or used for map lookups e.g. to implement
 * 	policy. The cgroup v2 id of a given path in the hierarchy is
 * 	exposed in user space through the f_handle API in order to get
 * 	to the same 64-bit id.
 *
 * 	This helper can be used on TC egress path, but not on ingress,
 * 	and is available only if the kernel was compiled with the
 * 	**CONFIG_SOCK_CGROUP_DATA** configuration option.
 *
 * Returns
 * 	The id is returned or 0 in case the id could not be retrieved.
 */
static __u64 (* const bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79;

/*
 * bpf_get_current_cgroup_id
 *
 * 	Get the current cgroup id based on the cgroup within which
 * 	the current task is running.
 *
 * Returns
 * 	A 64-bit integer containing the current cgroup id based
 * 	on the cgroup within which the current task is running.
 */
static __u64 (* const bpf_get_current_cgroup_id)(void) = (void *) 80;

/*
 * bpf_get_local_storage
 *
 * 	Get the pointer to the local storage area.
 * 	The type and the size of the local storage is defined
 * 	by the *map* argument.
 * 	The *flags* meaning is specific for each map type,
 * 	and has to be 0 for cgroup local storage.
 *
 * 	Depending on the BPF program type, a local storage area
 * 	can be shared between multiple instances of the BPF program,
 * 	running simultaneously.
 *
 * 	Users are responsible for synchronization themselves, for
 * 	example by using the **BPF_ATOMIC** instructions to alter
 * 	the shared data.
 *
 * Returns
 * 	A pointer to the local storage area.
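 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a cgroup/skb program could keep a
 * 	per-cgroup packet counter in cgroup storage (the map name
 * 	``pkt_cnt`` is hypothetical):
 *
 * 	::
 *
 * 		struct {
 * 			__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 * 			__type(key, struct bpf_cgroup_storage_key);
 * 			__type(value, __u64);
 * 		} pkt_cnt SEC(".maps");
 *
 * 		SEC("cgroup_skb/ingress")
 * 		int count_pkts(struct __sk_buff *skb)
 * 		{
 * 			__u64 *cnt = bpf_get_local_storage(&pkt_cnt, 0);
 *
 * 			__sync_fetch_and_add(cnt, 1);
 * 			return 1;
 * 		}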
*/ static void *(* const bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81; /* * bpf_sk_select_reuseport * * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. * It checks the selected socket is matching the incoming * request in the socket buffer. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82; /* * bpf_skb_ancestor_cgroup_id * * Return id of cgroup v2 that is ancestor of cgroup associated * with the *skb* at the *ancestor_level*. The root cgroup is at * *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with *skb*, then return value will be same as that * of **bpf_skb_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are upper in hierarchy than immediate cgroup associated * with *skb*. * * The format of returned id and helper limitations are same as in * **bpf_skb_cgroup_id**\ (). * * Returns * The id is returned or 0 in case the id could not be retrieved. */ static __u64 (* const bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83; /* * bpf_sk_lookup_tcp * * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * * Returns * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. */ static struct bpf_sock *(* const bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84; /* * bpf_sk_lookup_udp * * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. 
 *
 * 	If the *netns* is a negative signed 32-bit integer, then the
 * 	socket lookup table in the netns associated with the *ctx*
 * 	will be used. For the TC hooks, this is the netns of the device
 * 	in the skb. For socket hooks, this is the netns of the socket.
 * 	If *netns* is any other signed 32-bit value greater than or
 * 	equal to zero then it specifies the ID of the netns relative to
 * 	the netns associated with the *ctx*. *netns* values beyond the
 * 	range of 32-bit integers are reserved for future use.
 *
 * 	All values for *flags* are reserved for future usage, and must
 * 	be left at zero.
 *
 * 	This helper is available only if the kernel was compiled with
 * 	**CONFIG_NET** configuration option.
 *
 * Returns
 * 	Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 * 	For sockets with reuseport option, the **struct bpf_sock**
 * 	result is from *reuse*\ **->socks**\ [] using the hash of the
 * 	tuple.
 */
static struct bpf_sock *(* const bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85;

/*
 * bpf_sk_release
 *
 * 	Release the reference held by *sock*. *sock* must be a
 * 	non-**NULL** pointer that was returned from
 * 	**bpf_sk_lookup_xxx**\ ().
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_sk_release)(void *sock) = (void *) 86;

/*
 * bpf_map_push_elem
 *
 * 	Push an element *value* in *map*. *flags* is one of:
 *
 * 	**BPF_EXIST**
 * 		If the queue/stack is full, the oldest element is
 * 		removed to make room for this.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87;

/*
 * bpf_map_pop_elem
 *
 * 	Pop an element from *map*.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_map_pop_elem)(void *map, void *value) = (void *) 88;

/*
 * bpf_map_peek_elem
 *
 * 	Get an element from *map* without removing it.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_map_peek_elem)(void *map, void *value) = (void *) 89;

/*
 * bpf_msg_push_data
 *
 * 	For socket policies, insert *len* bytes into *msg* at offset
 * 	*start*.
 *
 * 	If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 * 	*msg* it may want to insert metadata or options into the *msg*.
 * 	This can later be read and used by any of the lower layer BPF
 * 	hooks.
 *
 * 	This helper may fail under memory pressure (if an allocation
 * 	fails); in these cases BPF programs will get an appropriate
 * 	error and will need to handle it.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
 */
static long (* const bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90;

/*
 * bpf_msg_pop_data
 *
 * 	Will remove *len* bytes from a *msg* starting at byte *start*.
 * 	This may result in **ENOMEM** errors under certain situations if
 * 	an allocation and copy are required due to a full ring buffer.
 * 	However, the helper will try to avoid doing the allocation
 * 	if possible. Other errors can occur if input parameters are
 * 	invalid, either due to the *start* byte not being a valid part
 * 	of the *msg* payload and/or the *len* value being too large.
 *
 * Returns
 * 	0 on success, or a negative error in case of failure.
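 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a verdict program could strip a fixed
 * 	4-byte application header from the start of each message:
 *
 * 	::
 *
 * 		SEC("sk_msg")
 * 		int strip_hdr(struct sk_msg_md *msg)
 * 		{
 * 			// Remove the first 4 bytes; flags must be zero.
 * 			if (bpf_msg_pop_data(msg, 0, 4, 0))
 * 				return SK_DROP;
 * 			return SK_PASS;
 * 		}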
 */
static long (* const bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91;

/*
 * bpf_rc_pointer_rel
 *
 * 	This helper is used in programs implementing IR decoding, to
 * 	report a successfully decoded pointer movement.
 *
 * 	The *ctx* should point to the lirc sample as passed into
 * 	the program.
 *
 * 	This helper is only available if the kernel was compiled with
 * 	the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 	"**y**".
 *
 * Returns
 * 	0
 */
static long (* const bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92;

/*
 * bpf_spin_lock
 *
 * 	Acquire a spinlock represented by the pointer *lock*, which is
 * 	stored as part of a value of a map. Taking the lock allows one
 * 	to safely update the rest of the fields in that value. The
 * 	spinlock can (and must) later be released with a call to
 * 	**bpf_spin_unlock**\ (\ *lock*\ ).
 *
 * 	Spinlocks in BPF programs come with a number of restrictions
 * 	and constraints:
 *
 * 	* **bpf_spin_lock** objects are only allowed inside maps of
 * 	  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
 * 	  list could be extended in the future).
 * 	* BTF description of the map is mandatory.
 * 	* The BPF program can take ONE lock at a time, since taking two
 * 	  or more could cause deadlocks.
 * 	* Only one **struct bpf_spin_lock** is allowed per map element.
 * 	* When the lock is taken, calls (either BPF to BPF or helpers)
 * 	  are not allowed.
 * 	* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
 * 	  allowed inside a spinlock-ed region.
 * 	* The BPF program MUST call **bpf_spin_unlock**\ () to release
 * 	  the lock, on all execution paths, before it returns.
 * 	* The BPF program can access **struct bpf_spin_lock** only via
 * 	  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
 * 	  helpers. Loading or storing data into the **struct
 * 	  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
 * 	* To use the **bpf_spin_lock**\ () helper, the BTF description
 * 	  of the map value must be a struct and have **struct
 * 	  bpf_spin_lock** *anyname*\ **;** field at the top level.
 * 	  Nested lock inside another struct is not allowed.
 * 	* The **struct bpf_spin_lock** *lock* field in a map value must
 * 	  be aligned on a multiple of 4 bytes in that value.
 * 	* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
 * 	  the **bpf_spin_lock** field to user space.
 * 	* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
 * 	  a BPF program, do not update the **bpf_spin_lock** field.
 * 	* **bpf_spin_lock** cannot be on the stack or inside a
 * 	  networking packet (it can only be inside of a map value).
 * 	* **bpf_spin_lock** is available to root only.
 * 	* Tracing programs and socket filter programs cannot use
 * 	  **bpf_spin_lock**\ () due to insufficient preemption checks
 * 	  (but this may change in the future).
 * 	* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
 *
 * Returns
 * 	0
 */
static long (* const bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93;

/*
 * bpf_spin_unlock
 *
 * 	Release the *lock* previously locked by a call to
 * 	**bpf_spin_lock**\ (\ *lock*\ ).
 *
 * Returns
 * 	0
 */
static long (* const bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94;

/*
 * bpf_sk_fullsock
 *
 * 	This helper gets a **struct bpf_sock** pointer such
 * 	that all the fields in this **bpf_sock** can be accessed.
 *
 * Returns
 * 	A **struct bpf_sock** pointer on success, or **NULL** in
 * 	case of failure.
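 *
 * 	As an illustration (editor's sketch, not part of the original
 * 	helper documentation), a cgroup/skb program could use it to
 * 	safely read full-socket fields off *skb*\ **->sk**:
 *
 * 	::
 *
 * 		SEC("cgroup_skb/egress")
 * 		int check_sk(struct __sk_buff *skb)
 * 		{
 * 			struct bpf_sock *sk = skb->sk;
 *
 * 			if (!sk)
 * 				return 1;
 * 			sk = bpf_sk_fullsock(sk);
 * 			if (!sk)
 * 				return 1;
 * 			// Full-socket fields (e.g. sk->state) are now readable.
 * 			return 1;
 * 		}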
*/ static struct bpf_sock *(* const bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95; /* * bpf_tcp_sock * * This helper gets a **struct bpf_tcp_sock** pointer from a * **struct bpf_sock** pointer. * * Returns * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. */ static struct bpf_tcp_sock *(* const bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96; /* * bpf_skb_ecn_set_ce * * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 * and IPv4. * * Returns * 1 if the **CE** flag is set (either by the current helper call * or because it was already present), 0 if it is not set. */ static long (* const bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97; /* * bpf_get_listener_sock * * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. * **bpf_sk_release**\ () is unnecessary and not allowed. * * Returns * A **struct bpf_sock** pointer on success, or **NULL** in * case of failure. */ static struct bpf_sock *(* const bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98; /* * bpf_skc_lookup_tcp * * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * This function is identical to **bpf_sk_lookup_tcp**\ (), except * that it also returns timewait or request sockets. Use * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the * full structure. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * * Returns * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. */ static struct bpf_sock *(* const bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99; /* * bpf_tcp_check_syncookie * * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header (at least * **sizeof**\ (**struct tcphdr**)). * * Returns * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. */ static long (* const bpf_tcp_check_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100; /* * bpf_sysctl_get_name * * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. * * The buffer is always NUL terminated, unless it's zero-sized. * * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name * only (e.g. "tcp_mem"). * * Returns * Number of character copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). */ static long (* const bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101; /* * bpf_sysctl_get_current_value * * Get current value of sysctl as it is presented in /proc/sys * (incl. 
newline, etc), and copy it as a string into the buffer * *buf* of size *buf_len* provided by the program. * * The whole value is copied, no matter what file position user * space issued e.g. sys_read at. * * The buffer is always NUL terminated, unless it's zero-sized. * * Returns * Number of characters copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated value in this case). * * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. */ static long (* const bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102; /* * bpf_sysctl_get_new_value * * Get the new value being written by user space to the sysctl (before * the actual write happens) and copy it as a string into the * buffer *buf* of size *buf_len* provided by the program. * * User space may write the new value at file position > 0. * * The buffer is always NUL terminated, unless it's zero-sized. * * Returns * Number of characters copied (not including the trailing NUL). * * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated value in this case). * * **-EINVAL** if sysctl is being read. */ static long (* const bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103; /* * bpf_sysctl_set_new_value * * Override the new value being written by user space to the sysctl * with the value provided by the program in buffer *buf* of size * *buf_len*. * * *buf* should contain a string in the same form as provided by user * space on a sysctl write. * * User space may write the new value at file position > 0. To override * the whole sysctl value, the file position should be set to zero. * * Returns * 0 on success. * * **-E2BIG** if the *buf_len* is too big. * * **-EINVAL** if sysctl is being read. */ static long (* const bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104; /* * bpf_strtol * * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base * and save the result in *res*. * * The string may begin with an arbitrary amount of white space * (as determined by **isspace**\ (3)) followed by a single * optional '**-**' sign. * * The five least significant bits of *flags* encode the base; other bits * are currently unused. * * The base must be either 8, 10, 16 or 0 to detect it automatically, * similar to user space **strtol**\ (3). * * Returns * Number of characters consumed on success. Must be positive but * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. * * **-ERANGE** if resulting value was out of range. */ static long (* const bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105; /* * bpf_strtoul * * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the * given base and save the result in *res*. * * The string may begin with an arbitrary amount of white space * (as determined by **isspace**\ (3)). * * The five least significant bits of *flags* encode the base; other bits * are currently unused. * * The base must be either 8, 10, 16 or 0 to detect it automatically, * similar to user space **strtoul**\ (3). * * Returns * Number of characters consumed on success. Must be positive but * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided.
* * **-ERANGE** if resulting value was out of range. */ static long (* const bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106; /* * bpf_sk_storage_get * * Get a bpf-local-storage from a *sk*. * * Logically, it could be thought of as getting the value from * a *map* with *sk* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except that this * helper enforces that the key must be a full socket and the map must * be of type **BPF_MAP_TYPE_SK_STORAGE**. * * Underneath, the value is stored locally at *sk* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf-local-storages residing at *sk*. * * *sk* is a kernel **struct sock** pointer for LSM programs. * *sk* is a **struct bpf_sock** pointer for other program types. * * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf-local-storage. If *value* is * **NULL**, the new bpf-local-storage will be zero initialized. * * Returns * A bpf-local-storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf-local-storage. */ static void *(* const bpf_sk_storage_get)(void *map, void *sk, void *value, __u64 flags) = (void *) 107; /* * bpf_sk_storage_delete * * Delete a bpf-local-storage from a *sk*. * * Returns * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). */ static long (* const bpf_sk_storage_delete)(void *map, void *sk) = (void *) 108; /* * bpf_send_signal * * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. * * Returns * 0 on success or successfully queued. * * **-EBUSY** if work queue under nmi is full. * * **-EINVAL** if *sig* is invalid. * * **-EPERM** if no permission to send the *sig*. * * **-EAGAIN** if the bpf program can try again. */ static long (* const bpf_send_signal)(__u32 sig) = (void *) 109; /* * bpf_tcp_gen_syncookie * * Try to issue a SYN cookie for the packet with corresponding * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header with options (at least * **sizeof**\ (**struct tcphdr**)). * * Returns * On success, the lower 32 bits hold the generated SYN cookie, * followed by 16 bits which hold the MSS value for that cookie, * and the top 16 bits are unused. * * On failure, the returned value is one of the following: * * **-EINVAL** SYN cookie cannot be issued due to error * * **-ENOENT** SYN cookie should not be issued (no SYN flood) * * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 */ static __s64 (* const bpf_tcp_gen_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110; /* * bpf_skb_output * * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**.
This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed to by *data*. * * *ctx* is a pointer to in-kernel struct sk_buff. * * This helper is similar to **bpf_perf_event_output**\ () but * restricted to raw_tracepoint bpf programs. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111; /* * bpf_probe_read_user * * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112; /* * bpf_probe_read_kernel * * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113; /* * bpf_probe_read_user_str * * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the * terminating NUL byte. In case the string length is smaller than * *size*, the target is not padded with further NUL bytes. If the * string length is larger than *size*, just *size*-1 bytes are * copied and the last byte is set to NUL. * * On success, returns the number of bytes that were written, * including the terminal NUL. This makes this helper useful in * tracing programs for reading strings, and more importantly to * get its length at runtime. See the following snippet: * * :: * * SEC("kprobe/sys_open") * void bpf_sys_open(struct pt_regs *ctx) * { * char buf[PATHLEN]; // PATHLEN is defined to 256 * int res = bpf_probe_read_user_str(buf, sizeof(buf), * ctx->di); * * // Consume buf, for example push it to * // userspace via bpf_perf_event_output(); we * // can use res (the string length) as event * // size, after checking its boundaries. * } * * In comparison, using the **bpf_probe_read_user**\ () helper here * instead to read the string would require estimating the length * at compile time, and would often result in copying more memory * than necessary. * * Another useful use case is when parsing individual process * arguments or individual environment variables by navigating * *current*\ **->mm->arg_start** and *current*\ * **->mm->env_start**: using this helper and the return value, * one can quickly iterate at the right offset of the memory area. * * Returns * On success, the strictly positive length of the output string, * including the trailing NUL character. On error, a negative * value. */ static long (* const bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114; /* * bpf_probe_read_kernel_str * * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
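 *
 *      A similar sketch for the kernel variant (the probed function is
 *      hypothetical; it assumes the argument in ctx->di on x86-64
 *      points to a NUL-terminated kernel string)::
 *
 *              SEC("kprobe/my_func")
 *              void probe_my_func(struct pt_regs *ctx)
 *              {
 *                      char buf[64];
 *                      long res = bpf_probe_read_kernel_str(buf, sizeof(buf),
 *                                                           (void *)ctx->di);
 *
 *                      // On success, buf holds a NUL-terminated copy and
 *                      // res is its length including the trailing NUL.
 *              }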
* * Returns * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. */ static long (* const bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115; /* * bpf_tcp_send_ack * * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116; /* * bpf_send_signal_thread * * Send signal *sig* to the thread corresponding to the current task. * * Returns * 0 on success or successfully queued. * * **-EBUSY** if work queue under nmi is full. * * **-EINVAL** if *sig* is invalid. * * **-EPERM** if no permission to send the *sig*. * * **-EAGAIN** if the bpf program can try again. */ static long (* const bpf_send_signal_thread)(__u32 sig) = (void *) 117; /* * bpf_jiffies64 * * Obtain the 64-bit jiffies * * Returns * The 64-bit jiffies */ static __u64 (* const bpf_jiffies64)(void) = (void *) 118; /* * bpf_read_branch_records * * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* * and store them in the buffer pointed to by *buf*, up to * *size* bytes. * * Returns * On success, number of bytes written to *buf*. On error, a * negative value. * * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to * instead return the number of bytes required to store all the * branch entries. If this flag is set, *buf* may be NULL. * * **-EINVAL** if arguments are invalid or **size** is not a multiple * of **sizeof**\ (**struct perf_branch_entry**\ ). * * **-ENOENT** if architecture does not support branch records. */ static long (* const bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119; /* * bpf_get_ns_current_pid_tgid * * On success, the values for *pid* and *tgid* as seen from the current * *namespace* are returned in *nsdata*. * * Returns * 0 on success, or one of the following in case of failure: * * **-EINVAL** if dev and inum supplied don't match dev_t and inode number * with nsfs of current task, or if dev conversion to dev_t lost high bits. * * **-ENOENT** if pidns does not exist for the current task. */ static long (* const bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120; /* * bpf_xdp_output * * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed to by *data*. * * *ctx* is a pointer to in-kernel struct xdp_buff. * * This helper is similar to **bpf_perf_event_output**\ () but * restricted to raw_tracepoint bpf programs. * * Returns * 0 on success, or a negative error in case of failure.
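 *
 *      A rough usage sketch from a tracing program that receives a
 *      **struct xdp_buff** pointer (the attach point and map name are
 *      illustrative; **BPF_PROG** is the convenience macro from
 *      **bpf_tracing.h**)::
 *
 *              struct {
 *                      __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *                      __uint(key_size, sizeof(int));
 *                      __uint(value_size, sizeof(int));
 *              } perf_buf_map SEC(".maps");
 *
 *              SEC("fentry/my_xdp_func")
 *              int BPF_PROG(trace_xdp, struct xdp_buff *xdp)
 *              {
 *                      __u64 len = xdp->data_end - xdp->data;
 *
 *                      // Emit the packet length to the CPU-local perf buffer.
 *                      bpf_xdp_output(xdp, &perf_buf_map, BPF_F_CURRENT_CPU,
 *                                     &len, sizeof(len));
 *                      return 0;
 *              }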
*/ static long (* const bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121; /* * bpf_get_netns_cookie * * Retrieve the cookie (generated by the kernel) of the network * namespace the input *ctx* is associated with. The network * namespace cookie remains stable for its lifetime and provides * a global identifier that can be assumed unique. If *ctx* is * NULL, then the helper returns the cookie for the initial * network namespace. The cookie itself is very similar to that * of the **bpf_get_socket_cookie**\ () helper, but for network * namespaces instead of sockets. * * Returns * An 8-byte long opaque number. */ static __u64 (* const bpf_get_netns_cookie)(void *ctx) = (void *) 122; /* * bpf_get_current_ancestor_cgroup_id * * Return the id of the cgroup v2 that is the ancestor of the cgroup associated * with the current task at the *ancestor_level*. The root cgroup * is at *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with the current task, then the return value will be the * same as that of **bpf_get_current_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup associated * with the current task. * * The format of the returned id and the helper limitations are the same as in * **bpf_get_current_cgroup_id**\ (). * * Returns * The id is returned or 0 in case the id could not be retrieved. */ static __u64 (* const bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123; /* * bpf_sk_assign * * Helper is overloaded depending on BPF program type. This * description applies to **BPF_PROG_TYPE_SCHED_CLS** and * **BPF_PROG_TYPE_SCHED_ACT** programs. * * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, * this will cause *skb* to be delivered to the specified socket. * Subsequent redirection of *skb* via **bpf_redirect**\ (), * **bpf_clone_redirect**\ () or other methods outside of BPF may * interfere with successful delivery to the socket. * * This operation is only valid from TC ingress path. * * The *flags* argument must be zero. * * Returns * 0 on success, or a negative error in case of failure: * * **-EINVAL** if specified *flags* are not supported. * * **-ENOENT** if the socket is unavailable for assignment. * * **-ENETUNREACH** if the socket is unreachable (wrong netns). * * **-EOPNOTSUPP** if the operation is not supported, for example * a call from outside of TC ingress. */ static long (* const bpf_sk_assign)(void *ctx, void *sk, __u64 flags) = (void *) 124; /* * bpf_ktime_get_boot_ns * * Return the time elapsed since system boot, in nanoseconds. * Does include the time the system was suspended. * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) * * Returns * Current *ktime*. */ static __u64 (* const bpf_ktime_get_boot_ns)(void) = (void *) 125; /* * bpf_seq_printf * * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. * The *m* represents the seq_file. The *fmt* and *fmt_size* are for * the format string itself. The *data* and *data_len* are format string * arguments. The *data* are a **u64** array and corresponding format string * values are stored in the array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* array. * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
* * Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory. * Reading kernel memory may fail due to either an invalid address or * a valid address that requires a major memory fault. If reading kernel memory * fails, the string for **%s** will be an empty string, and the ip * address for **%p{i,I}{4,6}** will be 0. Not returning an error to * the bpf program is consistent with what **bpf_trace_printk**\ () does for now. * * Returns * 0 on success, or a negative error in case of failure: * * **-EBUSY** if the per-CPU memory copy buffer is busy; the bpf program can try again * by returning 1. * * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. * * **-E2BIG** if *fmt* contains too many format specifiers. * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. */ static long (* const bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126; /* * bpf_seq_write * * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the * data to write in bytes. * * Returns * 0 on success, or a negative error in case of failure: * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. */ static long (* const bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127; /* * bpf_sk_cgroup_id * * Return the cgroup v2 id of the socket *sk*. * * *sk* must be a non-**NULL** pointer to a socket, e.g. one * returned from **bpf_sk_lookup_xxx**\ (), * **bpf_sk_fullsock**\ (), etc. The format of the returned id is the * same as in **bpf_skb_cgroup_id**\ (). * * This helper is available only if the kernel was compiled with * the **CONFIG_SOCK_CGROUP_DATA** configuration option. * * Returns * The id is returned or 0 in case the id could not be retrieved. */ static __u64 (* const bpf_sk_cgroup_id)(void *sk) = (void *) 128; /* * bpf_sk_ancestor_cgroup_id * * Return id of cgroup v2 that is ancestor of cgroup associated * with the *sk* at the *ancestor_level*. The root cgroup is at * *ancestor_level* zero and each step down the hierarchy * increments the level. If *ancestor_level* == level of cgroup * associated with *sk*, then the return value will be the same as that * of **bpf_sk_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup associated * with *sk*. * * The format of the returned id and the helper limitations are the same as in * **bpf_sk_cgroup_id**\ (). * * Returns * The id is returned or 0 in case the id could not be retrieved. */ static __u64 (* const bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) = (void *) 129; /* * bpf_ringbuf_output * * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * An adaptive notification is a notification sent whenever the user-space * process has caught up and consumed all available payloads. In case the user-space * process is still processing a previous payload, then no notification is needed * as it will process the newly added payload automatically. * * Returns * 0 on success, or a negative error in case of failure.
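 *
 *      A minimal usage sketch (map, event and program names are
 *      illustrative)::
 *
 *              struct event {
 *                      __u32 pid;
 *              };
 *
 *              struct {
 *                      __uint(type, BPF_MAP_TYPE_RINGBUF);
 *                      __uint(max_entries, 256 * 1024);
 *              } rb SEC(".maps");
 *
 *              SEC("tracepoint/syscalls/sys_enter_execve")
 *              int handle_exec(void *ctx)
 *              {
 *                      struct event e = {
 *                              .pid = bpf_get_current_pid_tgid() >> 32,
 *                      };
 *
 *                      // 0 selects the adaptive notification mode.
 *                      bpf_ringbuf_output(&rb, &e, sizeof(e), 0);
 *                      return 0;
 *              }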
*/ static long (* const bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130; /* * bpf_ringbuf_reserve * * Reserve *size* bytes of payload in a ring buffer *ringbuf*. * *flags* must be 0. * * Returns * Valid pointer with *size* bytes of memory available; NULL, * otherwise. */ static void *(* const bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131; /* * bpf_ringbuf_submit * * Submit reserved ring buffer sample, pointed to by *data*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * See 'bpf_ringbuf_output()' for the definition of adaptive notification. * * Returns * Nothing. Always succeeds. */ static void (* const bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132; /* * bpf_ringbuf_discard * * Discard reserved ring buffer sample, pointed to by *data*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. * If **0** is specified in *flags*, an adaptive notification * of new data availability is sent. * * See 'bpf_ringbuf_output()' for the definition of adaptive notification. * * Returns * Nothing. Always succeeds. */ static void (* const bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133; /* * bpf_ringbuf_query * * Query various characteristics of the provided ring buffer. What * exactly is queried is determined by *flags*: * * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. * * **BPF_RB_RING_SIZE**: The size of the ring buffer. * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). * * The data returned is just a momentary snapshot of actual values * and could be inaccurate, so this facility should be used to * power heuristics and for reporting, not to make 100% correct * calculations. * * Returns * Requested value, or 0, if *flags* are not recognized. */ static __u64 (* const bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134; /* * bpf_csum_level * * Change the skb's checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform * checksum validation. The level is applicable to the following * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | * through the **bpf_skb_adjust_room**\ () helper when passing in * the **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since * the UDP header is removed. Similarly, an encap of the latter * into the former could be accompanied by a helper call to * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the * skb is still intended to be processed in higher layers of the * stack instead of just egressing at tc. * * There are three supported level settings at this time: * * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs * with CHECKSUM_UNNECESSARY. * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs * with CHECKSUM_UNNECESSARY. * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and * sets CHECKSUM_NONE to force checksum validation by the stack.
* * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current * skb->csum_level. * * Returns * 0 on success, or a negative error in case of failure. In the * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. */ static long (* const bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135; /* * bpf_skc_to_tcp6_sock * * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct tcp6_sock *(* const bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136; /* * bpf_skc_to_tcp_sock * * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct tcp_sock *(* const bpf_skc_to_tcp_sock)(void *sk) = (void *) 137; /* * bpf_skc_to_tcp_timewait_sock * * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct tcp_timewait_sock *(* const bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138; /* * bpf_skc_to_tcp_request_sock * * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct tcp_request_sock *(* const bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139; /* * bpf_skc_to_udp6_sock * * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct udp6_sock *(* const bpf_skc_to_udp6_sock)(void *sk) = (void *) 140; /* * bpf_get_task_stack * * Return a user or a kernel stack in the bpf program-provided buffer. * Note: the user stack will only be populated if the *task* is * the current task; all other tasks will return -EOPNOTSUPP. * To achieve this, the helper needs *task*, which is a valid * pointer to **struct task_struct**. To store the stacktrace, the * bpf program provides *buf* with a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * The *task* must be the current task. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. * * **bpf_get_task_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack= * * Returns * The non-negative copied *buf* length equal to or less than * *size* on success, or a negative error in case of failure. */ static long (* const bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141; /* * bpf_load_hdr_opt * * Load a header option. Supports reading a particular TCP header * option for bpf programs (**BPF_PROG_TYPE_SOCK_OPS**). * * If *flags* is 0, it will search for the option in the * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** * has details on what skb_data contains under different * *skops*\ **->op**.
* * The first byte of the *searchby_res* specifies the * kind that it wants to search for. * * If the searched-for kind is an experimental kind * (i.e. 253 or 254 according to RFC6994), it also * needs to specify the "magic", which is either * 2 bytes or 4 bytes. It then also needs to * specify the size of the magic by using * the 2nd byte, which is the "kind-length" of a TCP * header option; the "kind-length" also * includes the first 2 bytes, "kind" and "kind-length" * itself, just as a normal TCP header option does. * * For example, to search for experimental kind 254 with * 2 byte magic 0xeB9F, the searchby_res should be * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. * * To search for the standard window scale option (3), * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. * Note that kind-length must be 0 for a regular option. * * Searching for No-Op (0) and End-of-Option-List (1) is * not supported. * * *len* must be at least 2 bytes, which is the minimal size * of a header option. * * Supported flags: * * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the * saved_syn packet or the just-received syn packet. * * * Returns * > 0 when found, the header option is copied to *searchby_res*. * The return value is the total length copied. On failure, a * negative error code is returned: * * **-EINVAL** if a parameter is invalid. * * **-ENOMSG** if the option is not found. * * **-ENOENT** if no syn packet is available when * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. * * **-ENOSPC** if there is not enough space. Only *len* number of * bytes are copied. * * **-EFAULT** on failure to parse the header options in the * packet. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. */ static long (* const bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res, __u32 len, __u64 flags) = (void *) 142; /* * bpf_store_hdr_opt * * Store a header option. The data will be copied * from the buffer *from* with length *len* to the TCP header. * * The buffer *from* should have the whole option that * includes the kind, kind-length, and the actual * option data. The *len* must be at least kind-length * long. The kind-length does not have to be 4-byte * aligned. The kernel will take care of the padding * and setting the 4-byte-aligned value to th->doff. * * This helper will check for a duplicated option * by searching for the same option in the outgoing skb. * * This helper can only be called during * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. * * * Returns * 0 on success, or a negative error in case of failure: * * **-EINVAL** if a parameter is invalid. * * **-ENOSPC** if there is not enough space in the header. * Nothing has been written. * * **-EEXIST** if the option already exists. * * **-EFAULT** on failure to parse the existing header options. * * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. */ static long (* const bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from, __u32 len, __u64 flags) = (void *) 143; /* * bpf_reserve_hdr_opt * * Reserve *len* bytes for the bpf header option. The * space will be used by **bpf_store_hdr_opt**\ () later in * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. * * If **bpf_reserve_hdr_opt**\ () is called multiple times, * the total number of bytes will be reserved. * * This helper can only be called during * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. * * * Returns * 0 on success, or a negative error in case of failure: * * **-EINVAL** if a parameter is invalid. * * **-ENOSPC** if there is not enough space in the header.
* * **-EPERM** if the helper cannot be used under the current * *skops*\ **->op**. */ static long (* const bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, __u32 len, __u64 flags) = (void *) 144; /* * bpf_inode_storage_get * * Get a bpf_local_storage from an *inode*. * * Logically, it could be thought of as getting the value from * a *map* with *inode* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this * helper enforces the key must be an inode and the map must also * be a **BPF_MAP_TYPE_INODE_STORAGE**. * * Underneath, the value is stored locally at *inode* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf_local_storage residing at *inode*. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero initialized. * * Returns * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. */ static void *(* const bpf_inode_storage_get)(void *map, void *inode, void *value, __u64 flags) = (void *) 145; /* * bpf_inode_storage_delete * * Delete a bpf_local_storage from an *inode*. * * Returns * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. */ static int (* const bpf_inode_storage_delete)(void *map, void *inode) = (void *) 146; /* * bpf_d_path * * Return full path for given **struct path** object, which * needs to be the kernel BTF *path* object. The path is * returned in the provided buffer *buf* of size *sz* and * is zero terminated. * * * Returns * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. */ static long (* const bpf_d_path)(struct path *path, char *buf, __u32 sz) = (void *) 147; /* * bpf_copy_from_user * * Read *size* bytes from user space address *user_ptr* and store * the data in *dst*. This is a wrapper of **copy_from_user**\ (). * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_copy_from_user)(void *dst, __u32 size, const void *user_ptr) = (void *) 148; /* * bpf_snprintf_btf * * Use BTF to store a string representation of *ptr*->ptr in *str*, * using *ptr*->type_id. This value should specify the type * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) * can be used to look up vmlinux BTF type ids. Traversing the * data structure using BTF, the type information and values are * stored in the first *str_size* - 1 bytes of *str*. Safe copy of * the pointer data is carried out to avoid kernel crashes during * operation. Smaller types can use string space on the stack; * larger programs can use map data to store the string * representation. * * The string can be subsequently shared with userspace via * bpf_perf_event_output() or ring buffer interfaces. * bpf_trace_printk() is to be avoided as it places too small * a limit on string size to be useful. 
* * *flags* is a combination of * * **BTF_F_COMPACT** * no formatting around type information * **BTF_F_NONAME** * no struct/union member names/types * **BTF_F_PTR_RAW** * show raw (unobfuscated) pointer values; * equivalent to printk specifier %px. * **BTF_F_ZERO** * show zero-valued struct/union members; they * are not displayed by default * * * Returns * The number of bytes that were written (or would have been * written if output had to be truncated due to string size), * or a negative error in cases of failure. */ static long (* const bpf_snprintf_btf)(char *str, __u32 str_size, struct btf_ptr *ptr, __u32 btf_ptr_size, __u64 flags) = (void *) 149; /* * bpf_seq_printf_btf * * Use BTF to write to seq_write a string representation of * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). * *flags* are identical to those used for bpf_snprintf_btf. * * Returns * 0 on success or a negative error in case of failure. */ static long (* const bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr, __u32 ptr_size, __u64 flags) = (void *) 150; /* * bpf_skb_cgroup_classid * * See **bpf_get_cgroup_classid**\ () for the main description. * This helper differs from **bpf_get_cgroup_classid**\ () in that * the cgroup v1 net_cls class is retrieved only from the *skb*'s * associated socket instead of the current process. * * Returns * The id is returned or 0 in case the id could not be retrieved. */ static __u64 (* const bpf_skb_cgroup_classid)(struct __sk_buff *skb) = (void *) 151; /* * bpf_redirect_neigh * * Redirect the packet to another net device of index *ifindex* * and fill in L2 addresses from neighboring subsystem. This helper * is somewhat similar to **bpf_redirect**\ (), except that it * populates L2 addresses as well, meaning, internally, the helper * relies on the neighbor lookup for the L2 address of the nexthop. * * The helper will perform a FIB lookup based on the skb's * networking header to get the address of the next hop, unless * this is supplied by the caller in the *params* argument. The * *plen* argument indicates the len of *params* and should be set * to 0 if *params* is NULL. * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types, and enabled * for IPv4 and IPv6 protocols. * * Returns * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. */ static long (* const bpf_redirect_neigh)(__u32 ifindex, struct bpf_redir_neigh *params, int plen, __u64 flags) = (void *) 152; /* * bpf_per_cpu_ptr * * Take a pointer to a percpu ksym, *percpu_ptr*, and return a * pointer to the percpu kernel variable on *cpu*. A ksym is an * extern variable decorated with '__ksym'. For ksym, there is a * global var (either static or global) defined of the same name * in the kernel. The ksym is percpu if the global var is percpu. * The returned pointer points to the global percpu var on *cpu*. * * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the * kernel, except that bpf_per_cpu_ptr() may return NULL. This * happens if *cpu* is larger than nr_cpu_ids. The caller of * bpf_per_cpu_ptr() must check the returned value. * * Returns * A pointer pointing to the kernel percpu variable on *cpu*, or * NULL, if *cpu* is invalid. */ static void *(* const bpf_per_cpu_ptr)(const void *percpu_ptr, __u32 cpu) = (void *) 153; /* * bpf_this_cpu_ptr * * Take a pointer to a percpu ksym, *percpu_ptr*, and return a * pointer to the percpu kernel variable on this cpu. 
See the * description of 'ksym' in **bpf_per_cpu_ptr**\ (). * * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would * never return NULL. * * Returns * A pointer pointing to the kernel percpu variable on this cpu. */ static void *(* const bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154; /* * bpf_redirect_peer * * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_redirect**\ (), except * that the redirection happens to the *ifindex*' peer device and * the netns switch takes place from ingress to ingress without * going through the CPU's backlog queue. * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types at the * ingress hook and for veth and netkit target device types. The * peer device must reside in a different network namespace. * * Returns * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. */ static long (* const bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155; /* * bpf_task_storage_get * * Get a bpf_local_storage from the *task*. * * Logically, it could be thought of as getting the value from * a *map* with *task* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this * helper enforces the key must be a task_struct and the map must also * be a **BPF_MAP_TYPE_TASK_STORAGE**. * * Underneath, the value is stored locally at *task* instead of * the *map*. The *map* is used as the bpf-local-storage * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf_local_storage residing at *task*. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero initialized. * * Returns * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. */ static void *(* const bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156; /* * bpf_task_storage_delete * * Delete a bpf_local_storage from a *task*. * * Returns * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. */ static long (* const bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157; /* * bpf_get_current_task_btf * * Return a BTF pointer to the "current" task. * This pointer can also be used in helpers that accept an * *ARG_PTR_TO_BTF_ID* of type *task_struct*. * * Returns * Pointer to the current task. */ static struct task_struct *(* const bpf_get_current_task_btf)(void) = (void *) 158; /* * bpf_bprm_opts_set * * Set or clear certain options on *bprm*: * * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit * which sets the **AT_SECURE** auxv for glibc. The bit * is cleared if the flag is not specified. * * Returns * **-EINVAL** if invalid *flags* are passed, zero otherwise. */ static long (* const bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159; /* * bpf_ktime_get_coarse_ns * * Return a coarse-grained version of the time elapsed since * system boot, in nanoseconds. Does not include time the system * was suspended. 
* * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) * * Returns * Current *ktime*. */ static __u64 (* const bpf_ktime_get_coarse_ns)(void) = (void *) 160; /* * bpf_ima_inode_hash * * Returns the stored IMA hash of the *inode* (if it's available). * If the hash is larger than *size*, then only *size* * bytes will be copied to *dst*. * * Returns * The **hash_algo** is returned on success, * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if * invalid arguments are passed. */ static long (* const bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161; /* * bpf_sock_from_file * * If the given file represents a socket, returns the associated * socket. * * Returns * A pointer to a struct socket on success or NULL if the file is * not a socket. */ static struct socket *(* const bpf_sock_from_file)(struct file *file) = (void *) 162; /* * bpf_check_mtu * * Check the packet size against the MTU of the net device (based * on *ifindex*). This helper will likely be used in combination * with helpers that adjust/change the packet size. * * The argument *len_diff* can be used for querying with a planned * size change. This allows checking the MTU prior to changing the packet * ctx. Providing a *len_diff* adjustment that is larger than the * actual packet size (resulting in negative packet size) will in * principle not exceed the MTU, which is why it is not considered * a failure. Other BPF helpers are needed for performing the * planned size change; therefore the responsibility for catching * a negative packet size belongs in those helpers. * * Specifying *ifindex* zero means the MTU check is performed * against the current net device. This is practical if the helper * isn't used prior to a redirect. * * On input, *mtu_len* must be a valid pointer, else the verifier will * reject the BPF program. If the value *mtu_len* is initialized to * zero, then the ctx packet size is used. When the value *mtu_len* is * provided as input, it specifies the L3 length that the MTU check * is done against. Remember that XDP and TC length operate at L2, but * this value is L3, as it correlates to the MTU and IP-header tot_len * values, which are L3 (similar behavior as bpf_fib_lookup). * * The Linux kernel route table can configure MTUs on a more * specific per route level, which is not provided by this helper. * For route-level MTU checks, use the **bpf_fib_lookup**\ () * helper. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. * * The *flags* argument can be a combination of one or more of the * following values: * * **BPF_MTU_CHK_SEGS** * This flag only works for *ctx* **struct sk_buff**. * If the packet context contains extra packet segment buffers * (often known as a GSO skb), then the MTU check is harder to * perform at this point, because in the transmit path it is * possible for the skb packet to get re-segmented * (depending on net device features). This could still be * an MTU violation, so this flag enables performing the MTU * check against segments, with a different violation * return code to tell it apart. This check cannot use *len_diff*. * * On return, the *mtu_len* pointer contains the MTU value of the net * device. Remember that the net device's configured MTU is the L3 size, * which is what is returned here, while XDP and TC length operate at L2. * The helper takes this into account for you, but remember it when * using the MTU value in your BPF code. * * * Returns * * 0 on success, and populates the MTU value in the *mtu_len* pointer.
* * * < 0 if any input argument is invalid (*mtu_len* not updated) * * MTU violations return positive values, but also populate the MTU * value in the *mtu_len* pointer, as this can be needed for * implementing PMTU handling: * * * **BPF_MTU_CHK_RET_FRAG_NEEDED** * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** */ static long (* const bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163; /* * bpf_for_each_map_elem * * For each element in **map**, call the **callback_fn** function with * **map**, **callback_ctx** and other map-specific parameters. * The **callback_fn** should be a static function and * the **callback_ctx** should be a pointer to the stack. * The **flags** is used to control certain aspects of the helper. * Currently, the **flags** must be 0. * * The following are a list of supported map types and their * respective expected callback signatures: * * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY * * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); * * For per_cpu maps, the map_value is the value on the cpu where the * bpf_prog is running. * * If **callback_fn** returns 0, the helper will continue to the next * element. If the return value is 1, the helper will skip the rest of the * elements and return. Other return values are not used now. * * * Returns * The number of traversed map elements for success, **-EINVAL** for * invalid **flags**. */ static long (* const bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164; /* * bpf_snprintf * * Outputs a string into the **str** buffer of size **str_size** * based on a format string stored in a read-only map pointed to by * **fmt**. * * Each format specifier in **fmt** corresponds to one u64 element * in the **data** array. For strings and pointers where pointees * are accessed, only the pointer values are stored in the *data* * array. The *data_len* is the size of *data* in bytes - must be * a multiple of 8. * * Formats **%s** and **%p{i,I}{4,6}** require reading kernel * memory. Reading kernel memory may fail due to either invalid * address or valid address but requiring a major memory fault. If * reading kernel memory fails, the string for **%s** will be an * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. * Not returning an error to the bpf program is consistent with what * **bpf_trace_printk**\ () does for now. * * * Returns * The strictly positive length of the formatted string, including * the trailing zero character. If the return value is greater than * **str_size**, **str** contains a truncated string, guaranteed to * be zero-terminated except when **str_size** is 0. * * Or **-EBUSY** if the per-CPU memory copy buffer is busy. */ static long (* const bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165; /* * bpf_sys_bpf * * Execute the bpf syscall with the given arguments. * * Returns * A syscall result. */ static long (* const bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166; /* * bpf_btf_find_by_name_kind * * Find a BTF type with the given name and kind in vmlinux BTF or in modules' BTFs. * * Returns * Returns btf_id and btf_obj_fd in lower and upper 32 bits. */ static long (* const bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167; /* * bpf_sys_close * * Execute the close syscall for the given FD. * * Returns * A syscall result.
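 *
 *      Both **bpf_sys_bpf**\ () and **bpf_sys_close**\ () are usable
 *      only from **BPF_PROG_TYPE_SYSCALL** programs. A rough sketch
 *      (filling in the attribute union is elided)::
 *
 *              SEC("syscall")
 *              int load_prog(void *ctx)
 *              {
 *                      union bpf_attr attr = {};
 *                      int fd;
 *
 *                      // ... fill attr for BPF_PROG_LOAD ...
 *                      fd = bpf_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *                      if (fd < 0)
 *                              return fd;
 *                      // Release the FD once it is no longer needed.
 *                      return bpf_sys_close(fd);
 *              }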
*/ static long (* const bpf_sys_close)(__u32 fd) = (void *) 168; /* * bpf_timer_init * * Initialize the timer. * The first 4 bits of *flags* specify the clockid. * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. * All other bits of *flags* are reserved. * The verifier will reject the program if *timer* is not from * the same *map*. * * Returns * 0 on success. * **-EBUSY** if *timer* is already initialized. * **-EINVAL** if invalid *flags* are passed. * **-EPERM** if *timer* is in a map that doesn't have any user references. * The user space should either hold a file descriptor to a map with timers * or pin such a map in bpffs. When the map is unpinned or the file descriptor is * closed, all timers in the map will be cancelled and freed. */ static long (* const bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) = (void *) 169; /* * bpf_timer_set_callback * * Configure the timer to call the *callback_fn* static function. * * Returns * 0 on success. * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. * **-EPERM** if *timer* is in a map that doesn't have any user references. * The user space should either hold a file descriptor to a map with timers * or pin such a map in bpffs. When the map is unpinned or the file descriptor is * closed, all timers in the map will be cancelled and freed. */ static long (* const bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) = (void *) 170; /* * bpf_timer_start * * Set timer expiration N nanoseconds from the current time. The * configured callback will be invoked in soft irq context on some cpu * and will not repeat unless another bpf_timer_start() is made. * In such a case, the next invocation can migrate to a different cpu. * Since struct bpf_timer is a field inside a map element, the map * owns the timer. bpf_timer_set_callback() will increment the refcnt * of the BPF program to make sure that the callback_fn code stays valid. * When the user space reference to a map reaches zero, all timers * in the map are cancelled and the corresponding programs' refcnts are * decremented. This is done to make sure that Ctrl-C of a user * process doesn't leave any timers running. If the map is pinned in * bpffs, the callback_fn can re-arm itself indefinitely. * The bpf_map_update/delete_elem() helpers and user space sys_bpf commands * cancel and free the timer in the given map element. * The map can contain timers that invoke callback_fn-s from different * programs. The same callback_fn can serve different timers from * different maps if key/value layout matches across maps. * Every bpf_timer_set_callback() can have a different callback_fn. * * *flags* can be one of: * * **BPF_F_TIMER_ABS** * Start the timer with an absolute expire value instead of the * default relative one. * **BPF_F_TIMER_CPU_PIN** * The timer will be pinned to the CPU of the caller. * * * Returns * 0 on success. * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier * or invalid *flags* are passed. */ static long (* const bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) = (void *) 171; /* * bpf_timer_cancel * * Cancel the timer and wait for callback_fn to finish if it was running. * * Returns * 0 if the timer was not active. * 1 if the timer was active. * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its * own timer which would have led to a deadlock otherwise.
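 *
 *      The timer helpers are typically used together; a minimal
 *      sketch (map layout and attach point are illustrative, and
 *      CLOCK_MONOTONIC is assumed to come from the usual headers)::
 *
 *              struct map_elem {
 *                      struct bpf_timer timer;
 *              };
 *
 *              struct {
 *                      __uint(type, BPF_MAP_TYPE_ARRAY);
 *                      __uint(max_entries, 1);
 *                      __type(key, int);
 *                      __type(value, struct map_elem);
 *              } timer_map SEC(".maps");
 *
 *              static int timer_cb(void *map, int *key, struct map_elem *val)
 *              {
 *                      // Runs from soft irq context when the timer fires.
 *                      return 0;
 *              }
 *
 *              SEC("fentry/my_func")
 *              int arm_timer(void *ctx)
 *              {
 *                      int key = 0;
 *                      struct map_elem *val;
 *
 *                      val = bpf_map_lookup_elem(&timer_map, &key);
 *                      if (!val)
 *                              return 0;
 *                      bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
 *                      bpf_timer_set_callback(&val->timer, timer_cb);
 *                      // Fire once, one second from now.
 *                      bpf_timer_start(&val->timer, 1000000000, 0);
 *                      return 0;
 *              }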
*/ static long (* const bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172; /* * bpf_get_func_ip * * Get the address of the traced function (for tracing and kprobe programs). * * When called for a kprobe program attached as a uprobe, it returns the * probe address for both entry and return uprobes. * * * Returns * The address of the traced function for kprobes. * 0 for kprobes placed within the function (not at the entry). * The address of the probe for uprobes and return uprobes. */ static __u64 (* const bpf_get_func_ip)(void *ctx) = (void *) 173; /* * bpf_get_attach_cookie * * Get bpf_cookie value provided (optionally) during the program * attachment. It might be different for each individual * attachment, even if the BPF program itself is the same. * Expects BPF program context *ctx* as a first argument. * * Supported for the following program types: * - kprobe/uprobe; * - tracepoint; * - perf_event. * * Returns * Value specified by user at BPF link creation/attachment time * or 0, if it was not specified. */ static __u64 (* const bpf_get_attach_cookie)(void *ctx) = (void *) 174; /* * bpf_task_pt_regs * * Get the struct pt_regs associated with **task**. * * Returns * A pointer to struct pt_regs. */ static long (* const bpf_task_pt_regs)(struct task_struct *task) = (void *) 175; /* * bpf_get_branch_snapshot * * Get branch trace from hardware engines like Intel LBR. The * hardware engine is stopped shortly after the helper is * called. Therefore, the user needs to filter branch entries * based on the actual use case. To capture branch trace * before the trigger point of the BPF program, the helper * should be called at the beginning of the BPF program. * * The data is stored as struct perf_branch_entry into output * buffer *entries*. *size* is the size of *entries* in bytes. * *flags* is reserved for now and must be zero. * * * Returns * On success, number of bytes written to *entries*. On error, a * negative value. * * **-EINVAL** if *flags* is not zero. * * **-ENOENT** if architecture does not support branch records. */ static long (* const bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) = (void *) 176; /* * bpf_trace_vprintk * * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 * to format and can handle more format args as a result. * * Arguments are to be used as in **bpf_seq_printf**\ () helper. * * Returns * The number of bytes written to the buffer, or a negative error * in case of failure. */ static long (* const bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177; /* * bpf_skc_to_unix_sock * * Dynamically cast a *sk* pointer to a *unix_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct unix_sock *(* const bpf_skc_to_unix_sock)(void *sk) = (void *) 178; /* * bpf_kallsyms_lookup_name * * Get the address of a kernel symbol, returned in *res*. *res* is * set to 0 if the symbol is not found. * * Returns * On success, zero. On error, a negative value. * * **-EINVAL** if *flags* is not zero. * * **-EINVAL** if string *name* is not the same size as *name_sz*. * * **-ENOENT** if symbol is not found. * * **-EPERM** if caller does not have permission to obtain kernel address. */ static long (* const bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags, __u64 *res) = (void *) 179; /* * bpf_find_vma * * Find the vma of *task* that contains *addr*, and call the *callback_fn* * function with *task*, *vma*, and *callback_ctx*.
* The *callback_fn* should be a static function and * the *callback_ctx* should be a pointer to the stack. * The *flags* is used to control certain aspects of the helper. * Currently, the *flags* must be 0. * * The expected callback signature is * * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); * * * Returns * 0 on success. * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. * **-EBUSY** if failed to try lock mmap_lock. * **-EINVAL** for invalid **flags**. */ static long (* const bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 180; /* * bpf_loop * * For **nr_loops**, call **callback_fn** function * with **callback_ctx** as the context parameter. * The **callback_fn** should be a static function and * the **callback_ctx** should be a pointer to the stack. * The **flags** is used to control certain aspects of the helper. * Currently, the **flags** must be 0. Currently, nr_loops is * limited to 1 << 23 (~8 million) loops. * * long (\*callback_fn)(u64 index, void \*ctx); * * where **index** is the current index in the loop. The index * is zero-indexed. * * If **callback_fn** returns 0, the helper will continue to the next * loop. If return value is 1, the helper will skip the rest of * the loops and return. Other return values are not used now, * and will be rejected by the verifier. * * * Returns * The number of loops performed, **-EINVAL** for invalid **flags**, * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. */ static long (* const bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 181; /* * bpf_strncmp * * Do strncmp() between **s1** and **s2**. **s1** doesn't need * to be null-terminated and **s1_sz** is the maximum storage * size of **s1**. **s2** must be a read-only string. * * Returns * An integer less than, equal to, or greater than zero * if the first **s1_sz** bytes of **s1** is found to be * less than, to match, or be greater than **s2**. */ static long (* const bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) = (void *) 182; /* * bpf_get_func_arg * * Get **n**-th argument register (zero based) of the traced function (for tracing programs) * returned in **value**. * * * Returns * 0 on success. * **-EINVAL** if n >= argument register count of traced function. */ static long (* const bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) = (void *) 183; /* * bpf_get_func_ret * * Get return value of the traced function (for tracing programs) * in **value**. * * * Returns * 0 on success. * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN. */ static long (* const bpf_get_func_ret)(void *ctx, __u64 *value) = (void *) 184; /* * bpf_get_func_arg_cnt * * Get number of registers of the traced function (for tracing programs) where * function arguments are stored in these registers. * * * Returns * The number of argument registers of the traced function. */ static long (* const bpf_get_func_arg_cnt)(void *ctx) = (void *) 185; /* * bpf_get_retval * * Get the BPF program's return value that will be returned to the upper layers. * * This helper is currently supported by cgroup programs and only by the hooks * where BPF program's return value is returned to the userspace via errno. * * Returns * The BPF program's return value. 
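 *
 *      A sketch of how this pairs with **bpf_set_retval**\ () below,
 *      in a hypothetical cgroup hook (assumes the usual errno
 *      definitions are available)::
 *
 *              SEC("cgroup/bind4")
 *              int deny_bind(struct bpf_sock_addr *ctx)
 *              {
 *                      // Reject the bind and report EPERM to user space.
 *                      bpf_set_retval(-EPERM);
 *                      return 0;
 *              }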
*/ static int (* const bpf_get_retval)(void) = (void *) 186; /* * bpf_set_retval * * Set the BPF program's return value that will be returned to the upper layers. * * This helper is currently supported by cgroup programs and only by the hooks * where the BPF program's return value is returned to userspace via errno. * * Note that there is the following corner case where the program exports an error * via bpf_set_retval but signals success via 'return 1': * * bpf_set_retval(-EPERM); * return 1; * * In this case, the BPF program's return value will use the helper's -EPERM. This * still holds true for cgroup/bind{4,6} which supports an extra 'return 3' success case. * * * Returns * 0 on success, or a negative error in case of failure. */ static int (* const bpf_set_retval)(int retval) = (void *) 187; /* * bpf_xdp_get_buff_len * * Get the total size of a given xdp buff (linear and paged area). * * Returns * The total size of a given xdp buffer. */ static __u64 (* const bpf_xdp_get_buff_len)(struct xdp_md *xdp_md) = (void *) 188; /* * bpf_xdp_load_bytes * * This helper is provided as an easy way to load data from an * xdp buffer. It can be used to load *len* bytes from *offset* from * the frame associated to *xdp_md*, into the buffer pointed to by * *buf*. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189; /* * bpf_xdp_store_bytes * * Store *len* bytes from buffer *buf* into the frame * associated to *xdp_md*, at *offset*. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 190; /* * bpf_copy_from_user_task * * Read *size* bytes from user space address *user_ptr* in *tsk*'s * address space, and store the data in *dst*. *flags* is not * used yet and is provided for future extensibility. This helper * can only be used by sleepable programs. * * Returns * 0 on success, or a negative error in case of failure. On error * the *dst* buffer is zeroed out. */ static long (* const bpf_copy_from_user_task)(void *dst, __u32 size, const void *user_ptr, struct task_struct *tsk, __u64 flags) = (void *) 191; /* * bpf_skb_set_tstamp * * Change __sk_buff->tstamp_type to *tstamp_type* * and set __sk_buff->tstamp to *tstamp* at the same time. * * If there is no need to change the __sk_buff->tstamp_type, * the tstamp value can be directly written to __sk_buff->tstamp * instead. * * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that * will be kept during bpf_redirect_*(). A non-zero * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO * *tstamp_type*. * * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * * This function is most useful when the program needs to set a * mono delivery time to __sk_buff->tstamp and then * bpf_redirect_*() to the egress of an iface. For example, * changing the (rcv) timestamp in __sk_buff->tstamp at * ingress to a mono delivery time and then bpf_redirect_*() * to sch_fq@phy-dev. * * Returns * 0 on success. * **-EINVAL** for invalid input * **-EOPNOTSUPP** for unsupported protocol */ static long (* const bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __u32 tstamp_type) = (void *) 192; /* * bpf_ima_file_hash * * Returns a calculated IMA hash of the *file*.
* If the hash is larger than *size*, then only *size* * bytes will be copied to *dst*. * * Returns * The **hash_algo** is returned on success, * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. */ static long (* const bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) = (void *) 193; /* * bpf_kptr_xchg * * Exchange kptr at pointer *dst* with *ptr*, and return the old value. * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise * it must be a referenced pointer which will be released when this helper * is called. * * Returns * The old value of kptr (which can be NULL). The returned pointer, * if not NULL, is a reference which must be released using its * corresponding release function, or moved into a BPF map before * program exit. */ static void *(* const bpf_kptr_xchg)(void *dst, void *ptr) = (void *) 194; /* * bpf_map_lookup_percpu_elem * * Perform a lookup in *percpu map* for an entry associated to * *key* on *cpu*. * * Returns * Map value associated to *key* on *cpu*, or **NULL** if no entry * was found or *cpu* is invalid. */ static void *(* const bpf_map_lookup_percpu_elem)(void *map, const void *key, __u32 cpu) = (void *) 195; /* * bpf_skc_to_mptcp_sock * * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. * * Returns * *sk* if casting is valid, or **NULL** otherwise. */ static struct mptcp_sock *(* const bpf_skc_to_mptcp_sock)(void *sk) = (void *) 196; /* * bpf_dynptr_from_mem * * Get a dynptr to local memory *data*. * * *data* must be a pointer to a map value. * The maximum *size* supported is DYNPTR_MAX_SIZE. * *flags* is currently unused. * * Returns * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, * -EINVAL if flags is not 0. */ static long (* const bpf_dynptr_from_mem)(void *data, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 197; /* * bpf_ringbuf_reserve_dynptr * * Reserve *size* bytes of payload in a ring buffer *ringbuf* * through the dynptr interface. *flags* must be 0. * * Please note that a corresponding bpf_ringbuf_submit_dynptr or * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the * reservation fails. This is enforced by the verifier. * * Returns * 0 on success, or a negative error in case of failure. */ static long (* const bpf_ringbuf_reserve_dynptr)(void *ringbuf, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 198; /* * bpf_ringbuf_submit_dynptr * * Submit reserved ring buffer sample, pointed to by *ptr*, * through the dynptr interface. This is a no-op if the dynptr is * invalid/null. * * For more information on *flags*, please see * 'bpf_ringbuf_submit'. * * Returns * Nothing. Always succeeds. */ static void (* const bpf_ringbuf_submit_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 199; /* * bpf_ringbuf_discard_dynptr * * Discard reserved ring buffer sample through the dynptr * interface. This is a no-op if the dynptr is invalid/null. * * For more information on *flags*, please see * 'bpf_ringbuf_discard'. * * Returns * Nothing. Always succeeds. */ static void (* const bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 200; /* * bpf_dynptr_read * * Read *len* bytes from *src* into *dst*, starting from *offset* * into *src*. * *flags* is currently unused. * * Returns * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if * *flags* is not 0.
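 *
 * Example (illustrative sketch; *data* and DATA_SZ are hypothetical):
 * pairing bpf_dynptr_from_mem() with bpf_dynptr_read() to copy the
 * first 8 bytes of a map value into a local variable:
 *
 *	struct bpf_dynptr dptr;
 *	__u64 val = 0;
 *
 *	if (!bpf_dynptr_from_mem(data, DATA_SZ, 0, &dptr) &&
 *	    !bpf_dynptr_read(&val, sizeof(val), &dptr, 0, 0))
 *		bpf_printk("first bytes: %llx", val);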
*/ static long (* const bpf_dynptr_read)(void *dst, __u32 len, const struct bpf_dynptr *src, __u32 offset, __u64 flags) = (void *) 201; /* * bpf_dynptr_write * * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. * * *flags* must be 0 except for skb-type dynptrs. * * For skb-type dynptrs: * * All data slices of the dynptr are automatically * invalidated after **bpf_dynptr_write**\ (). This is * because writing may pull the skb and change the * underlying packet buffer. * * * For *flags*, please see the flags accepted by * **bpf_skb_store_bytes**\ (). * * Returns * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). */ static long (* const bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len, __u64 flags) = (void *) 202; /* * bpf_dynptr_data * * Get a pointer to the underlying dynptr data. * * *len* must be a statically known value. The returned data slice * is invalidated whenever the dynptr is invalidated. * * skb and xdp type dynptrs may not use bpf_dynptr_data. They should * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr. * * Returns * Pointer to the underlying dynptr data, NULL if the dynptr is * read-only, if the dynptr is invalid, or if the offset and length * are out of bounds. */ static void *(* const bpf_dynptr_data)(const struct bpf_dynptr *ptr, __u32 offset, __u32 len) = (void *) 203; /* * bpf_tcp_raw_gen_syncookie_ipv4 * * Try to issue a SYN cookie for the packet with corresponding * IPv4/TCP headers, *iph* and *th*, without depending on a * listening socket. * * *iph* points to the IPv4 header. * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header (at least * **sizeof**\ (**struct tcphdr**)). * * Returns * On success, the lower 32 bits hold the generated SYN cookie, * followed by 16 bits which hold the MSS value for that cookie; * the top 16 bits are unused. * * On failure, the returned value is one of the following: * * **-EINVAL** if *th_len* is invalid. */ static __s64 (* const bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 204; /* * bpf_tcp_raw_gen_syncookie_ipv6 * * Try to issue a SYN cookie for the packet with corresponding * IPv6/TCP headers, *iph* and *th*, without depending on a * listening socket. * * *iph* points to the IPv6 header. * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header (at least * **sizeof**\ (**struct tcphdr**)). * * Returns * On success, the lower 32 bits hold the generated SYN cookie, * followed by 16 bits which hold the MSS value for that cookie; * the top 16 bits are unused. * * On failure, the returned value is one of the following: * * **-EINVAL** if *th_len* is invalid. * * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not built in. */ static __s64 (* const bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 205; /* * bpf_tcp_raw_check_syncookie_ipv4 * * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv4 header. * * *th* points to the TCP header. * * Returns * 0 if *iph* and *th* are a valid SYN cookie ACK.
* * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. */ static long (* const bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th) = (void *) 206; /* * bpf_tcp_raw_check_syncookie_ipv6 * * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv6 header. * * *th* points to the TCP header. * * Returns * 0 if *iph* and *th* are a valid SYN cookie ACK. * * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. * * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. */ static long (* const bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) = (void *) 207; /* * bpf_ktime_get_tai_ns * * A nonsettable system-wide clock derived from wall-clock time but * ignoring leap seconds. This clock does not experience * discontinuities and backwards jumps caused by NTP inserting leap * seconds as CLOCK_REALTIME does. * * See: **clock_gettime**\ (**CLOCK_TAI**) * * Returns * Current *ktime*. */ static __u64 (* const bpf_ktime_get_tai_ns)(void) = (void *) 208; /* * bpf_user_ringbuf_drain * * Drain samples from the specified user ring buffer, and invoke * the provided callback for each such sample: * * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx); * * If **callback_fn** returns 0, the helper will continue to try * and drain the next sample, up to a maximum of * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1, * the helper will skip the rest of the samples and return. Other * return values are not used now, and will be rejected by the * verifier. * * Returns * The number of drained samples if no error was encountered while * draining samples, or 0 if no samples were present in the ring * buffer. If a user-space producer was epoll-waiting on this map, * and at least one sample was drained, they will receive an event * notification notifying them of available space in the ring * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this * function, no wakeup notification will be sent. If the * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will * be sent even if no sample was drained. * * On failure, the returned value is one of the following: * * **-EBUSY** if the ring buffer is contended, and another calling * context was concurrently draining the ring buffer. * * **-EINVAL** if user-space is not properly tracking the ring * buffer due to the producer position not being aligned to 8 * bytes, a sample not being aligned to 8 bytes, or the producer * position not matching the advertised length of a sample. * * **-E2BIG** if user-space has tried to publish a sample which is * larger than the size of the ring buffer, or which cannot fit * within a struct bpf_dynptr. */ static long (* const bpf_user_ringbuf_drain)(void *map, void *callback_fn, void *ctx, __u64 flags) = (void *) 209; /* * bpf_cgrp_storage_get * * Get a bpf_local_storage from the *cgroup*. * * Logically, it could be thought of as getting the value from * a *map* with *cgroup* as the **key**. From this * perspective, the usage is not much different from * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this * helper enforces the key must be a cgroup struct and the map must also * be a **BPF_MAP_TYPE_CGRP_STORAGE**. * * In reality, the local-storage value is embedded directly inside of the * *cgroup* object itself, rather than being located in the * **BPF_MAP_TYPE_CGRP_STORAGE** map. 
When the local-storage value is * queried for some *map* on a *cgroup* object, the kernel will perform an * O(n) iteration over all of the live local-storage values for that * *cgroup* object until the local-storage value for the *map* is found. * * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be * used such that a new bpf_local_storage will be * created if one does not exist. *value* can be used * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf_local_storage. If *value* is * **NULL**, the new bpf_local_storage will be zero-initialized. * * Returns * A bpf_local_storage pointer is returned on success. * * **NULL** if not found or there was an error in adding * a new bpf_local_storage. */ static void *(* const bpf_cgrp_storage_get)(void *map, struct cgroup *cgroup, void *value, __u64 flags) = (void *) 210; /* * bpf_cgrp_storage_delete * * Delete a bpf_local_storage from a *cgroup*. * * Returns * 0 on success. * * **-ENOENT** if the bpf_local_storage cannot be found. */ static long (* const bpf_cgrp_storage_delete)(void *map, struct cgroup *cgroup) = (void *) 211; xdp-tools-1.5.4/lib/libbpf/src/btf.c0000644000175100001660000045002014706536574016551 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2018 Facebook */ #include <byteswap.h> #include <endian.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include <sys/utsname.h> #include <sys/param.h> #include <sys/stat.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/btf.h> #include <gelf.h> #include <libelf.h> #include "btf.h" #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" #include "hashmap.h" #include "strset.h" #define BTF_MAX_NR_TYPES 0x7fffffffU #define BTF_MAX_STR_OFFSET 0x7fffffffU static struct btf_type btf_void; struct btf { /* raw BTF data in native endianness */ void *raw_data; /* raw BTF data in non-native endianness */ void *raw_data_swapped; __u32 raw_size; /* whether target endianness differs from the native one */ bool swapped_endian; /* * When BTF is loaded from an ELF or raw memory it is stored * in a contiguous memory block. The hdr, types_data, and strs_data * point inside that memory region to their respective parts of BTF * representation: * * +--------------------------------+ * | Header | Types | Strings | * +--------------------------------+ * ^ ^ ^ * | | | * hdr | | * types_data-+ | * strs_data------------+ * * If BTF data is later modified, e.g., due to types added or * removed, BTF deduplication performed, etc., this contiguous * representation is broken up into three independently allocated * memory regions to be able to modify them independently. * raw_data is nulled out at that point, but can be later allocated * and cached again if user calls btf__raw_data(), at which point * raw_data will contain a contiguous copy of header, types, and * strings: * * +----------+ +---------+ +-----------+ * | Header | | Types | | Strings | * +----------+ +---------+ +-----------+ * ^ ^ ^ * | | | * hdr | | * types_data----+ | * strset__data(strs_set)-----+ * * +----------+---------+-----------+ * | Header | Types | Strings | * raw_data----->+----------+---------+-----------+ */ struct btf_header *hdr; void *types_data; size_t types_data_cap; /* used size stored in hdr->type_len */ /* type ID to `struct btf_type *` lookup index * type_offs[0] corresponds to the first non-VOID type: * - for base BTF it's type [1]; * - for split BTF it's the first non-base BTF type.
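 * For example, for an instance with start_id S, the type with ID T
 * (T >= S) starts at types_data + type_offs[T - S]; see
 * btf_type_by_id() below.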
*/ __u32 *type_offs; size_t type_offs_cap; /* number of types in this BTF instance: * - doesn't include special [0] void type; * - for split BTF counts number of types added on top of base BTF. */ __u32 nr_types; /* if not NULL, points to the base BTF on top of which the current * split BTF is based */ struct btf *base_btf; /* BTF type ID of the first type in this BTF instance: * - for base BTF it's equal to 1; * - for split BTF it's equal to biggest type ID of base BTF plus 1. */ int start_id; /* logical string offset of this BTF instance: * - for base BTF it's equal to 0; * - for split BTF it's equal to total size of base BTF's string section size. */ int start_str_off; /* only one of strs_data or strs_set can be non-NULL, depending on * whether BTF is in a modifiable state (strs_set is used) or not * (strs_data points inside raw_data) */ void *strs_data; /* a set of unique strings */ struct strset *strs_set; /* whether strings are already deduplicated */ bool strs_deduped; /* whether base_btf should be freed in btf_free for this instance */ bool owns_base; /* BTF object FD, if loaded into kernel */ int fd; /* Pointer size (in bytes) for a target architecture of this BTF */ int ptr_sz; }; static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } /* Ensure given dynamically allocated memory region pointed to by *data* with * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements * are already used. At most *max_cnt* elements can be ever allocated. * If necessary, memory is reallocated and all existing data is copied over, * new pointer to the memory region is stored at *data, new memory region * capacity (in number of elements) is stored in *cap. * On success, memory pointer to the beginning of unused memory is returned. * On error, NULL is returned. 
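 *
 * Example (illustrative sketch; variable names are hypothetical):
 * growing a __u32 array by one element and storing into the new slot:
 *
 *	__u32 *arr = NULL, *slot;
 *	size_t cap = 0, cnt = 0;
 *
 *	slot = libbpf_add_mem((void **)&arr, &cap, sizeof(__u32),
 *			      cnt, SIZE_MAX, 1);
 *	if (!slot)
 *		return -ENOMEM;
 *	*slot = 42;
 *	cnt++;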
*/ void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t cur_cnt, size_t max_cnt, size_t add_cnt) { size_t new_cnt; void *new_data; if (cur_cnt + add_cnt <= *cap_cnt) return *data + cur_cnt * elem_sz; /* requested more than the set limit */ if (cur_cnt + add_cnt > max_cnt) return NULL; new_cnt = *cap_cnt; new_cnt += new_cnt / 4; /* expand by 25% */ if (new_cnt < 16) /* but at least 16 elements */ new_cnt = 16; if (new_cnt > max_cnt) /* but not exceeding a set limit */ new_cnt = max_cnt; if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */ new_cnt = cur_cnt + add_cnt; new_data = libbpf_reallocarray(*data, new_cnt, elem_sz); if (!new_data) return NULL; /* zero out newly allocated portion of memory */ memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz); *data = new_data; *cap_cnt = new_cnt; return new_data + cur_cnt * elem_sz; } /* Ensure given dynamically allocated memory region has enough allocated space * to accommodate *need_cnt* elements of size *elem_sz* bytes each */ int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt) { void *p; if (need_cnt <= *cap_cnt) return 0; p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt); if (!p) return -ENOMEM; return 0; } static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt) { return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), btf->nr_types, BTF_MAX_NR_TYPES, add_cnt); } static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) { __u32 *p; p = btf_add_type_offs_mem(btf, 1); if (!p) return -ENOMEM; *p = type_off; return 0; } static void btf_bswap_hdr(struct btf_header *h) { h->magic = bswap_16(h->magic); h->hdr_len = bswap_32(h->hdr_len); h->type_off = bswap_32(h->type_off); h->type_len = bswap_32(h->type_len); h->str_off = bswap_32(h->str_off); h->str_len = bswap_32(h->str_len); } static int btf_parse_hdr(struct btf *btf) { struct btf_header *hdr = btf->hdr; __u32 meta_left; if (btf->raw_size < sizeof(struct btf_header)) { pr_debug("BTF header not found\n"); return -EINVAL; } if (hdr->magic == bswap_16(BTF_MAGIC)) { btf->swapped_endian = true; if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) { pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n", bswap_32(hdr->hdr_len)); return -ENOTSUP; } btf_bswap_hdr(hdr); } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF magic: %x\n", hdr->magic); return -EINVAL; } if (btf->raw_size < hdr->hdr_len) { pr_debug("BTF header len %u larger than data size %u\n", hdr->hdr_len, btf->raw_size); return -EINVAL; } meta_left = btf->raw_size - hdr->hdr_len; if (meta_left < (long long)hdr->str_off + hdr->str_len) { pr_debug("Invalid BTF total size: %u\n", btf->raw_size); return -EINVAL; } if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) { pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n", hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); return -EINVAL; } if (hdr->type_off % 4) { pr_debug("BTF type section is not aligned to 4 bytes\n"); return -EINVAL; } return 0; } static int btf_parse_str_sec(struct btf *btf) { const struct btf_header *hdr = btf->hdr; const char *start = btf->strs_data; const char *end = start + btf->hdr->str_len; if (btf->base_btf && hdr->str_len == 0) return 0; if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) { pr_debug("Invalid BTF string section\n"); return -EINVAL; } if (!btf->base_btf && 
start[0]) { pr_debug("Invalid BTF string section\n"); return -EINVAL; } return 0; } static int btf_type_size(const struct btf_type *t) { const int base_size = sizeof(struct btf_type); __u16 vlen = btf_vlen(t); switch (btf_kind(t)) { case BTF_KIND_FWD: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: case BTF_KIND_TYPE_TAG: return base_size; case BTF_KIND_INT: return base_size + sizeof(__u32); case BTF_KIND_ENUM: return base_size + vlen * sizeof(struct btf_enum); case BTF_KIND_ENUM64: return base_size + vlen * sizeof(struct btf_enum64); case BTF_KIND_ARRAY: return base_size + sizeof(struct btf_array); case BTF_KIND_STRUCT: case BTF_KIND_UNION: return base_size + vlen * sizeof(struct btf_member); case BTF_KIND_FUNC_PROTO: return base_size + vlen * sizeof(struct btf_param); case BTF_KIND_VAR: return base_size + sizeof(struct btf_var); case BTF_KIND_DATASEC: return base_size + vlen * sizeof(struct btf_var_secinfo); case BTF_KIND_DECL_TAG: return base_size + sizeof(struct btf_decl_tag); default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); return -EINVAL; } } static void btf_bswap_type_base(struct btf_type *t) { t->name_off = bswap_32(t->name_off); t->info = bswap_32(t->info); t->type = bswap_32(t->type); } static int btf_bswap_type_rest(struct btf_type *t) { struct btf_var_secinfo *v; struct btf_enum64 *e64; struct btf_member *m; struct btf_array *a; struct btf_param *p; struct btf_enum *e; __u16 vlen = btf_vlen(t); int i; switch (btf_kind(t)) { case BTF_KIND_FWD: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: case BTF_KIND_TYPE_TAG: return 0; case BTF_KIND_INT: *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1)); return 0; case BTF_KIND_ENUM: for (i = 0, e = btf_enum(t); i < vlen; i++, e++) { e->name_off = bswap_32(e->name_off); e->val = bswap_32(e->val); } return 0; case BTF_KIND_ENUM64: for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) { e64->name_off = bswap_32(e64->name_off); e64->val_lo32 = bswap_32(e64->val_lo32); e64->val_hi32 = bswap_32(e64->val_hi32); } return 0; case BTF_KIND_ARRAY: a = btf_array(t); a->type = bswap_32(a->type); a->index_type = bswap_32(a->index_type); a->nelems = bswap_32(a->nelems); return 0; case BTF_KIND_STRUCT: case BTF_KIND_UNION: for (i = 0, m = btf_members(t); i < vlen; i++, m++) { m->name_off = bswap_32(m->name_off); m->type = bswap_32(m->type); m->offset = bswap_32(m->offset); } return 0; case BTF_KIND_FUNC_PROTO: for (i = 0, p = btf_params(t); i < vlen; i++, p++) { p->name_off = bswap_32(p->name_off); p->type = bswap_32(p->type); } return 0; case BTF_KIND_VAR: btf_var(t)->linkage = bswap_32(btf_var(t)->linkage); return 0; case BTF_KIND_DATASEC: for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) { v->type = bswap_32(v->type); v->offset = bswap_32(v->offset); v->size = bswap_32(v->size); } return 0; case BTF_KIND_DECL_TAG: btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx); return 0; default: pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); return -EINVAL; } } static int btf_parse_type_sec(struct btf *btf) { struct btf_header *hdr = btf->hdr; void *next_type = btf->types_data; void *end_type = next_type + hdr->type_len; int err, type_size; while (next_type + sizeof(struct btf_type) <= end_type) { if (btf->swapped_endian) btf_bswap_type_base(next_type); type_size = btf_type_size(next_type); if (type_size < 0) 
return type_size; if (next_type + type_size > end_type) { pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types); return -EINVAL; } if (btf->swapped_endian && btf_bswap_type_rest(next_type)) return -EINVAL; err = btf_add_type_idx_entry(btf, next_type - btf->types_data); if (err) return err; next_type += type_size; btf->nr_types++; } if (next_type != end_type) { pr_warn("BTF types data is malformed\n"); return -EINVAL; } return 0; } static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id) { const char *s; s = btf__str_by_offset(btf, str_off); if (!s) { pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off); return -EINVAL; } return 0; } static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id) { const struct btf_type *t; t = btf__type_by_id(btf, id); if (!t) { pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id); return -EINVAL; } return 0; } static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id) { __u32 kind = btf_kind(t); int err, i, n; err = btf_validate_str(btf, t->name_off, "type name", id); if (err) return err; switch (kind) { case BTF_KIND_UNKN: case BTF_KIND_INT: case BTF_KIND_FWD: case BTF_KIND_FLOAT: break; case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: err = btf_validate_id(btf, t->type, id); if (err) return err; break; case BTF_KIND_ARRAY: { const struct btf_array *a = btf_array(t); err = btf_validate_id(btf, a->type, id); err = err ?: btf_validate_id(btf, a->index_type, id); if (err) return err; break; } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m = btf_members(t); n = btf_vlen(t); for (i = 0; i < n; i++, m++) { err = btf_validate_str(btf, m->name_off, "field name", id); err = err ?: btf_validate_id(btf, m->type, id); if (err) return err; } break; } case BTF_KIND_ENUM: { const struct btf_enum *m = btf_enum(t); n = btf_vlen(t); for (i = 0; i < n; i++, m++) { err = btf_validate_str(btf, m->name_off, "enum name", id); if (err) return err; } break; } case BTF_KIND_ENUM64: { const struct btf_enum64 *m = btf_enum64(t); n = btf_vlen(t); for (i = 0; i < n; i++, m++) { err = btf_validate_str(btf, m->name_off, "enum name", id); if (err) return err; } break; } case BTF_KIND_FUNC: { const struct btf_type *ft; err = btf_validate_id(btf, t->type, id); if (err) return err; ft = btf__type_by_id(btf, t->type); if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) { pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type); return -EINVAL; } break; } case BTF_KIND_FUNC_PROTO: { const struct btf_param *m = btf_params(t); n = btf_vlen(t); for (i = 0; i < n; i++, m++) { err = btf_validate_str(btf, m->name_off, "param name", id); err = err ?: btf_validate_id(btf, m->type, id); if (err) return err; } break; } case BTF_KIND_DATASEC: { const struct btf_var_secinfo *m = btf_var_secinfos(t); n = btf_vlen(t); for (i = 0; i < n; i++, m++) { err = btf_validate_id(btf, m->type, id); if (err) return err; } break; } default: pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind); return -EINVAL; } return 0; } /* Validate basic sanity of BTF. 
It's intentionally less thorough than * kernel's validation and validates only properties of BTF that libbpf relies * on to be correct (e.g., valid type IDs, valid string offsets, etc) */ static int btf_sanity_check(const struct btf *btf) { const struct btf_type *t; __u32 i, n = btf__type_cnt(btf); int err; for (i = btf->start_id; i < n; i++) { t = btf_type_by_id(btf, i); err = btf_validate_type(btf, t, i); if (err) return err; } return 0; } __u32 btf__type_cnt(const struct btf *btf) { return btf->start_id + btf->nr_types; } const struct btf *btf__base_btf(const struct btf *btf) { return btf->base_btf; } /* internal helper returning non-const pointer to a type */ struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id) { if (type_id == 0) return &btf_void; if (type_id < btf->start_id) return btf_type_by_id(btf->base_btf, type_id); return btf->types_data + btf->type_offs[type_id - btf->start_id]; } const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) { if (type_id >= btf->start_id + btf->nr_types) return errno = EINVAL, NULL; return btf_type_by_id((struct btf *)btf, type_id); } static int determine_ptr_size(const struct btf *btf) { static const char * const long_aliases[] = { "long", "long int", "int long", "unsigned long", "long unsigned", "unsigned long int", "unsigned int long", "long unsigned int", "long int unsigned", "int unsigned long", "int long unsigned", }; const struct btf_type *t; const char *name; int i, j, n; if (btf->base_btf && btf->base_btf->ptr_sz > 0) return btf->base_btf->ptr_sz; n = btf__type_cnt(btf); for (i = 1; i < n; i++) { t = btf__type_by_id(btf, i); if (!btf_is_int(t)) continue; if (t->size != 4 && t->size != 8) continue; name = btf__name_by_offset(btf, t->name_off); if (!name) continue; for (j = 0; j < ARRAY_SIZE(long_aliases); j++) { if (strcmp(name, long_aliases[j]) == 0) return t->size; } } return -1; } static size_t btf_ptr_sz(const struct btf *btf) { if (!btf->ptr_sz) ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; } /* Return pointer size this BTF instance assumes. The size is heuristically * determined by looking for 'long' or 'unsigned long' integer type and * recording its size in bytes. If BTF type information doesn't have any such * type, this function returns 0. In the latter case, native architecture's * pointer size is assumed, so will be either 4 or 8, depending on * architecture that libbpf was compiled for. It's possible to override * guessed value by using btf__set_pointer_size() API. */ size_t btf__pointer_size(const struct btf *btf) { if (!btf->ptr_sz) ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); if (btf->ptr_sz < 0) /* not enough BTF type info to guess */ return 0; return btf->ptr_sz; } /* Override or set pointer size in bytes. Only values of 4 and 8 are * supported. */ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) { if (ptr_sz != 4 && ptr_sz != 8) return libbpf_err(-EINVAL); btf->ptr_sz = ptr_sz; return 0; } static bool is_host_big_endian(void) { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return false; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return true; #else # error "Unrecognized __BYTE_ORDER__" #endif } enum btf_endianness btf__endianness(const struct btf *btf) { if (is_host_big_endian()) return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; else return btf->swapped_endian ? 
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; } int btf__set_endianness(struct btf *btf, enum btf_endianness endian) { if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) return libbpf_err(-EINVAL); btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); if (!btf->swapped_endian) { free(btf->raw_data_swapped); btf->raw_data_swapped = NULL; } return 0; } static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || btf_is_fwd(t); } static bool btf_type_is_void_or_null(const struct btf_type *t) { return !t || btf_type_is_void(t); } #define MAX_RESOLVE_DEPTH 32 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) { const struct btf_array *array; const struct btf_type *t; __u32 nelems = 1; __s64 size = -1; int i; t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { switch (btf_kind(t)) { case BTF_KIND_INT: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_DATASEC: case BTF_KIND_FLOAT: size = t->size; goto done; case BTF_KIND_PTR: size = btf_ptr_sz(btf); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: type_id = t->type; break; case BTF_KIND_ARRAY: array = btf_array(t); if (nelems && array->nelems > UINT32_MAX / nelems) return libbpf_err(-E2BIG); nelems *= array->nelems; type_id = array->type; break; default: return libbpf_err(-EINVAL); } t = btf__type_by_id(btf, type_id); } done: if (size < 0) return libbpf_err(-EINVAL); if (nelems && size > UINT32_MAX / nelems) return libbpf_err(-E2BIG); return nelems * size; } int btf__align_of(const struct btf *btf, __u32 id) { const struct btf_type *t = btf__type_by_id(btf, id); __u16 kind = btf_kind(t); switch (kind) { case BTF_KIND_INT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FLOAT: return min(btf_ptr_sz(btf), (size_t)t->size); case BTF_KIND_PTR: return btf_ptr_sz(btf); case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_TYPE_TAG: return btf__align_of(btf, t->type); case BTF_KIND_ARRAY: return btf__align_of(btf, btf_array(t)->type); case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m = btf_members(t); __u16 vlen = btf_vlen(t); int i, max_align = 1, align; for (i = 0; i < vlen; i++, m++) { align = btf__align_of(btf, m->type); if (align <= 0) return libbpf_err(align); max_align = max(max_align, align); /* if field offset isn't aligned according to field * type's alignment, then struct must be packed */ if (btf_member_bitfield_size(t, i) == 0 && (m->offset % (8 * align)) != 0) return 1; } /* if struct/union size isn't a multiple of its alignment, * then struct must be packed */ if ((t->size % max_align) != 0) return 1; return max_align; } default: pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); return errno = EINVAL, 0; } } int btf__resolve_type(const struct btf *btf, __u32 type_id) { const struct btf_type *t; int depth = 0; t = btf__type_by_id(btf, type_id); while (depth < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t) && (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) { type_id = t->type; t = btf__type_by_id(btf, type_id); depth++; } if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) return libbpf_err(-EINVAL); return type_id; } __s32 btf__find_by_name(const struct btf *btf, const char *type_name) { __u32 i, nr_types = btf__type_cnt(btf); if (!strcmp(type_name, "void")) 
return 0; for (i = 1; i < nr_types; i++) { const struct btf_type *t = btf__type_by_id(btf, i); const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) return i; } return libbpf_err(-ENOENT); } static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id, const char *type_name, __u32 kind) { __u32 i, nr_types = btf__type_cnt(btf); if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) return 0; for (i = start_id; i < nr_types; i++) { const struct btf_type *t = btf__type_by_id(btf, i); const char *name; if (btf_kind(t) != kind) continue; name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) return i; } return libbpf_err(-ENOENT); } __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, __u32 kind) { return btf_find_by_name_kind(btf, btf->start_id, type_name, kind); } __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, __u32 kind) { return btf_find_by_name_kind(btf, 1, type_name, kind); } static bool btf_is_modifiable(const struct btf *btf) { return (void *)btf->hdr != btf->raw_data; } void btf__free(struct btf *btf) { if (IS_ERR_OR_NULL(btf)) return; if (btf->fd >= 0) close(btf->fd); if (btf_is_modifiable(btf)) { /* if BTF was modified after loading, it will have a split * in-memory representation for header, types, and strings * sections, so we need to free all of them individually. It * might still have a cached contiguous raw data present, * which will be unconditionally freed below. */ free(btf->hdr); free(btf->types_data); strset__free(btf->strs_set); } free(btf->raw_data); free(btf->raw_data_swapped); free(btf->type_offs); if (btf->owns_base) btf__free(btf->base_btf); free(btf); } static struct btf *btf_new_empty(struct btf *base_btf) { struct btf *btf; btf = calloc(1, sizeof(*btf)); if (!btf) return ERR_PTR(-ENOMEM); btf->nr_types = 0; btf->start_id = 1; btf->start_str_off = 0; btf->fd = -1; btf->ptr_sz = sizeof(void *); btf->swapped_endian = false; if (base_btf) { btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; btf->swapped_endian = base_btf->swapped_endian; } /* +1 for empty string at offset 0 */ btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1); btf->raw_data = calloc(1, btf->raw_size); if (!btf->raw_data) { free(btf); return ERR_PTR(-ENOMEM); } btf->hdr = btf->raw_data; btf->hdr->hdr_len = sizeof(struct btf_header); btf->hdr->magic = BTF_MAGIC; btf->hdr->version = BTF_VERSION; btf->types_data = btf->raw_data + btf->hdr->hdr_len; btf->strs_data = btf->raw_data + btf->hdr->hdr_len; btf->hdr->str_len = base_btf ? 
0 : 1; /* empty string at offset 0 */ return btf; } struct btf *btf__new_empty(void) { return libbpf_ptr(btf_new_empty(NULL)); } struct btf *btf__new_empty_split(struct btf *base_btf) { return libbpf_ptr(btf_new_empty(base_btf)); } static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) { struct btf *btf; int err; btf = calloc(1, sizeof(struct btf)); if (!btf) return ERR_PTR(-ENOMEM); btf->nr_types = 0; btf->start_id = 1; btf->start_str_off = 0; btf->fd = -1; if (base_btf) { btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; } btf->raw_data = malloc(size); if (!btf->raw_data) { err = -ENOMEM; goto done; } memcpy(btf->raw_data, data, size); btf->raw_size = size; btf->hdr = btf->raw_data; err = btf_parse_hdr(btf); if (err) goto done; btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off; btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off; err = btf_parse_str_sec(btf); err = err ?: btf_parse_type_sec(btf); err = err ?: btf_sanity_check(btf); if (err) goto done; done: if (err) { btf__free(btf); return ERR_PTR(err); } return btf; } struct btf *btf__new(const void *data, __u32 size) { return libbpf_ptr(btf_new(data, size, NULL)); } struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf) { return libbpf_ptr(btf_new(data, size, base_btf)); } struct btf_elf_secs { Elf_Data *btf_data; Elf_Data *btf_ext_data; Elf_Data *btf_base_data; }; static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs) { Elf_Scn *scn = NULL; Elf_Data *data; GElf_Ehdr ehdr; size_t shstrndx; int idx = 0; if (!gelf_getehdr(elf, &ehdr)) { pr_warn("failed to get EHDR from %s\n", path); goto err; } if (elf_getshdrstrndx(elf, &shstrndx)) { pr_warn("failed to get section names section index for %s\n", path); goto err; } if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { pr_warn("failed to get e_shstrndx from %s\n", path); goto err; } while ((scn = elf_nextscn(elf, scn)) != NULL) { Elf_Data **field; GElf_Shdr sh; char *name; idx++; if (gelf_getshdr(scn, &sh) != &sh) { pr_warn("failed to get section(%d) header from %s\n", idx, path); goto err; } name = elf_strptr(elf, shstrndx, sh.sh_name); if (!name) { pr_warn("failed to get section(%d) name from %s\n", idx, path); goto err; } if (strcmp(name, BTF_ELF_SEC) == 0) field = &secs->btf_data; else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) field = &secs->btf_ext_data; else if (strcmp(name, BTF_BASE_ELF_SEC) == 0) field = &secs->btf_base_data; else continue; data = elf_getdata(scn, 0); if (!data) { pr_warn("failed to get section(%d, %s) data from %s\n", idx, name, path); goto err; } *field = data; } return 0; err: return -LIBBPF_ERRNO__FORMAT; } static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) { struct btf_elf_secs secs = {}; struct btf *dist_base_btf = NULL; struct btf *btf = NULL; int err = 0, fd = -1; Elf *elf = NULL; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", path); return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } fd = open(path, O_RDONLY | O_CLOEXEC); if (fd < 0) { err = -errno; pr_warn("failed to open %s: %s\n", path, strerror(errno)); return ERR_PTR(err); } elf = elf_begin(fd, ELF_C_READ, NULL); if (!elf) { pr_warn("failed to open %s as ELF file\n", path); goto done; } err = btf_find_elf_sections(elf, path, &secs); if (err) goto done; if (!secs.btf_data) { pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path); err = 
-ENODATA; goto done; } if (secs.btf_base_data) { dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, NULL); if (IS_ERR(dist_base_btf)) { err = PTR_ERR(dist_base_btf); dist_base_btf = NULL; goto done; } } btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, dist_base_btf ?: base_btf); if (IS_ERR(btf)) { err = PTR_ERR(btf); goto done; } if (dist_base_btf && base_btf) { err = btf__relocate(btf, base_btf); if (err) goto done; btf__free(dist_base_btf); dist_base_btf = NULL; } if (dist_base_btf) btf->owns_base = true; switch (gelf_getclass(elf)) { case ELFCLASS32: btf__set_pointer_size(btf, 4); break; case ELFCLASS64: btf__set_pointer_size(btf, 8); break; default: pr_warn("failed to get ELF class (bitness) for %s\n", path); break; } if (btf_ext && secs.btf_ext_data) { *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size); if (IS_ERR(*btf_ext)) { err = PTR_ERR(*btf_ext); goto done; } } else if (btf_ext) { *btf_ext = NULL; } done: if (elf) elf_end(elf); close(fd); if (!err) return btf; if (btf_ext) btf_ext__free(*btf_ext); btf__free(dist_base_btf); btf__free(btf); return ERR_PTR(err); } struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) { return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext)); } struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf) { return libbpf_ptr(btf_parse_elf(path, base_btf, NULL)); } static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) { struct btf *btf = NULL; void *data = NULL; FILE *f = NULL; __u16 magic; int err = 0; long sz; f = fopen(path, "rbe"); if (!f) { err = -errno; goto err_out; } /* check BTF magic */ if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { err = -EIO; goto err_out; } if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) { /* definitely not a raw BTF */ err = -EPROTO; goto err_out; } /* get file size */ if (fseek(f, 0, SEEK_END)) { err = -errno; goto err_out; } sz = ftell(f); if (sz < 0) { err = -errno; goto err_out; } /* rewind to the start */ if (fseek(f, 0, SEEK_SET)) { err = -errno; goto err_out; } /* pre-alloc memory and read all of BTF data */ data = malloc(sz); if (!data) { err = -ENOMEM; goto err_out; } if (fread(data, 1, sz, f) < sz) { err = -EIO; goto err_out; } /* finally parse BTF data */ btf = btf_new(data, sz, base_btf); err_out: free(data); if (f) fclose(f); return err ? 
ERR_PTR(err) : btf; } struct btf *btf__parse_raw(const char *path) { return libbpf_ptr(btf_parse_raw(path, NULL)); } struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf) { return libbpf_ptr(btf_parse_raw(path, base_btf)); } static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) { struct btf *btf; int err; if (btf_ext) *btf_ext = NULL; btf = btf_parse_raw(path, base_btf); err = libbpf_get_error(btf); if (!err) return btf; if (err != -EPROTO) return ERR_PTR(err); return btf_parse_elf(path, base_btf, btf_ext); } struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) { return libbpf_ptr(btf_parse(path, NULL, btf_ext)); } struct btf *btf__parse_split(const char *path, struct btf *base_btf) { return libbpf_ptr(btf_parse(path, base_btf, NULL)); } static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level, int token_fd) { LIBBPF_OPTS(bpf_btf_load_opts, opts); __u32 buf_sz = 0, raw_size; char *buf = NULL, *tmp; void *raw_data; int err = 0; if (btf->fd >= 0) return libbpf_err(-EEXIST); if (log_sz && !log_buf) return libbpf_err(-EINVAL); /* cache native raw data representation */ raw_data = btf_get_raw_data(btf, &raw_size, false); if (!raw_data) { err = -ENOMEM; goto done; } btf->raw_size = raw_size; btf->raw_data = raw_data; retry_load: /* if log_level is 0, we won't provide log_buf/log_size to the kernel, * initially. Only if BTF loading fails, we bump log_level to 1 and * retry, using either auto-allocated or custom log_buf. This way * non-NULL custom log_buf provides a buffer just in case, but hopes * for successful load and no need for log_buf. */ if (log_level) { /* if caller didn't provide custom log_buf, we'll keep * allocating our own progressively bigger buffers for BTF * verification log */ if (!log_buf) { buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2); tmp = realloc(buf, buf_sz); if (!tmp) { err = -ENOMEM; goto done; } buf = tmp; buf[0] = '\0'; } opts.log_buf = log_buf ? log_buf : buf; opts.log_size = log_buf ? log_sz : buf_sz; opts.log_level = log_level; } opts.token_fd = token_fd; if (token_fd) opts.btf_flags |= BPF_F_TOKEN_FD; btf->fd = bpf_btf_load(raw_data, raw_size, &opts); if (btf->fd < 0) { /* time to turn on verbose mode and try again */ if (log_level == 0) { log_level = 1; goto retry_load; } /* only retry if caller didn't provide custom log_buf, but * make sure we can never overflow buf_sz */ if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) goto retry_load; err = -errno; pr_warn("BTF loading error: %d\n", err); /* don't print out contents of custom log_buf */ if (!log_buf && buf[0]) pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); } done: free(buf); return libbpf_err(err); } int btf__load_into_kernel(struct btf *btf) { return btf_load_into_kernel(btf, NULL, 0, 0, 0); } int btf__fd(const struct btf *btf) { return btf->fd; } void btf__set_fd(struct btf *btf, int fd) { btf->fd = fd; } static const void *btf_strs_data(const struct btf *btf) { return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set); } static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian) { struct btf_header *hdr = btf->hdr; struct btf_type *t; void *data, *p; __u32 data_sz; int i; data = swap_endian ? 
btf->raw_data_swapped : btf->raw_data; if (data) { *size = btf->raw_size; return data; } data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len; data = calloc(1, data_sz); if (!data) return NULL; p = data; memcpy(p, hdr, hdr->hdr_len); if (swap_endian) btf_bswap_hdr(p); p += hdr->hdr_len; memcpy(p, btf->types_data, hdr->type_len); if (swap_endian) { for (i = 0; i < btf->nr_types; i++) { t = p + btf->type_offs[i]; /* btf_bswap_type_rest() relies on native t->info, so * we swap base type info after we swapped all the * additional information */ if (btf_bswap_type_rest(t)) goto err_out; btf_bswap_type_base(t); } } p += hdr->type_len; memcpy(p, btf_strs_data(btf), hdr->str_len); p += hdr->str_len; *size = data_sz; return data; err_out: free(data); return NULL; } const void *btf__raw_data(const struct btf *btf_ro, __u32 *size) { struct btf *btf = (struct btf *)btf_ro; __u32 data_sz; void *data; data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); if (!data) return errno = ENOMEM, NULL; btf->raw_size = data_sz; if (btf->swapped_endian) btf->raw_data_swapped = data; else btf->raw_data = data; *size = data_sz; return data; } __attribute__((alias("btf__raw_data"))) const void *btf__get_raw_data(const struct btf *btf, __u32 *size); const char *btf__str_by_offset(const struct btf *btf, __u32 offset) { if (offset < btf->start_str_off) return btf__str_by_offset(btf->base_btf, offset); else if (offset - btf->start_str_off < btf->hdr->str_len) return btf_strs_data(btf) + (offset - btf->start_str_off); else return errno = EINVAL, NULL; } const char *btf__name_by_offset(const struct btf *btf, __u32 offset) { return btf__str_by_offset(btf, offset); } struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) { struct bpf_btf_info btf_info; __u32 len = sizeof(btf_info); __u32 last_size; struct btf *btf; void *ptr; int err; /* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so * let's start with a sane default - 4KiB here - and resize it only if * bpf_btf_get_info_by_fd() needs a bigger buffer. */ last_size = 4096; ptr = malloc(last_size); if (!ptr) return ERR_PTR(-ENOMEM); memset(&btf_info, 0, sizeof(btf_info)); btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); if (!err && btf_info.btf_size > last_size) { void *temp_ptr; last_size = btf_info.btf_size; temp_ptr = realloc(ptr, last_size); if (!temp_ptr) { btf = ERR_PTR(-ENOMEM); goto exit_free; } ptr = temp_ptr; len = sizeof(btf_info); memset(&btf_info, 0, sizeof(btf_info)); btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); } if (err || btf_info.btf_size > last_size) { btf = err ? 
ERR_PTR(-errno) : ERR_PTR(-E2BIG); goto exit_free; } btf = btf_new(ptr, btf_info.btf_size, base_btf); exit_free: free(ptr); return btf; } struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) { struct btf *btf; int btf_fd; btf_fd = bpf_btf_get_fd_by_id(id); if (btf_fd < 0) return libbpf_err_ptr(-errno); btf = btf_get_from_fd(btf_fd, base_btf); close(btf_fd); return libbpf_ptr(btf); } struct btf *btf__load_from_kernel_by_id(__u32 id) { return btf__load_from_kernel_by_id_split(id, NULL); } static void btf_invalidate_raw_data(struct btf *btf) { if (btf->raw_data) { free(btf->raw_data); btf->raw_data = NULL; } if (btf->raw_data_swapped) { free(btf->raw_data_swapped); btf->raw_data_swapped = NULL; } } /* Ensure BTF is ready to be modified (by splitting into a three memory * regions for header, types, and strings). Also invalidate cached * raw_data, if any. */ static int btf_ensure_modifiable(struct btf *btf) { void *hdr, *types; struct strset *set = NULL; int err = -ENOMEM; if (btf_is_modifiable(btf)) { /* any BTF modification invalidates raw_data */ btf_invalidate_raw_data(btf); return 0; } /* split raw data into three memory regions */ hdr = malloc(btf->hdr->hdr_len); types = malloc(btf->hdr->type_len); if (!hdr || !types) goto err_out; memcpy(hdr, btf->hdr, btf->hdr->hdr_len); memcpy(types, btf->types_data, btf->hdr->type_len); /* build lookup index for all strings */ set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len); if (IS_ERR(set)) { err = PTR_ERR(set); goto err_out; } /* only when everything was successful, update internal state */ btf->hdr = hdr; btf->types_data = types; btf->types_data_cap = btf->hdr->type_len; btf->strs_data = NULL; btf->strs_set = set; /* if BTF was created from scratch, all strings are guaranteed to be * unique and deduplicated */ if (btf->hdr->str_len == 0) btf->strs_deduped = true; if (!btf->base_btf && btf->hdr->str_len == 1) btf->strs_deduped = true; /* invalidate raw_data representation */ btf_invalidate_raw_data(btf); return 0; err_out: strset__free(set); free(hdr); free(types); return err; } /* Find an offset in BTF string section that corresponds to a given string *s*. * Returns: * - >0 offset into string section, if string is found; * - -ENOENT, if string is not in the string section; * - <0, on any other error. */ int btf__find_str(struct btf *btf, const char *s) { int off; if (btf->base_btf) { off = btf__find_str(btf->base_btf, s); if (off != -ENOENT) return off; } /* BTF needs to be in a modifiable state to build string lookup index */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); off = strset__find_str(btf->strs_set, s); if (off < 0) return libbpf_err(off); return btf->start_str_off + off; } /* Add a string s to the BTF string section. * Returns: * - > 0 offset into string section, on success; * - < 0, on error. 
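 *
 * Example (illustrative sketch; error handling elided): interning a
 * string and reading it back through the returned offset:
 *
 *	const char *name;
 *	int off = btf__add_str(btf, "my_string");
 *
 *	if (off > 0)
 *		name = btf__str_by_offset(btf, off);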
*/ int btf__add_str(struct btf *btf, const char *s) { int off; if (btf->base_btf) { off = btf__find_str(btf->base_btf, s); if (off != -ENOENT) return off; } if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); off = strset__add_str(btf->strs_set, s); if (off < 0) return libbpf_err(off); btf->hdr->str_len = strset__data_size(btf->strs_set); return btf->start_str_off + off; } static void *btf_add_type_mem(struct btf *btf, size_t add_sz) { return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1, btf->hdr->type_len, UINT_MAX, add_sz); } static void btf_type_inc_vlen(struct btf_type *t) { t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); } static int btf_commit_type(struct btf *btf, int data_sz) { int err; err = btf_add_type_idx_entry(btf, btf->hdr->type_len); if (err) return libbpf_err(err); btf->hdr->type_len += data_sz; btf->hdr->str_off += data_sz; btf->nr_types++; return btf->start_id + btf->nr_types - 1; } struct btf_pipe { const struct btf *src; struct btf *dst; struct hashmap *str_off_map; /* map string offsets from src to dst */ }; static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off) { long mapped_off; int off, err; if (!*str_off) /* nothing to do for empty strings */ return 0; if (p->str_off_map && hashmap__find(p->str_off_map, *str_off, &mapped_off)) { *str_off = mapped_off; return 0; } off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off)); if (off < 0) return off; /* Remember string mapping from src to dst. It avoids * performing expensive string comparisons. */ if (p->str_off_map) { err = hashmap__append(p->str_off_map, *str_off, off); if (err) return err; } *str_off = off; return 0; } static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type) { struct btf_field_iter it; struct btf_type *t; __u32 *str_off; int sz, err; sz = btf_type_size(src_type); if (sz < 0) return libbpf_err(sz); /* deconstruct BTF, if necessary, and invalidate raw_data */ if (btf_ensure_modifiable(p->dst)) return libbpf_err(-ENOMEM); t = btf_add_type_mem(p->dst, sz); if (!t) return libbpf_err(-ENOMEM); memcpy(t, src_type, sz); err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (err) return libbpf_err(err); while ((str_off = btf_field_iter_next(&it))) { err = btf_rewrite_str(p, str_off); if (err) return libbpf_err(err); } return btf_commit_type(p->dst, sz); } int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) { struct btf_pipe p = { .src = src_btf, .dst = btf }; return btf_add_type(&p, src_type); } static size_t btf_dedup_identity_hash_fn(long key, void *ctx); static bool btf_dedup_equal_fn(long k1, long k2, void *ctx); int btf__add_btf(struct btf *btf, const struct btf *src_btf) { struct btf_pipe p = { .src = src_btf, .dst = btf }; int data_sz, sz, cnt, i, err, old_strs_len; __u32 *off; void *t; /* appending split BTF isn't supported yet */ if (src_btf->base_btf) return libbpf_err(-ENOTSUP); /* deconstruct BTF, if necessary, and invalidate raw_data */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); /* remember original strings section size if we have to roll back * partial strings section changes */ old_strs_len = btf->hdr->str_len; data_sz = src_btf->hdr->type_len; cnt = btf__type_cnt(src_btf) - 1; /* pre-allocate enough memory for new types */ t = btf_add_type_mem(btf, data_sz); if (!t) return libbpf_err(-ENOMEM); /* pre-allocate enough memory for type offset index for new types */ off = btf_add_type_offs_mem(btf, cnt); if (!off) return libbpf_err(-ENOMEM); /* Map the string 
offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			goto err_out;
		while ((str_off = btf_field_iter_next(&it))) {
			err = btf_rewrite_str(&p, str_off);
			if (err)
				goto err_out;
		}

		/* remap all type IDs referenced from this BTF type */
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;
		while ((type_id = btf_field_iter_next(&it))) {
			if (!*type_id) /* nothing to do for VOID references */
				continue;
			/* we haven't updated btf's type count yet, so
			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
			 * add to all newly added BTF types
			 */
			*type_id += btf->start_id + btf->nr_types - 1;
		}

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying. But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and make them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}

/*
 * Append new BTF_KIND_INT type with:
 * - *name* - non-empty, non-NULL type name;
 * - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
 * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
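 *
 * Illustrative usage sketch (an editor's example, not part of the original
 * comment; assumes a writable BTF object `btf`, e.g. from btf__new_empty()):
 *
 *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	int char_id = btf__add_int(btf, "char", 1, BTF_INT_SIGNED | BTF_INT_CHAR);
 *
 * Each call returns the new type ID (> 0) or a negative error code.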
 */
int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* byte_sz must be power of 2 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
		return libbpf_err(-EINVAL);
	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(int);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* if something goes wrong later, we might end up with an extra string,
	 * but that shouldn't be a problem, because BTF can't be constructed
	 * completely anyway and will most probably be just discarded
	 */
	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
	t->size = byte_sz;
	/* set INT info, we don't allow setting legacy bit offset/size */
	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);

	return btf_commit_type(btf, sz);
}

/*
 * Append new BTF_KIND_FLOAT type with:
 * - *name* - non-empty, non-NULL type name;
 * - *byte_sz* - size of the type, in bytes;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* byte_sz must be one of the explicitly allowed values */
	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
	    byte_sz != 16)
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}

/* it's completely legal to append BTF types with type IDs pointing forward to
 * types that haven't been appended yet, so we only make sure that id looks
 * sane; we can't guarantee that ID will always be valid
 */
static int validate_type_id(int id)
{
	if (id < 0 || id > BTF_MAX_NR_TYPES)
		return -EINVAL;
	return 0;
}

/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->type = ref_type_id;

	return btf_commit_type(btf, sz);
}

/*
 * Append new BTF_KIND_PTR type with:
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
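 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment; `int_id` stands for a type ID returned by an earlier
 * btf__add_int() call):
 *
 *	int ptr_id = btf__add_ptr(btf, int_id);		appends "int *"
 *	int pptr_id = btf__add_ptr(btf, ptr_id);	appends "int **"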
*/ int btf__add_ptr(struct btf *btf, int ref_type_id) { return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id); } /* * Append new BTF_KIND_ARRAY type with: * - *index_type_id* - type ID of the type describing array index; * - *elem_type_id* - type ID of the type describing array element; * - *nr_elems* - the size of the array; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. */ int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems) { struct btf_type *t; struct btf_array *a; int sz; if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) return libbpf_err(-EINVAL); if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_type) + sizeof(struct btf_array); t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM); t->name_off = 0; t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); t->size = 0; a = btf_array(t); a->type = elem_type_id; a->index_type = index_type_id; a->nelems = nr_elems; return btf_commit_type(btf, sz); } /* generic STRUCT/UNION append function */ static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) { struct btf_type *t; int sz, name_off = 0; if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_type); t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM); if (name && name[0]) { name_off = btf__add_str(btf, name); if (name_off < 0) return name_off; } /* start out with vlen=0 and no kflag; this will be adjusted when * adding each member */ t->name_off = name_off; t->info = btf_type_info(kind, 0, 0); t->size = bytes_sz; return btf_commit_type(btf, sz); } /* * Append new BTF_KIND_STRUCT type with: * - *name* - name of the struct, can be NULL or empty for anonymous structs; * - *byte_sz* - size of the struct, in bytes; * * Struct initially has no fields in it. Fields can be added by * btf__add_field() right after btf__add_struct() succeeds. * * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. */ int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz) { return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz); } /* * Append new BTF_KIND_UNION type with: * - *name* - name of the union, can be NULL or empty for anonymous union; * - *byte_sz* - size of the union, in bytes; * * Union initially has no fields in it. Fields can be added by * btf__add_field() right after btf__add_union() succeeds. All fields * should have *bit_offset* of 0. * * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. */ int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) { return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); } static struct btf_type *btf_last_type(struct btf *btf) { return btf_type_by_id(btf, btf__type_cnt(btf) - 1); } /* * Append new field for the current STRUCT/UNION type with: * - *name* - name of the field, can be NULL or empty for anonymous field; * - *type_id* - type ID for the type describing field type; * - *bit_offset* - bit offset of the start of the field within struct/union; * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; * Returns: * - 0, on success; * - <0, on error. 
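 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment) describing "struct point { int x; int y; };", with `int_id`
 * being a 4-byte BTF_KIND_INT added earlier (error handling omitted):
 *
 *	int sid = btf__add_struct(btf, "point", 8);
 *	btf__add_field(btf, "x", int_id, 0, 0);
 *	btf__add_field(btf, "y", int_id, 32, 0);
 *
 * Offsets are in bits; bit_size is 0 because neither field is a bitfield.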
*/ int btf__add_field(struct btf *btf, const char *name, int type_id, __u32 bit_offset, __u32 bit_size) { struct btf_type *t; struct btf_member *m; bool is_bitfield; int sz, name_off = 0; /* last type should be union/struct */ if (btf->nr_types == 0) return libbpf_err(-EINVAL); t = btf_last_type(btf); if (!btf_is_composite(t)) return libbpf_err(-EINVAL); if (validate_type_id(type_id)) return libbpf_err(-EINVAL); /* best-effort bit field offset/size enforcement */ is_bitfield = bit_size || (bit_offset % 8 != 0); if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) return libbpf_err(-EINVAL); /* only offset 0 is allowed for unions */ if (btf_is_union(t) && bit_offset) return libbpf_err(-EINVAL); /* decompose and invalidate raw data */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_member); m = btf_add_type_mem(btf, sz); if (!m) return libbpf_err(-ENOMEM); if (name && name[0]) { name_off = btf__add_str(btf, name); if (name_off < 0) return name_off; } m->name_off = name_off; m->type = type_id; m->offset = bit_offset | (bit_size << 24); /* btf_add_type_mem can invalidate t pointer */ t = btf_last_type(btf); /* update parent type's vlen and kflag */ t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); btf->hdr->type_len += sz; btf->hdr->str_off += sz; return 0; } static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz, bool is_signed, __u8 kind) { struct btf_type *t; int sz, name_off = 0; /* byte_sz must be power of 2 */ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) return libbpf_err(-EINVAL); if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_type); t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM); if (name && name[0]) { name_off = btf__add_str(btf, name); if (name_off < 0) return name_off; } /* start out with vlen=0; it will be adjusted when adding enum values */ t->name_off = name_off; t->info = btf_type_info(kind, 0, is_signed); t->size = byte_sz; return btf_commit_type(btf, sz); } /* * Append new BTF_KIND_ENUM type with: * - *name* - name of the enum, can be NULL or empty for anonymous enums; * - *byte_sz* - size of the enum, in bytes. * * Enum initially has no enum values in it (and corresponds to enum forward * declaration). Enumerator values can be added by btf__add_enum_value() * immediately after btf__add_enum() succeeds. * * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. */ int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) { /* * set the signedness to be unsigned, it will change to signed * if any later enumerator is negative. */ return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM); } /* * Append new enum value for the current ENUM type with: * - *name* - name of the enumerator value, can't be NULL or empty; * - *value* - integer value corresponding to enum value *name*; * Returns: * - 0, on success; * - <0, on error. 
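 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment) describing "enum color { RED = 0, GREEN = 1 };" (error
 * handling omitted):
 *
 *	int eid = btf__add_enum(btf, "color", 4);
 *	btf__add_enum_value(btf, "RED", 0);
 *	btf__add_enum_value(btf, "GREEN", 1);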
*/ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value) { struct btf_type *t; struct btf_enum *v; int sz, name_off; /* last type should be BTF_KIND_ENUM */ if (btf->nr_types == 0) return libbpf_err(-EINVAL); t = btf_last_type(btf); if (!btf_is_enum(t)) return libbpf_err(-EINVAL); /* non-empty name */ if (!name || !name[0]) return libbpf_err(-EINVAL); if (value < INT_MIN || value > UINT_MAX) return libbpf_err(-E2BIG); /* decompose and invalidate raw data */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_enum); v = btf_add_type_mem(btf, sz); if (!v) return libbpf_err(-ENOMEM); name_off = btf__add_str(btf, name); if (name_off < 0) return name_off; v->name_off = name_off; v->val = value; /* update parent type's vlen */ t = btf_last_type(btf); btf_type_inc_vlen(t); /* if negative value, set signedness to signed */ if (value < 0) t->info = btf_type_info(btf_kind(t), btf_vlen(t), true); btf->hdr->type_len += sz; btf->hdr->str_off += sz; return 0; } /* * Append new BTF_KIND_ENUM64 type with: * - *name* - name of the enum, can be NULL or empty for anonymous enums; * - *byte_sz* - size of the enum, in bytes. * - *is_signed* - whether the enum values are signed or not; * * Enum initially has no enum values in it (and corresponds to enum forward * declaration). Enumerator values can be added by btf__add_enum64_value() * immediately after btf__add_enum64() succeeds. * * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. */ int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz, bool is_signed) { return btf_add_enum_common(btf, name, byte_sz, is_signed, BTF_KIND_ENUM64); } /* * Append new enum value for the current ENUM64 type with: * - *name* - name of the enumerator value, can't be NULL or empty; * - *value* - integer value corresponding to enum value *name*; * Returns: * - 0, on success; * - <0, on error. */ int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value) { struct btf_enum64 *v; struct btf_type *t; int sz, name_off; /* last type should be BTF_KIND_ENUM64 */ if (btf->nr_types == 0) return libbpf_err(-EINVAL); t = btf_last_type(btf); if (!btf_is_enum64(t)) return libbpf_err(-EINVAL); /* non-empty name */ if (!name || !name[0]) return libbpf_err(-EINVAL); /* decompose and invalidate raw data */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_enum64); v = btf_add_type_mem(btf, sz); if (!v) return libbpf_err(-ENOMEM); name_off = btf__add_str(btf, name); if (name_off < 0) return name_off; v->name_off = name_off; v->val_lo32 = (__u32)value; v->val_hi32 = value >> 32; /* update parent type's vlen */ t = btf_last_type(btf); btf_type_inc_vlen(t); btf->hdr->type_len += sz; btf->hdr->str_off += sz; return 0; } /* * Append new BTF_KIND_FWD type with: * - *name*, non-empty/non-NULL name; * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, * BTF_FWD_UNION, or BTF_FWD_ENUM; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error. 
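 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment): modeling a pointer to a struct that is only forward-declared,
 * as in "struct task_struct *p;":
 *
 *	int fwd_id = btf__add_fwd(btf, "task_struct", BTF_FWD_STRUCT);
 *	int ptr_id = btf__add_ptr(btf, fwd_id);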
 */
int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
{
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	switch (fwd_kind) {
	case BTF_FWD_STRUCT:
	case BTF_FWD_UNION: {
		struct btf_type *t;
		int id;

		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
		if (id <= 0)
			return id;
		t = btf_type_by_id(btf, id);
		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
		return id;
	}
	case BTF_FWD_ENUM:
		/* enum forward in BTF currently is just an enum with no enum
		 * values; we also assume a standard 4-byte size for it
		 */
		return btf__add_enum(btf, name, sizeof(int));
	default:
		return libbpf_err(-EINVAL);
	}
}

/*
 * Append new BTF_KIND_TYPEDEF type with:
 * - *name*, non-empty/non-NULL name;
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
{
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
}

/*
 * Append new BTF_KIND_VOLATILE type with:
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_volatile(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
}

/*
 * Append new BTF_KIND_CONST type with:
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_const(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
}

/*
 * Append new BTF_KIND_RESTRICT type with:
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_restrict(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
}

/*
 * Append new BTF_KIND_TYPE_TAG type with:
 * - *value*, non-empty/non-NULL tag value;
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
{
	if (!value || !value[0])
		return libbpf_err(-EINVAL);

	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
}

/*
 * Append new BTF_KIND_FUNC type with:
 * - *name*, non-empty/non-NULL name;
 * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_func(struct btf *btf, const char *name,
		  enum btf_func_linkage linkage, int proto_type_id)
{
	int id;

	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
	    linkage != BTF_FUNC_EXTERN)
		return libbpf_err(-EINVAL);

	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
	if (id > 0) {
		struct btf_type *t = btf_type_by_id(btf, id);

		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
	}
	return libbpf_err(id);
}

/*
 * Append new BTF_KIND_FUNC_PROTO with:
 * - *ret_type_id* - type ID for return result of a function.
 *
 * Function prototype initially has no arguments, but they can be added by
 * btf__add_func_param() one by one, immediately after
 * btf__add_func_proto() succeeded.
 *
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
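 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment) describing "int sum(int a, int b);", with `int_id` added
 * earlier (error handling omitted):
 *
 *	int proto_id = btf__add_func_proto(btf, int_id);
 *	btf__add_func_param(btf, "a", int_id);
 *	btf__add_func_param(btf, "b", int_id);
 *	int func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);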
 */
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
	struct btf_type *t;
	int sz;

	if (validate_type_id(ret_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* start out with vlen=0; this will be adjusted when adding function
	 * parameters, if necessary
	 */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
	t->type = ret_type_id;

	return btf_commit_type(btf, sz);
}

/*
 * Append new function parameter for current FUNC_PROTO type with:
 * - *name* - parameter name, can be NULL or empty;
 * - *type_id* - type ID describing the type of the parameter.
 * Returns:
 * - 0, on success;
 * - <0, on error.
 */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}

/*
 * Append new BTF_KIND_VAR type with:
 * - *name* - non-empty/non-NULL name;
 * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
 *   BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
 * - *type_id* - type ID of the type describing the type of the variable.
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
 */
int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
{
	struct btf_type *t;
	struct btf_var *v;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    linkage != BTF_VAR_GLOBAL_EXTERN)
		return libbpf_err(-EINVAL);
	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
	t->type = type_id;

	v = btf_var(t);
	v->linkage = linkage;

	return btf_commit_type(btf, sz);
}

/*
 * Append new BTF_KIND_DATASEC type with:
 * - *name* - non-empty/non-NULL name;
 * - *byte_sz* - data section size, in bytes.
 *
 * Data section is initially empty. Variable info can be added with
 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
 *
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
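 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment): describing a 4-byte global variable placed in ".data", with
 * `int_id` added earlier (error handling omitted):
 *
 *	int var_id = btf__add_var(btf, "counter", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 *	int sec_id = btf__add_datasec(btf, ".data", 4);
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);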
 */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}

/*
 * Append new data section variable information entry for current DATASEC type:
 * - *var_type_id* - type ID, describing type of the variable;
 * - *offset* - variable offset within data section, in bytes;
 * - *byte_sz* - variable size, in bytes.
 *
 * Returns:
 * - 0, on success;
 * - <0, on error.
 */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}

/*
 * Append new BTF_KIND_DECL_TAG type with:
 * - *value* - non-empty/non-NULL string;
 * - *ref_type_id* - referenced type ID, it might not exist yet;
 * - *component_idx* - -1 for tagging reference type, otherwise struct/union
 *   member or function argument index;
 * Returns:
 * - >0, type ID of newly added BTF type;
 * - <0, on error.
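 *
 * Illustrative sketch (an editor's example, not part of the original
 * comment): tagging a struct type as a whole and then its second member;
 * `struct_id` is assumed to come from an earlier btf__add_struct() call:
 *
 *	btf__add_decl_tag(btf, "user", struct_id, -1);
 *	btf__add_decl_tag(btf, "percpu", struct_id, 1);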
*/ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, int component_idx) { struct btf_type *t; int sz, value_off; if (!value || !value[0] || component_idx < -1) return libbpf_err(-EINVAL); if (validate_type_id(ref_type_id)) return libbpf_err(-EINVAL); if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM); sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag); t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM); value_off = btf__add_str(btf, value); if (value_off < 0) return value_off; t->name_off = value_off; t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false); t->type = ref_type_id; btf_decl_tag(t)->component_idx = component_idx; return btf_commit_type(btf, sz); } struct btf_ext_sec_info_param { __u32 off; __u32 len; __u32 min_rec_size; struct btf_ext_info *ext_info; const char *desc; }; /* * Parse a single info subsection of the BTF.ext info data: * - validate subsection structure and elements * - save info subsection start and sizing details in struct btf_ext * - endian-independent operation, for calling before byte-swapping */ static int btf_ext_parse_sec_info(struct btf_ext *btf_ext, struct btf_ext_sec_info_param *ext_sec, bool is_native) { const struct btf_ext_info_sec *sinfo; struct btf_ext_info *ext_info; __u32 info_left, record_size; size_t sec_cnt = 0; void *info; if (ext_sec->len == 0) return 0; if (ext_sec->off & 0x03) { pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", ext_sec->desc); return -EINVAL; } /* The start of the info sec (including the __u32 record_size). */ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; info_left = ext_sec->len; if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", ext_sec->desc, ext_sec->off, ext_sec->len); return -EINVAL; } /* At least a record size */ if (info_left < sizeof(__u32)) { pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); return -EINVAL; } /* The record size needs to meet either the minimum standard or, when * handling non-native endianness data, the exact standard so as * to allow safe byte-swapping. */ record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info); if (record_size < ext_sec->min_rec_size || (!is_native && record_size != ext_sec->min_rec_size) || record_size & 0x03) { pr_debug("%s section in .BTF.ext has invalid record size %u\n", ext_sec->desc, record_size); return -EINVAL; } sinfo = info + sizeof(__u32); info_left -= sizeof(__u32); /* If no records, return failure now so .BTF.ext won't be used. */ if (!info_left) { pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc); return -EINVAL; } while (info_left) { unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); __u64 total_record_size; __u32 num_records; if (info_left < sec_hdrlen) { pr_debug("%s section header is not found in .BTF.ext\n", ext_sec->desc); return -EINVAL; } num_records = is_native ? 
sinfo->num_info : bswap_32(sinfo->num_info); if (num_records == 0) { pr_debug("%s section has incorrect num_records in .BTF.ext\n", ext_sec->desc); return -EINVAL; } total_record_size = sec_hdrlen + (__u64)num_records * record_size; if (info_left < total_record_size) { pr_debug("%s section has incorrect num_records in .BTF.ext\n", ext_sec->desc); return -EINVAL; } info_left -= total_record_size; sinfo = (void *)sinfo + total_record_size; sec_cnt++; } ext_info = ext_sec->ext_info; ext_info->len = ext_sec->len - sizeof(__u32); ext_info->rec_size = record_size; ext_info->info = info + sizeof(__u32); ext_info->sec_cnt = sec_cnt; return 0; } /* Parse all info secs in the BTF.ext info data */ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) { struct btf_ext_sec_info_param func_info = { .off = btf_ext->hdr->func_info_off, .len = btf_ext->hdr->func_info_len, .min_rec_size = sizeof(struct bpf_func_info_min), .ext_info = &btf_ext->func_info, .desc = "func_info" }; struct btf_ext_sec_info_param line_info = { .off = btf_ext->hdr->line_info_off, .len = btf_ext->hdr->line_info_len, .min_rec_size = sizeof(struct bpf_line_info_min), .ext_info = &btf_ext->line_info, .desc = "line_info", }; struct btf_ext_sec_info_param core_relo = { .off = btf_ext->hdr->core_relo_off, .len = btf_ext->hdr->core_relo_len, .min_rec_size = sizeof(struct bpf_core_relo), .ext_info = &btf_ext->core_relo_info, .desc = "core_relo", }; int err; err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native); if (err) return err; err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native); if (err) return err; if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) return 0; /* skip core relos parsing */ err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native); if (err) return err; return 0; } /* Swap byte-order of BTF.ext header with any endianness */ static void btf_ext_bswap_hdr(struct btf_ext_header *h) { bool is_native = h->magic == BTF_MAGIC; __u32 hdr_len; hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len); h->magic = bswap_16(h->magic); h->hdr_len = bswap_32(h->hdr_len); h->func_info_off = bswap_32(h->func_info_off); h->func_info_len = bswap_32(h->func_info_len); h->line_info_off = bswap_32(h->line_info_off); h->line_info_len = bswap_32(h->line_info_len); if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) return; h->core_relo_off = bswap_32(h->core_relo_off); h->core_relo_len = bswap_32(h->core_relo_len); } /* Swap byte-order of generic info subsection */ static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native, info_rec_bswap_fn bswap_fn) { struct btf_ext_info_sec *sec; __u32 info_left, rec_size, *rs; if (len == 0) return; rs = info; /* info record size */ rec_size = is_native ? *rs : bswap_32(*rs); *rs = bswap_32(*rs); sec = info + sizeof(__u32); /* info sec #1 */ info_left = len - sizeof(__u32); while (info_left) { unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); __u32 i, num_recs; void *p; num_recs = is_native ? 
sec->num_info : bswap_32(sec->num_info); sec->sec_name_off = bswap_32(sec->sec_name_off); sec->num_info = bswap_32(sec->num_info); p = sec->data; /* info rec #1 */ for (i = 0; i < num_recs; i++, p += rec_size) bswap_fn(p); sec = p; info_left -= sec_hdrlen + (__u64)rec_size * num_recs; } } /* * Swap byte-order of all info data in a BTF.ext section * - requires BTF.ext hdr in native endianness */ static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data) { const bool is_native = btf_ext->swapped_endian; const struct btf_ext_header *h = data; void *info; /* Swap func_info subsection byte-order */ info = data + h->hdr_len + h->func_info_off; btf_ext_bswap_info_sec(info, h->func_info_len, is_native, (info_rec_bswap_fn)bpf_func_info_bswap); /* Swap line_info subsection byte-order */ info = data + h->hdr_len + h->line_info_off; btf_ext_bswap_info_sec(info, h->line_info_len, is_native, (info_rec_bswap_fn)bpf_line_info_bswap); /* Swap core_relo subsection byte-order (if present) */ if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) return; info = data + h->hdr_len + h->core_relo_off; btf_ext_bswap_info_sec(info, h->core_relo_len, is_native, (info_rec_bswap_fn)bpf_core_relo_bswap); } /* Parse hdr data and info sections: check and convert to native endianness */ static int btf_ext_parse(struct btf_ext *btf_ext) { __u32 hdr_len, data_size = btf_ext->data_size; struct btf_ext_header *hdr = btf_ext->hdr; bool swapped_endian = false; int err; if (data_size < offsetofend(struct btf_ext_header, hdr_len)) { pr_debug("BTF.ext header too short\n"); return -EINVAL; } hdr_len = hdr->hdr_len; if (hdr->magic == bswap_16(BTF_MAGIC)) { swapped_endian = true; hdr_len = bswap_32(hdr_len); } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); return -EINVAL; } /* Ensure known version of structs, current BTF_VERSION == 1 */ if (hdr->version != 1) { pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); return -ENOTSUP; } if (hdr->flags) { pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); return -ENOTSUP; } if (data_size < hdr_len) { pr_debug("BTF.ext header not found\n"); return -EINVAL; } else if (data_size == hdr_len) { pr_debug("BTF.ext has no data\n"); return -EINVAL; } /* Verify mandatory hdr info details present */ if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { pr_warn("BTF.ext header missing func_info, line_info\n"); return -EINVAL; } /* Keep hdr native byte-order in memory for introspection */ if (swapped_endian) btf_ext_bswap_hdr(btf_ext->hdr); /* Validate info subsections and cache key metadata */ err = btf_ext_parse_info(btf_ext, !swapped_endian); if (err) return err; /* Keep infos native byte-order in memory for introspection */ if (swapped_endian) btf_ext_bswap_info(btf_ext, btf_ext->data); /* * Set btf_ext->swapped_endian only after all header and info data has * been swapped, helping bswap functions determine if their data are * in native byte-order when called. 
*/ btf_ext->swapped_endian = swapped_endian; return 0; } void btf_ext__free(struct btf_ext *btf_ext) { if (IS_ERR_OR_NULL(btf_ext)) return; free(btf_ext->func_info.sec_idxs); free(btf_ext->line_info.sec_idxs); free(btf_ext->core_relo_info.sec_idxs); free(btf_ext->data); free(btf_ext->data_swapped); free(btf_ext); } struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) { struct btf_ext *btf_ext; int err; btf_ext = calloc(1, sizeof(struct btf_ext)); if (!btf_ext) return libbpf_err_ptr(-ENOMEM); btf_ext->data_size = size; btf_ext->data = malloc(size); if (!btf_ext->data) { err = -ENOMEM; goto done; } memcpy(btf_ext->data, data, size); err = btf_ext_parse(btf_ext); done: if (err) { btf_ext__free(btf_ext); return libbpf_err_ptr(err); } return btf_ext; } static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian) { struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro; const __u32 data_sz = btf_ext->data_size; void *data; /* Return native data (always present) or swapped data if present */ if (!swap_endian) return btf_ext->data; else if (btf_ext->data_swapped) return btf_ext->data_swapped; /* Recreate missing swapped data, then cache and return */ data = calloc(1, data_sz); if (!data) return NULL; memcpy(data, btf_ext->data, data_sz); btf_ext_bswap_info(btf_ext, data); btf_ext_bswap_hdr(data); btf_ext->data_swapped = data; return data; } const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size) { void *data; data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian); if (!data) return errno = ENOMEM, NULL; *size = btf_ext->data_size; return data; } __attribute__((alias("btf_ext__raw_data"))) const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext) { if (is_host_big_endian()) return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; else return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; } int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian) { if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) return libbpf_err(-EINVAL); btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); if (!btf_ext->swapped_endian) { free(btf_ext->data_swapped); btf_ext->data_swapped = NULL; } return 0; } struct btf_dedup; static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); static void btf_dedup_free(struct btf_dedup *d); static int btf_dedup_prep(struct btf_dedup *d); static int btf_dedup_strings(struct btf_dedup *d); static int btf_dedup_prim_types(struct btf_dedup *d); static int btf_dedup_struct_types(struct btf_dedup *d); static int btf_dedup_ref_types(struct btf_dedup *d); static int btf_dedup_resolve_fwds(struct btf_dedup *d); static int btf_dedup_compact_types(struct btf_dedup *d); static int btf_dedup_remap_types(struct btf_dedup *d); /* * Deduplicate BTF types and strings. * * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF * section with all BTF type descriptors and string data. It overwrites that * memory in-place with deduplicated types and strings without any loss of * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section * is provided, all the strings referenced from .BTF.ext section are honored * and updated to point to the right offsets after deduplication. * * If function returns with error, type/string data might be garbled and should * be discarded. 
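 *
 * A minimal usage sketch (an editor's example, not part of the original
 * comment; assumes `btf` and, optionally, `ext` were loaded elsewhere):
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err = btf__dedup(btf, &opts);
 *
 * Passing NULL instead of &opts uses default options.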
 *
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and the solution can be found at:
 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
 *
 * Problem description and justification
 * =====================================
 *
 * BTF type information is typically emitted either as a result of conversion
 * from DWARF to BTF or directly by the compiler. In both cases, each compilation
 * unit contains information about a subset of all the types that are used
 * in an application. These subsets are frequently overlapping and contain a lot
 * of duplicated information when later concatenated together into a single
 * binary. This algorithm ensures that each unique type is represented by a
 * single BTF type descriptor, greatly reducing the resulting size of BTF data.
 *
 * Compilation unit isolation and subsequent duplication of data is not the only
 * problem. The same type hierarchy (e.g., a struct and all the types that the
 * struct references) in different compilation units can be represented in BTF to
 * various degrees of completeness (or, rather, incompleteness) due to
 * struct/union forward declarations.
 *
 * Let's take a look at an example that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * the same `struct S`, but each of them having incomplete type information
 * about struct's fields:
 *
 * // CU #1:
 * struct S;
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B;
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * // CU #2:
 * struct S;
 * struct A;
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about the forward declaration of `struct A` (in BTF terms, it will
 * have a `BTF_KIND_FWD` type descriptor with name `A`).
 *
 * This compilation unit isolation means that it's possible that there is no
 * single CU with complete type information describing structs `S`, `A`, and
 * `B`. Also, we might get tons of duplicated and redundant type information.
 *
 * An additional complication we need to keep in mind comes from the fact that
 * types, in general, can form graphs containing cycles, not just DAGs.
 *
 * While the algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible. E.g., in the example above with two compilation units having
 * partial type information for structs `A` and `B`, the algorithm will emit
 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
 * (as well as type information for `int` and pointers), as if they were defined
 * in a single compilation unit as:
 *
 * struct A {
 *	int a;
 *	struct A* self;
 *	struct S* parent;
 * };
 * struct B {
 *	int b;
 *	struct B* self;
 *	struct S* parent;
 * };
 * struct S {
 *	struct A* a_ptr;
 *	struct B* b_ptr;
 * };
 *
 * Algorithm summary
 * =================
 *
 * The algorithm completes its work in 7 separate passes:
 *
 * 1. Strings deduplication.
 * 2. Primitive types deduplication (int, enum, fwd).
 * 3. Struct/union types deduplication.
 * 4. Resolve unambiguous forward declarations.
 * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
 *    protos, and const/volatile/restrict modifiers).
 * 6. Types compaction.
 * 7. Types remapping.
 *
 * The algorithm determines a canonical type descriptor, which is a single
 * representative type for each truly unique type. This canonical type is the
 * one that will go into the final deduplicated BTF type information. For
 * structs/unions, it is also the type that the algorithm will merge additional
 * type information into (while resolving FWDs), as it discovers it from data
 * in other CUs. Each input BTF type eventually gets either mapped to itself,
 * if that type is canonical, or to some other type, if that type is equivalent
 * and was chosen as the canonical representative. This mapping is stored in
 * the `btf_dedup->map` array. This map is also used to record the STRUCT/UNION
 * that a FWD type got resolved to.
 *
 * To facilitate fast discovery of canonical types, we also maintain a canonical
 * index (`btf_dedup->dedup_table`), which maps a type descriptor's signature
 * hash (i.e., hashed kind, name, size, fields, etc) into a list of canonical
 * types that match that signature. With a sufficiently good choice of type
 * signature hashing function, we can limit the number of canonical types for
 * each unique type signature to a very small number, allowing the canonical
 * type for any duplicated type to be found very quickly.
 *
 * Struct/union deduplication is the most critical part and the algorithm for
 * deduplicating structs/unions is described in greater detail in the comments
 * for the `btf_dedup_is_equiv` function.
 */
int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d;
	int err;

	if (!OPTS_VALID(opts, btf_dedup_opts))
		return libbpf_err(-EINVAL);

	d = btf_dedup_new(btf, opts);
	if (IS_ERR(d)) {
		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
		return libbpf_err(-EINVAL);
	}

	if (btf_ensure_modifiable(btf)) {
		err = -ENOMEM;
		goto done;
	}

	err = btf_dedup_prep(d);
	if (err) {
		pr_debug("btf_dedup_prep failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_strings(d);
	if (err < 0) {
		pr_debug("btf_dedup_strings failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_prim_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_prim_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_struct_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_struct_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_resolve_fwds(d);
	if (err < 0) {
		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_ref_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_ref_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_compact_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_compact_types failed:%d\n", err);
		goto done;
	}
	err = btf_dedup_remap_types(d);
	if (err < 0) {
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
		goto done;
	}

done:
	btf_dedup_free(d);
	return libbpf_err(err);
}

#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share the same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	__u32 *hypot_list;
	size_t hypot_cnt;
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such a case, during split BTF dedup,
	 * the candidate type would still be considered different, because the
	 * base BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various options modifying the behavior of the algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};

static long hash_combine(long h, long value)
{
	return h * 31 + value;
}

#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, hash)

static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table, hash, type_id);
}

static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
{
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
		if (!new_list)
			return -ENOMEM;
		d->hypot_list = new_list;
	}
	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;
	return 0;
}

static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
{
	int i;

	for (i = 0; i < d->hypot_cnt; i++)
		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
	d->hypot_cnt = 0;
	d->hypot_adjust_canon = false;
}

static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}

static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	return key;
}

static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	return 0;
}

static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}

/*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string, and pass a pointer to each to the provided callback `fn`.
 */
static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
{
	int i, r;

	for (i = 0; i < d->btf->nr_types; i++) {
		struct btf_field_iter it;
		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
		__u32 *str_off;

		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (r)
			return r;

		while ((str_off = btf_field_iter_next(&it))) {
			r = fn(str_off, ctx);
			if (r)
				return r;
		}
	}

	if (!d->btf_ext)
		return 0;

	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
	if (r)
		return r;

	return 0;
}

static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
{
	struct btf_dedup *d = ctx;
	__u32 str_off = *str_off_ptr;
	const char *s;
	int off, err;

	/* don't touch empty string or string in main BTF */
	if (str_off == 0 || str_off < d->btf->start_str_off)
		return 0;

	s = btf__str_by_offset(d->btf, str_off);
	if (d->btf->base_btf) {
		err = btf__find_str(d->btf->base_btf, s);
		if (err >= 0) {
			*str_off_ptr = err;
			return 0;
		}
		if (err != -ENOENT)
			return err;
	}

	off = strset__add_str(d->strs_set, s);
	if (off < 0)
		return off;

	*str_off_ptr = d->btf->start_str_off + off;
	return 0;
}

/*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building an index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
 * strings as used. After that all used strings are deduped and compacted into
 * a sequential blob of memory and new offsets are calculated. Then all the
 * string references are iterated again and rewritten using new offsets.
 */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones */
	strset__free(d->btf->strs_set);
	d->btf->hdr->str_len = strset__data_size(d->strs_set);
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}

static long btf_hash_common(struct btf_type *t)
{
	long h;

	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info);
	h = hash_combine(h, t->size);
	return h;
}

static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	return t1->name_off == t2->name_off &&
	       t1->info == t2->info &&
	       t1->size == t2->size;
}

/* Calculate type signature hash of INT or TAG. */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
	__u32 info = *(__u32 *)(t + 1);
	long h;

	h = btf_hash_common(t);
	h = hash_combine(h, info);
	return h;
}

/* Check structural equality of two INTs or TAGs. */
static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
{
	__u32 info1, info2;

	if (!btf_equal_common(t1, t2))
		return false;
	info1 = *(__u32 *)(t1 + 1);
	info2 = *(__u32 *)(t2 + 1);
	return info1 == info2;
}

/* Calculate type signature hash of ENUM/ENUM64.
*/ static long btf_hash_enum(struct btf_type *t) { long h; /* don't hash vlen, enum members and size to support enum fwd resolving */ h = hash_combine(0, t->name_off); return h; } static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2) { const struct btf_enum *m1, *m2; __u16 vlen; int i; vlen = btf_vlen(t1); m1 = btf_enum(t1); m2 = btf_enum(t2); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->val != m2->val) return false; m1++; m2++; } return true; } static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2) { const struct btf_enum64 *m1, *m2; __u16 vlen; int i; vlen = btf_vlen(t1); m1 = btf_enum64(t1); m2 = btf_enum64(t2); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 || m1->val_hi32 != m2->val_hi32) return false; m1++; m2++; } return true; } /* Check structural equality of two ENUMs or ENUM64s. */ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) { if (!btf_equal_common(t1, t2)) return false; /* t1 & t2 kinds are identical because of btf_equal_common */ if (btf_kind(t1) == BTF_KIND_ENUM) return btf_equal_enum_members(t1, t2); else return btf_equal_enum64_members(t1, t2); } static inline bool btf_is_enum_fwd(struct btf_type *t) { return btf_is_any_enum(t) && btf_vlen(t) == 0; } static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) { if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) return btf_equal_enum(t1, t2); /* At this point either t1 or t2 or both are forward declarations, thus: * - skip comparing vlen because it is zero for forward declarations; * - skip comparing size to allow enum forward declarations * to be compatible with enum64 full declarations; * - skip comparing kind for the same reason. */ return t1->name_off == t2->name_off && btf_is_any_enum(t1) && btf_is_any_enum(t2); } /* * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, * as referenced type IDs equivalence is established separately during type * graph equivalence check algorithm. */ static long btf_hash_struct(struct btf_type *t) { const struct btf_member *member = btf_members(t); __u32 vlen = btf_vlen(t); long h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { h = hash_combine(h, member->name_off); h = hash_combine(h, member->offset); /* no hashing of referenced type ID, it can be unresolved yet */ member++; } return h; } /* * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced * type IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) { const struct btf_member *m1, *m2; __u16 vlen; int i; if (!btf_equal_common(t1, t2)) return false; vlen = btf_vlen(t1); m1 = btf_members(t1); m2 = btf_members(t2); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->offset != m2->offset) return false; m1++; m2++; } return true; } /* * Calculate type signature hash of ARRAY, including referenced type IDs, * under assumption that they were already resolved to canonical type IDs and * are not going to change. 
*/ static long btf_hash_array(struct btf_type *t) { const struct btf_array *info = btf_array(t); long h = btf_hash_common(t); h = hash_combine(h, info->type); h = hash_combine(h, info->index_type); h = hash_combine(h, info->nelems); return h; } /* * Check exact equality of two ARRAYs, taking into account referenced * type IDs, under assumption that they were already resolved to canonical * type IDs and are not going to change. * This function is called during reference types deduplication to compare * ARRAY to potential canonical representative. */ static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) { const struct btf_array *info1, *info2; if (!btf_equal_common(t1, t2)) return false; info1 = btf_array(t1); info2 = btf_array(t2); return info1->type == info2->type && info1->index_type == info2->index_type && info1->nelems == info2->nelems; } /* * Check structural compatibility of two ARRAYs, ignoring referenced type * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) { if (!btf_equal_common(t1, t2)) return false; return btf_array(t1)->nelems == btf_array(t2)->nelems; } /* * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, * under assumption that they were already resolved to canonical type IDs and * are not going to change. */ static long btf_hash_fnproto(struct btf_type *t) { const struct btf_param *member = btf_params(t); __u16 vlen = btf_vlen(t); long h = btf_hash_common(t); int i; for (i = 0; i < vlen; i++) { h = hash_combine(h, member->name_off); h = hash_combine(h, member->type); member++; } return h; } /* * Check exact equality of two FUNC_PROTOs, taking into account referenced * type IDs, under assumption that they were already resolved to canonical * type IDs and are not going to change. * This function is called during reference types deduplication to compare * FUNC_PROTO to potential canonical representative. */ static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) { const struct btf_param *m1, *m2; __u16 vlen; int i; if (!btf_equal_common(t1, t2)) return false; vlen = btf_vlen(t1); m1 = btf_params(t1); m2 = btf_params(t2); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off || m1->type != m2->type) return false; m1++; m2++; } return true; } /* * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type * IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) { const struct btf_param *m1, *m2; __u16 vlen; int i; /* skip return type ID */ if (t1->name_off != t2->name_off || t1->info != t2->info) return false; vlen = btf_vlen(t1); m1 = btf_params(t1); m2 = btf_params(t2); for (i = 0; i < vlen; i++) { if (m1->name_off != m2->name_off) return false; m1++; m2++; } return true; } /* Prepare split BTF for deduplication by calculating hashes of base BTF's * types and initializing the rest of the state (canonical type mapping) for * the fixed base BTF part. 
*/ static int btf_dedup_prep(struct btf_dedup *d) { struct btf_type *t; int type_id; long h; if (!d->btf->base_btf) return 0; for (type_id = 1; type_id < d->btf->start_id; type_id++) { t = btf_type_by_id(d->btf, type_id); /* all base BTF types are self-canonical by definition */ d->map[type_id] = type_id; switch (btf_kind(t)) { case BTF_KIND_VAR: case BTF_KIND_DATASEC: /* VAR and DATASEC are never hash/deduplicated */ continue; case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_FWD: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: case BTF_KIND_TYPE_TAG: h = btf_hash_common(t); break; case BTF_KIND_INT: case BTF_KIND_DECL_TAG: h = btf_hash_int_decl_tag(t); break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: h = btf_hash_enum(t); break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: h = btf_hash_struct(t); break; case BTF_KIND_ARRAY: h = btf_hash_array(t); break; case BTF_KIND_FUNC_PROTO: h = btf_hash_fnproto(t); break; default: pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id); return -EINVAL; } if (btf_dedup_table_add(d, h, type_id)) return -ENOMEM; } return 0; } /* * Deduplicate primitive types, that can't reference other types, by calculating * their type signature hash and comparing them with any possible canonical * candidate. If no canonical candidate matches, type itself is marked as * canonical and is added into `btf_dedup->dedup_table` as another candidate. */ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) { struct btf_type *t = btf_type_by_id(d->btf, type_id); struct hashmap_entry *hash_entry; struct btf_type *cand; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u32 cand_id; long h; switch (btf_kind(t)) { case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_ARRAY: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_FUNC: case BTF_KIND_FUNC_PROTO: case BTF_KIND_VAR: case BTF_KIND_DATASEC: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: return 0; case BTF_KIND_INT: h = btf_hash_int_decl_tag(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_int_tag(t, cand)) { new_id = cand_id; break; } } break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: h = btf_hash_enum(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_enum(t, cand)) { new_id = cand_id; break; } if (btf_compat_enum(t, cand)) { if (btf_is_enum_fwd(t)) { /* resolve fwd to full enum */ new_id = cand_id; break; } /* resolve canonical enum fwd to full enum */ d->map[cand_id] = type_id; } } break; case BTF_KIND_FWD: case BTF_KIND_FLOAT: h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; } } break; default: return -EINVAL; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return 0; } static int btf_dedup_prim_types(struct btf_dedup *d) { int i, err; for (i = 0; i < d->btf->nr_types; i++) { err = btf_dedup_prim_type(d, d->btf->start_id + i); if (err) return err; } return 0; } /* * Check whether type is already mapped into canonical one (could be to itself). 
*/ static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) { return d->map[type_id] <= BTF_MAX_NR_TYPES; } /* * Resolve type ID into its canonical type ID, if any; otherwise return original * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow * STRUCT/UNION link and resolve it into canonical type ID as well. */ static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) { while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; return type_id; } /* * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original * type ID. */ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) { __u32 orig_type_id = type_id; if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; return orig_type_id; } static inline __u16 btf_fwd_kind(struct btf_type *t) { return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; } /* Check if given two types are identical ARRAY definitions */ static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) { struct btf_type *t1, *t2; t1 = btf_type_by_id(d->btf, id1); t2 = btf_type_by_id(d->btf, id2); if (!btf_is_array(t1) || !btf_is_array(t2)) return false; return btf_equal_array(t1, t2); } /* Check if given two types are identical STRUCT/UNION definitions */ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2) { const struct btf_member *m1, *m2; struct btf_type *t1, *t2; int n, i; t1 = btf_type_by_id(d->btf, id1); t2 = btf_type_by_id(d->btf, id2); if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2)) return false; if (!btf_shallow_equal_struct(t1, t2)) return false; m1 = btf_members(t1); m2 = btf_members(t2); for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { if (m1->type != m2->type && !btf_dedup_identical_arrays(d, m1->type, m2->type) && !btf_dedup_identical_structs(d, m1->type, m2->type)) return false; } return true; } /* * Check equivalence of the BTF type graph formed by candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph * formed by (potential) canonical struct/union ("canonical graph" for brevity * here, though keep in mind that not all types in the canonical graph are * necessarily canonical representatives themselves, some of them might be * duplicates or their uniqueness might not have been established yet). * Returns: * - >0, if type graphs are equivalent; * - 0, if not equivalent; * - <0, on error. * * The algorithm performs a side-by-side DFS traversal of both type graphs and checks * equivalence of BTF types at each step. If at any point BTF types in candidate * and canonical graphs are not structurally compatible, the whole graphs are * incompatible. If types are structurally equivalent (i.e., all information * except referenced type IDs is exactly the same), a mapping from `canon_id` to * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`). * If a type references other types, then those referenced types are checked * for equivalence recursively. * * During DFS traversal, if we find that the current `canon_id` type already * has some mapping in the hypothetical map, we check for two possible * situations: * - `canon_id` is mapped to exactly the same type as `cand_id`. This will * happen when type graphs have cycles.
In this case we assume those two * types are equivalent. * - `canon_id` is mapped to a different type. This is a contradiction in our * hypothetical mapping, because the same type in the canonical graph would * correspond to two different types in the candidate graph, which for * equivalent type graphs shouldn't happen. This condition terminates the * equivalence check with a negative result. * * If type graph traversal exhausts all types to check and finds no contradiction, * then the type graphs are equivalent. * * When checking types for equivalence, there is one special case: FWD types. * If FWD type resolution is allowed and one of the types (either from canonical * or candidate graph) is FWD and the other is STRUCT/UNION (depending on FWD's kind * flag) and their names match, the hypothetical mapping is updated to point from * FWD to STRUCT/UNION. If the graphs are then successfully determined to be * equivalent, this mapping will be used to record the FWD -> STRUCT/UNION mapping * permanently. * * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, * if there are two identically named (or anonymous) structs/unions that are * structurally compatible, one of which has a FWD field, while the other is a * concrete STRUCT/UNION, but according to C sources they are different * structs/unions that are referencing different types with the same name. This is * extremely unlikely to happen, but the btf_dedup API allows disabling FWD * resolution if this logic is causing problems. * * Doing FWD resolution means that both candidate and/or canonical graphs can * consist of portions of the graph that come from multiple compilation units. * This is due to the fact that types within a single compilation unit are always * deduplicated and FWDs are already resolved, if the referenced struct/union * definition is available. So, if we had an unresolved FWD and found a * corresponding STRUCT/UNION, they will be from different compilation units. This * consequently means that when we "link" FWD to the corresponding STRUCT/UNION, * the type graph will likely have at least two different BTF types that describe * the same type (e.g., most probably there will be two different BTF types for the * same 'int' primitive type) and could even have "overlapping" parts of the type * graph that describe the same subset of types. * * This in turn means that our assumption that each type in the canonical graph * must correspond to exactly one type in the candidate graph might not hold * anymore and will make it harder to detect contradictions using the hypothetical * map. To handle this problem, we allow FWD -> STRUCT/UNION * resolution only in the canonical graph. FWDs in candidate graphs are never * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs * that can occur: * - Both types in canonical and candidate graphs are FWDs. If they are * structurally equivalent, then they can either be both resolved to the * same STRUCT/UNION or not resolved at all. In both cases they are * equivalent and there is no need to resolve FWD on the candidate side. * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, * so there is nothing to resolve either; the algorithm will check equivalence * anyway. * - Type in canonical graph is FWD, while type in candidate is concrete * STRUCT/UNION. In this case the candidate graph comes from a single compilation * unit, so there is exactly one BTF type for each unique C type.
After resolving FWD into STRUCT/UNION, there might be more than one BTF type * in the canonical graph mapping to a single BTF type in the candidate graph, but * because the hypothetical mapping maps from canonical to candidate types, it's * alright, and we still maintain the property of having a single `canon_id` * mapping to a single `cand_id` (there could be two different `canon_id` * mapped to the same `cand_id`, but it's not contradictory). * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate * graph is FWD. In this case we are just going to check compatibility of * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to * a concrete STRUCT/UNION from the canonical graph. If the rest of the type graphs * turn out equivalent, we'll re-resolve FWD to the concrete STRUCT/UNION from * the canonical graph. */ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, __u32 canon_id) { struct btf_type *cand_type; struct btf_type *canon_type; __u32 hypot_type_id; __u16 cand_kind; __u16 canon_kind; int i, eq; /* if both resolve to the same canonical, they must be equivalent */ if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) return 1; canon_id = resolve_fwd_id(d, canon_id); hypot_type_id = d->hypot_map[canon_id]; if (hypot_type_id <= BTF_MAX_NR_TYPES) { if (hypot_type_id == cand_id) return 1; /* In some cases the compiler will generate different DWARF types * for *identical* array type definitions and use them for * different fields within the *same* struct. This breaks the type * equivalence check, which assumes that the candidate type * sub-graph has consistent, compiler-deduped types within * a single CU. So work around that by explicitly * allowing identical array types here. */ if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id)) return 1; /* It turns out that a similar situation can happen with * struct/union sometimes, sigh... Handle the case where * structs/unions are exactly the same, down to the referenced * type IDs. Anything more complicated (e.g., if referenced * types are different, but equivalent) is *way more* * complicated and requires a many-to-many equivalence mapping.
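 * * A hypothetical CU that can trigger the ARRAY flavor of this: * * struct s { * int a[16]; * int b[16]; * }; * * where DWARF may describe a[16] and b[16] with two distinct, but * byte-for-byte identical, ARRAY types.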
*/ if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) return 1; return 0; } if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) return -ENOMEM; cand_type = btf_type_by_id(d->btf, cand_id); canon_type = btf_type_by_id(d->btf, canon_id); cand_kind = btf_kind(cand_type); canon_kind = btf_kind(canon_type); if (cand_type->name_off != canon_type->name_off) return 0; /* FWD <--> STRUCT/UNION equivalence check, if enabled */ if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) && cand_kind != canon_kind) { __u16 real_kind; __u16 fwd_kind; if (cand_kind == BTF_KIND_FWD) { real_kind = canon_kind; fwd_kind = btf_fwd_kind(cand_type); } else { real_kind = cand_kind; fwd_kind = btf_fwd_kind(canon_type); /* we'd need to resolve base FWD to STRUCT/UNION */ if (fwd_kind == real_kind && canon_id < d->btf->start_id) d->hypot_adjust_canon = true; } return fwd_kind == real_kind; } if (cand_kind != canon_kind) return 0; switch (cand_kind) { case BTF_KIND_INT: return btf_equal_int_tag(cand_type, canon_type); case BTF_KIND_ENUM: case BTF_KIND_ENUM64: return btf_compat_enum(cand_type, canon_type); case BTF_KIND_FWD: case BTF_KIND_FLOAT: return btf_equal_common(cand_type, canon_type); case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_TYPE_TAG: if (cand_type->info != canon_type->info) return 0; return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); case BTF_KIND_ARRAY: { const struct btf_array *cand_arr, *canon_arr; if (!btf_compat_array(cand_type, canon_type)) return 0; cand_arr = btf_array(cand_type); canon_arr = btf_array(canon_type); eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type); if (eq <= 0) return eq; return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *cand_m, *canon_m; __u16 vlen; if (!btf_shallow_equal_struct(cand_type, canon_type)) return 0; vlen = btf_vlen(cand_type); cand_m = btf_members(cand_type); canon_m = btf_members(canon_type); for (i = 0; i < vlen; i++) { eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); if (eq <= 0) return eq; cand_m++; canon_m++; } return 1; } case BTF_KIND_FUNC_PROTO: { const struct btf_param *cand_p, *canon_p; __u16 vlen; if (!btf_compat_fnproto(cand_type, canon_type)) return 0; eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); if (eq <= 0) return eq; vlen = btf_vlen(cand_type); cand_p = btf_params(cand_type); canon_p = btf_params(canon_type); for (i = 0; i < vlen; i++) { eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); if (eq <= 0) return eq; cand_p++; canon_p++; } return 1; } default: return -EINVAL; } return 0; } /* * Use hypothetical mapping, produced by successful type graph equivalence * check, to augment existing struct/union canonical mapping, where possible. * * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional: * it doesn't matter if FWD type was part of canonical graph or candidate one, * we are recording the mapping anyway. As opposed to carefulness required * for struct/union correspondence mapping (described below), for FWD resolution * it's not important, as by the time that FWD type (reference type) will be * deduplicated all structs/unions will be deduped already anyway. * * Recording STRUCT/UNION mapping is purely a performance optimization and is * not required for correctness. 
It needs to be done carefully to ensure that a * struct/union from the candidate's type graph is not mapped into a corresponding * struct/union from the canonical type graph that itself hasn't been resolved into * a canonical representative. The only guarantee we have is that the canonical * struct/union was determined as canonical and that won't change. But any * types referenced through that struct/union's fields might not have been * resolved yet, so in a case like that it's too early to establish any kind of * correspondence between structs/unions. * * No canonical correspondence is derived for primitive types (they are already * completely deduplicated anyway) or reference types (they rely on * stability of the struct/union canonical relationship for equivalence checks). */ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) { __u32 canon_type_id, targ_type_id; __u16 t_kind, c_kind; __u32 t_id, c_id; int i; for (i = 0; i < d->hypot_cnt; i++) { canon_type_id = d->hypot_list[i]; targ_type_id = d->hypot_map[canon_type_id]; t_id = resolve_type_id(d, targ_type_id); c_id = resolve_type_id(d, canon_type_id); t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); /* * Resolve FWD into STRUCT/UNION. * It's ok to resolve FWD into a STRUCT/UNION that's not yet * mapped to a canonical representative (as opposed to the * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because * eventually that struct is going to be mapped and all resolved * FWDs will automatically resolve to the correct canonical * representative. This will happen before ref type deduping, * which critically depends on stability of these mappings. This * stability is not a requirement for STRUCT/UNION equivalence * checks, though. */ /* if it's the split BTF case, we still need to point base FWD * to STRUCT/UNION in a split BTF, because FWDs from split BTF * will be resolved against base FWD. If we don't point base * canonical FWD to the resolved STRUCT/UNION, then all the * FWDs in split BTF won't be correctly resolved to a proper * STRUCT/UNION. */ if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) d->map[c_id] = t_id; /* if graph equivalence determined that we'd need to adjust * base canonical types, then we need to only point base FWDs * to STRUCTs/UNIONs and do no more modifications. For all * other purposes the type graphs were not equivalent. */ if (d->hypot_adjust_canon) continue; if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) d->map[t_id] = c_id; if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && c_kind != BTF_KIND_FWD && is_type_mapped(d, c_id) && !is_type_mapped(d, t_id)) { /* * as a perf optimization, we can map a struct/union * that's part of the type graph we just verified for * equivalence. We can only do that for a struct/union * that has a canonical representative, though. */ d->map[t_id] = c_id; } } } /* * Deduplicate struct/union types. * * For each struct/union type its type signature hash is calculated, taking * into account the type's name, size, number, order and names of fields, but * ignoring type IDs referenced from fields, because they might not be deduped * completely until after the reference types deduplication phase. This type hash * is used to iterate over all potential canonical types sharing the same hash. * For each canonical candidate we check whether the type graphs that they form * (through referenced types in fields and so on) are equivalent using the * algorithm implemented in `btf_dedup_is_equiv`.
If such equivalence is found and * BTF_KIND_FWD resolution is allowed, then hypothetical mapping * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to * potentially map other structs/unions to their canonical representatives, * if such relationship hasn't yet been established. This speeds up algorithm * by eliminating some of the duplicate work. * * If no matching canonical representative was found, struct/union is marked * as canonical for itself and is added into btf_dedup->dedup_table hash map * for further look ups. */ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) { struct btf_type *cand_type, *t; struct hashmap_entry *hash_entry; /* if we don't find equivalent type, then we are canonical */ __u32 new_id = type_id; __u16 kind; long h; /* already deduped or is in process of deduping (loop detected) */ if (d->map[type_id] <= BTF_MAX_NR_TYPES) return 0; t = btf_type_by_id(d->btf, type_id); kind = btf_kind(t); if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) return 0; h = btf_hash_struct(t); for_each_dedup_cand(d, hash_entry, h) { __u32 cand_id = hash_entry->value; int eq; /* * Even though btf_dedup_is_equiv() checks for * btf_shallow_equal_struct() internally when checking two * structs (unions) for equivalence, we need to guard here * from picking matching FWD type as a dedup candidate. * This can happen due to hash collision. In such case just * relying on btf_dedup_is_equiv() would lead to potentially * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because * FWD and compatible STRUCT/UNION are considered equivalent. */ cand_type = btf_type_by_id(d->btf, cand_id); if (!btf_shallow_equal_struct(t, cand_type)) continue; btf_dedup_clear_hypot_map(d); eq = btf_dedup_is_equiv(d, type_id, cand_id); if (eq < 0) return eq; if (!eq) continue; btf_dedup_merge_hypot_map(d); if (d->hypot_adjust_canon) /* not really equivalent */ continue; new_id = cand_id; break; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return 0; } static int btf_dedup_struct_types(struct btf_dedup *d) { int i, err; for (i = 0; i < d->btf->nr_types; i++) { err = btf_dedup_struct_type(d, d->btf->start_id + i); if (err) return err; } return 0; } /* * Deduplicate reference type. * * Once all primitive and struct/union types got deduplicated, we can easily * deduplicate all other (reference) BTF types. This is done in two steps: * * 1. Resolve all referenced type IDs into their canonical type IDs. This * resolution can be done either immediately for primitive or struct/union types * (because they were deduped in previous two phases) or recursively for * reference types. Recursion will always terminate at either primitive or * struct/union type, at which point we can "unwind" chain of reference types * one by one. There is no danger of encountering cycles because in C type * system the only way to form type cycle is through struct/union, so any chain * of reference types, even those taking part in a type cycle, will inevitably * reach struct/union at some point. * * 2. Once all referenced type IDs are resolved into canonical ones, BTF type * becomes "stable", in the sense that no further deduplication will cause * any changes to it. With that, it's now possible to calculate type's signature * hash (this time taking into account referenced type IDs) and loop over all * potential canonical representatives. 
If no match was found, current type * will become canonical representative of itself and will be added into * btf_dedup->dedup_table as another possible canonical representative. */ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) { struct hashmap_entry *hash_entry; __u32 new_id = type_id, cand_id; struct btf_type *t, *cand; /* if we don't find equivalent type, then we are representative type */ int ref_type_id; long h; if (d->map[type_id] == BTF_IN_PROGRESS_ID) return -ELOOP; if (d->map[type_id] <= BTF_MAX_NR_TYPES) return resolve_type_id(d, type_id); t = btf_type_by_id(d->btf, type_id); d->map[type_id] = BTF_IN_PROGRESS_ID; switch (btf_kind(t)) { case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_TYPE_TAG: ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; } } break; case BTF_KIND_DECL_TAG: ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; h = btf_hash_int_decl_tag(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_int_tag(t, cand)) { new_id = cand_id; break; } } break; case BTF_KIND_ARRAY: { struct btf_array *info = btf_array(t); ref_type_id = btf_dedup_ref_type(d, info->type); if (ref_type_id < 0) return ref_type_id; info->type = ref_type_id; ref_type_id = btf_dedup_ref_type(d, info->index_type); if (ref_type_id < 0) return ref_type_id; info->index_type = ref_type_id; h = btf_hash_array(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_array(t, cand)) { new_id = cand_id; break; } } break; } case BTF_KIND_FUNC_PROTO: { struct btf_param *param; __u16 vlen; int i; ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; t->type = ref_type_id; vlen = btf_vlen(t); param = btf_params(t); for (i = 0; i < vlen; i++) { ref_type_id = btf_dedup_ref_type(d, param->type); if (ref_type_id < 0) return ref_type_id; param->type = ref_type_id; param++; } h = btf_hash_fnproto(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = hash_entry->value; cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_fnproto(t, cand)) { new_id = cand_id; break; } } break; } default: return -EINVAL; } d->map[type_id] = new_id; if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) return -ENOMEM; return new_id; } static int btf_dedup_ref_types(struct btf_dedup *d) { int i, err; for (i = 0; i < d->btf->nr_types; i++) { err = btf_dedup_ref_type(d, d->btf->start_id + i); if (err < 0) return err; } /* we won't need d->dedup_table anymore */ hashmap__free(d->dedup_table); d->dedup_table = NULL; return 0; } /* * Collect a map from type names to type ids for all canonical structs * and unions. If the same name is shared by several canonical types * use a special value 0 to indicate this fact. */ static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map) { __u32 nr_types = btf__type_cnt(d->btf); struct btf_type *t; __u32 type_id; __u16 kind; int err; /* * Iterate over base and split module ids in order to get all * available structs in the map. 
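 * * E.g., hypothetically: if two different canonical types are both named * 'foo', names_map ends up with 'foo' -> 0 and FWD resolution will skip that * name; a name seen exactly once maps straight to its type id.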
*/ for (type_id = 1; type_id < nr_types; ++type_id) { t = btf_type_by_id(d->btf, type_id); kind = btf_kind(t); if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) continue; /* Skip non-canonical types */ if (type_id != d->map[type_id]) continue; err = hashmap__add(names_map, t->name_off, type_id); if (err == -EEXIST) err = hashmap__set(names_map, t->name_off, 0, NULL, NULL); if (err) return err; } return 0; } static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id) { struct btf_type *t = btf_type_by_id(d->btf, type_id); enum btf_fwd_kind fwd_kind = btf_kflag(t); __u16 cand_kind, kind = btf_kind(t); struct btf_type *cand_t; uintptr_t cand_id; if (kind != BTF_KIND_FWD) return 0; /* Skip if this FWD already has a mapping */ if (type_id != d->map[type_id]) return 0; if (!hashmap__find(names_map, t->name_off, &cand_id)) return 0; /* Zero is a special value indicating that name is not unique */ if (!cand_id) return 0; cand_t = btf_type_by_id(d->btf, cand_id); cand_kind = btf_kind(cand_t); if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) || (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION)) return 0; d->map[type_id] = cand_id; return 0; } /* * Resolve unambiguous forward declarations. * * The lion's share of all FWD declarations is resolved during the * `btf_dedup_struct_types` phase, when different type graphs are * compared against each other. However, if in some compilation unit a * FWD declaration is not part of any type graph compared against * another type graph, that declaration's canonical type would not be * changed. Example: * * CU #1: * * struct foo; * struct foo *some_global; * * CU #2: * * struct foo { int u; }; * struct foo *another_global; * * After `btf_dedup_struct_types` the BTF looks as follows: * * [1] STRUCT 'foo' size=4 vlen=1 ... * [2] INT 'int' size=4 ... * [3] PTR '(anon)' type_id=1 * [4] FWD 'foo' fwd_kind=struct * [5] PTR '(anon)' type_id=4 * * This pass assumes that such FWD declarations should be mapped to * structs or unions with an identical name, if that name is not * ambiguous. */ static int btf_dedup_resolve_fwds(struct btf_dedup *d) { int i, err; struct hashmap *names_map; names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); if (IS_ERR(names_map)) return PTR_ERR(names_map); err = btf_dedup_fill_unique_names_map(d, names_map); if (err < 0) goto exit; for (i = 0; i < d->btf->nr_types; i++) { err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i); if (err < 0) break; } exit: hashmap__free(names_map); return err; } /* * Compact types. * * After we have established for each type its corresponding canonical * representative type, we can now eliminate types that are not canonical and * leave only canonical ones, laid out sequentially in memory by copying them over * duplicates. During compaction the btf_dedup->hypot_map array is reused to store * a map from original type ID to a new compacted type ID, which will be used * during the next phase to "fix up" type IDs referenced from struct/union and * reference types.
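 * * A small worked example (hypothetical ids, no base BTF): with * d->map = {1 -> 1, 2 -> 1, 3 -> 3, 4 -> 3}, only [1] and [3] survive * compaction and hypot_map records {1 -> 1, 3 -> 2}; a reference to id 4 is * later resolved to canonical id 3 and then remapped to the new id 2.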
*/ static int btf_dedup_compact_types(struct btf_dedup *d) { __u32 *new_offs; __u32 next_type_id = d->btf->start_id; const struct btf_type *t; void *p; int i, id, len; /* we are going to reuse hypot_map to store compaction remapping */ d->hypot_map[0] = 0; /* base BTF types are not renumbered */ for (id = 1; id < d->btf->start_id; id++) d->hypot_map[id] = id; for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) d->hypot_map[id] = BTF_UNPROCESSED_ID; p = d->btf->types_data; for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) { if (d->map[id] != id) continue; t = btf__type_by_id(d->btf, id); len = btf_type_size(t); if (len < 0) return len; memmove(p, t, len); d->hypot_map[id] = next_type_id; d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data; p += len; next_type_id++; } /* shrink struct btf's internal types index and update btf_header */ d->btf->nr_types = next_type_id - d->btf->start_id; d->btf->type_offs_cap = d->btf->nr_types; d->btf->hdr->type_len = p - d->btf->types_data; new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, sizeof(*new_offs)); if (d->btf->type_offs_cap && !new_offs) return -ENOMEM; d->btf->type_offs = new_offs; d->btf->hdr->str_off = d->btf->hdr->type_len; d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len; return 0; } /* * Figure out final (deduplicated and compacted) type ID for provided original * `type_id` by first resolving it into corresponding canonical type ID and * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, * which is populated during compaction phase. */ static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx) { struct btf_dedup *d = ctx; __u32 resolved_type_id, new_type_id; resolved_type_id = resolve_type_id(d, *type_id); new_type_id = d->hypot_map[resolved_type_id]; if (new_type_id > BTF_MAX_NR_TYPES) return -EINVAL; *type_id = new_type_id; return 0; } /* * Remap referenced type IDs into deduped type IDs. * * After BTF types are deduplicated and compacted, their final type IDs may * differ from original ones. The map from original to a corresponding * deduped type ID is stored in btf_dedup->hypot_map and is populated during * compaction phase. During remapping phase we are rewriting all type IDs * referenced from any BTF type (e.g., struct fields, func proto args, etc) to * their final deduped type IDs. */ static int btf_dedup_remap_types(struct btf_dedup *d) { int i, r; for (i = 0; i < d->btf->nr_types; i++) { struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); struct btf_field_iter it; __u32 *type_id; r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (r) return r; while ((type_id = btf_field_iter_next(&it))) { __u32 resolved_id, new_id; resolved_id = resolve_type_id(d, *type_id); new_id = d->hypot_map[resolved_id]; if (new_id > BTF_MAX_NR_TYPES) return -EINVAL; *type_id = new_id; } } if (!d->btf_ext) return 0; r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d); if (r) return r; return 0; } /* * Probe few well-known locations for vmlinux kernel image and try to load BTF * data out of it to use for target BTF. 
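 * * A minimal caller-side sketch (error handling mostly elided): * * struct btf *vmlinux_btf = btf__load_vmlinux_btf(); * * if (!libbpf_get_error(vmlinux_btf)) { * ... use it, e.g. as the base BTF for btf__parse_split() ... * btf__free(vmlinux_btf); * }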
*/ struct btf *btf__load_vmlinux_btf(void) { const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux"; /* fall back locations, trying to find vmlinux on disk */ const char *locations[] = { "/boot/vmlinux-%1$s", "/lib/modules/%1$s/vmlinux-%1$s", "/lib/modules/%1$s/build/vmlinux", "/usr/lib/modules/%1$s/kernel/vmlinux", "/usr/lib/debug/boot/vmlinux-%1$s", "/usr/lib/debug/boot/vmlinux-%1$s.debug", "/usr/lib/debug/lib/modules/%1$s/vmlinux", }; char path[PATH_MAX + 1]; struct utsname buf; struct btf *btf; int i, err; /* is canonical sysfs location accessible? */ if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) { pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n", sysfs_btf_path); } else { btf = btf__parse(sysfs_btf_path, NULL); if (!btf) { err = -errno; pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err); return libbpf_err_ptr(err); } pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path); return btf; } /* try fallback locations */ uname(&buf); for (i = 0; i < ARRAY_SIZE(locations); i++) { snprintf(path, PATH_MAX, locations[i], buf.release); if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS)) continue; btf = btf__parse(path, NULL); err = libbpf_get_error(btf); pr_debug("loading kernel BTF '%s': %d\n", path, err); if (err) continue; return btf; } pr_warn("failed to find valid kernel BTF\n"); return libbpf_err_ptr(-ESRCH); } struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf"))); struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf) { char path[80]; snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name); return btf__parse_split(path, vmlinux_btf); } int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx) { const struct btf_ext_info *seg; struct btf_ext_info_sec *sec; int i, err; seg = &btf_ext->func_info; for_each_btf_ext_sec(seg, sec) { struct bpf_func_info_min *rec; for_each_btf_ext_rec(seg, sec, i, rec) { err = visit(&rec->type_id, ctx); if (err < 0) return err; } } seg = &btf_ext->core_relo_info; for_each_btf_ext_sec(seg, sec) { struct bpf_core_relo *rec; for_each_btf_ext_rec(seg, sec, i, rec) { err = visit(&rec->type_id, ctx); if (err < 0) return err; } } return 0; } int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx) { const struct btf_ext_info *seg; struct btf_ext_info_sec *sec; int i, err; seg = &btf_ext->func_info; for_each_btf_ext_sec(seg, sec) { err = visit(&sec->sec_name_off, ctx); if (err) return err; } seg = &btf_ext->line_info; for_each_btf_ext_sec(seg, sec) { struct bpf_line_info_min *rec; err = visit(&sec->sec_name_off, ctx); if (err) return err; for_each_btf_ext_rec(seg, sec, i, rec) { err = visit(&rec->file_name_off, ctx); if (err) return err; err = visit(&rec->line_off, ctx); if (err) return err; } } seg = &btf_ext->core_relo_info; for_each_btf_ext_sec(seg, sec) { struct bpf_core_relo *rec; err = visit(&sec->sec_name_off, ctx); if (err) return err; for_each_btf_ext_rec(seg, sec, i, rec) { err = visit(&rec->access_str_off, ctx); if (err) return err; } } return 0; } struct btf_distill { struct btf_pipe pipe; int *id_map; unsigned int split_start_id; unsigned int split_start_str; int diff_id; }; static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i) { struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i); struct btf_field_iter it; __u32 *id; int err; err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS); if (err) return err; while ((id = 
btf_field_iter_next(&it))) { struct btf_type *base_t; if (!*id) continue; /* split BTF id, not needed */ if (*id >= dist->split_start_id) continue; /* already added ? */ if (dist->id_map[*id] > 0) continue; /* only a subset of base BTF types should be referenced from * split BTF; ensure nothing unexpected is referenced. */ base_t = btf_type_by_id(dist->pipe.src, *id); switch (btf_kind(base_t)) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_FWD: case BTF_KIND_ARRAY: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_TYPEDEF: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_PTR: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VOLATILE: case BTF_KIND_FUNC_PROTO: case BTF_KIND_TYPE_TAG: dist->id_map[*id] = *id; break; default: pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n", *id, btf_kind(base_t)); return -EINVAL; } /* If a base type is used, ensure types it refers to are * marked as used also; so for example if we find a PTR to INT * we need both the PTR and INT. * * The only exception is named struct/unions, since distilled * base BTF composite types have no members. */ if (btf_is_composite(base_t) && base_t->name_off) continue; err = btf_add_distilled_type_ids(dist, *id); if (err) return err; } return 0; } static int btf_add_distilled_types(struct btf_distill *dist) { bool adding_to_base = dist->pipe.dst->start_id == 1; int id = btf__type_cnt(dist->pipe.dst); struct btf_type *t; int i, err = 0; /* Add types for each of the required references to either distilled * base or split BTF, depending on type characteristics. */ for (i = 1; i < dist->split_start_id; i++) { const char *name; int kind; if (!dist->id_map[i]) continue; t = btf_type_by_id(dist->pipe.src, i); kind = btf_kind(t); name = btf__name_by_offset(dist->pipe.src, t->name_off); switch (kind) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_FWD: /* Named int, float, fwd are added to base. */ if (!adding_to_base) continue; err = btf_add_type(&dist->pipe, t); break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: /* Named struct/union are added to base as 0-vlen * struct/union of same size. Anonymous struct/unions * are added to split BTF as-is. */ if (adding_to_base) { if (!t->name_off) continue; err = btf_add_composite(dist->pipe.dst, kind, name, t->size); } else { if (t->name_off) continue; err = btf_add_type(&dist->pipe, t); } break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* Named enum[64]s are added to base as a sized * enum; relocation will match with appropriately-named * and sized enum or enum64. * * Anonymous enums are added to split BTF as-is. */ if (adding_to_base) { if (!t->name_off) continue; err = btf__add_enum(dist->pipe.dst, name, t->size); } else { if (t->name_off) continue; err = btf_add_type(&dist->pipe, t); } break; case BTF_KIND_ARRAY: case BTF_KIND_TYPEDEF: case BTF_KIND_PTR: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VOLATILE: case BTF_KIND_FUNC_PROTO: case BTF_KIND_TYPE_TAG: /* All other types are added to split BTF. */ if (adding_to_base) continue; err = btf_add_type(&dist->pipe, t); break; default: pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n", name, i, kind); return -EINVAL; } if (err < 0) break; dist->id_map[i] = id++; } return err; } /* Split BTF ids without a mapping will be shifted downwards since distilled * base BTF is smaller than the original base BTF. 
For those that have a * mapping (either to base or updated split BTF), update the id based on * that mapping. */ static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i) { struct btf_type *t = btf_type_by_id(dist->pipe.dst, i); struct btf_field_iter it; __u32 *id; int err; err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) return err; while ((id = btf_field_iter_next(&it))) { if (dist->id_map[*id]) *id = dist->id_map[*id]; else if (*id >= dist->split_start_id) *id -= dist->diff_id; } return 0; } /* Create updated split BTF with distilled base BTF; distilled base BTF * consists of BTF information required to clarify the types that split * BTF refers to, omitting unneeded details. Specifically it will contain * base types and memberless definitions of named structs, unions and enumerated * types. Associated reference types like pointers, arrays and anonymous * structs, unions and enumerated types will be added to split BTF. * Size is recorded for named struct/unions to help guide matching to the * target base BTF during later relocation. * * The only case where structs, unions or enumerated types are fully represented * is when they are anonymous; in such cases, the anonymous type is added to * split BTF in full. * * We return newly-created split BTF where the split BTF refers to a newly-created * distilled base BTF. Both must be freed separately by the caller. */ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, struct btf **new_split_btf) { struct btf *new_base = NULL, *new_split = NULL; const struct btf *old_base; unsigned int n = btf__type_cnt(src_btf); struct btf_distill dist = {}; struct btf_type *t; int i, err = 0; /* src BTF must be split BTF. */ old_base = btf__base_btf(src_btf); if (!new_base_btf || !new_split_btf || !old_base) return libbpf_err(-EINVAL); new_base = btf__new_empty(); if (!new_base) return libbpf_err(-ENOMEM); btf__set_endianness(new_base, btf__endianness(src_btf)); dist.id_map = calloc(n, sizeof(*dist.id_map)); if (!dist.id_map) { err = -ENOMEM; goto done; } dist.pipe.src = src_btf; dist.pipe.dst = new_base; dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); if (IS_ERR(dist.pipe.str_off_map)) { err = -ENOMEM; goto done; } dist.split_start_id = btf__type_cnt(old_base); dist.split_start_str = old_base->hdr->str_len; /* Pass over src split BTF; generate the list of base BTF type ids it * references; these will constitute our distilled BTF set to be * distributed over base and split BTF as appropriate. */ for (i = src_btf->start_id; i < n; i++) { err = btf_add_distilled_type_ids(&dist, i); if (err < 0) goto done; } /* Next add types for each of the required references to base BTF and split BTF * in turn. */ err = btf_add_distilled_types(&dist); if (err < 0) goto done; /* Create new split BTF with distilled base BTF as its base; the final * state is split BTF with distilled base BTF that represents enough * about its base references to allow it to be relocated with the base * BTF available. */ new_split = btf__new_empty_split(new_base); if (!new_split) { err = -errno; goto done; } dist.pipe.dst = new_split; /* First add all split types */ for (i = src_btf->start_id; i < n; i++) { t = btf_type_by_id(src_btf, i); err = btf_add_type(&dist.pipe, t); if (err < 0) goto done; } /* Now add distilled types to split BTF that are not added to base. 
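 * (i.e., on this second pass btf_add_distilled_types() emits the split-side * leftovers: anonymous structs/unions/enums and the reference types such as * pointers, arrays, qualifiers, func protos and type tags).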
*/ err = btf_add_distilled_types(&dist); if (err < 0) goto done; /* All split BTF ids will be shifted downwards since there are less base * BTF ids in distilled base BTF. */ dist.diff_id = dist.split_start_id - btf__type_cnt(new_base); n = btf__type_cnt(new_split); /* Now update base/split BTF ids. */ for (i = 1; i < n; i++) { err = btf_update_distilled_type_ids(&dist, i); if (err < 0) break; } done: free(dist.id_map); hashmap__free(dist.pipe.str_off_map); if (err) { btf__free(new_split); btf__free(new_base); return libbpf_err(err); } *new_base_btf = new_base; *new_split_btf = new_split; return 0; } const struct btf_header *btf_header(const struct btf *btf) { return btf->hdr; } void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) { btf->base_btf = (struct btf *)base_btf; btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; } int btf__relocate(struct btf *btf, const struct btf *base_btf) { int err = btf_relocate(btf, base_btf, NULL); if (!err) btf->owns_base = false; return libbpf_err(err); } xdp-tools-1.5.4/lib/libbpf/src/btf_iter.c0000644000175100001660000001005714706536574017576 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2021 Facebook */ /* Copyright (c) 2024, Oracle and/or its affiliates. */ #ifdef __KERNEL__ #include #include #define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t) #else #include "btf.h" #include "libbpf_internal.h" #endif int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind) { it->p = NULL; it->m_idx = -1; it->off_idx = 0; it->vlen = 0; switch (iter_kind) { case BTF_FIELD_ITER_IDS: switch (btf_kind(t)) { case BTF_KIND_UNKN: case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: it->desc = (struct btf_field_desc) {}; break; case BTF_KIND_FWD: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; break; case BTF_KIND_ARRAY: it->desc = (struct btf_field_desc) { 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} }; break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: it->desc = (struct btf_field_desc) { 0, {}, sizeof(struct btf_member), 1, {offsetof(struct btf_member, type)} }; break; case BTF_KIND_FUNC_PROTO: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)}, sizeof(struct btf_param), 1, {offsetof(struct btf_param, type)} }; break; case BTF_KIND_DATASEC: it->desc = (struct btf_field_desc) { 0, {}, sizeof(struct btf_var_secinfo), 1, {offsetof(struct btf_var_secinfo, type)} }; break; default: return -EINVAL; } break; case BTF_FIELD_ITER_STRS: switch (btf_kind(t)) { case BTF_KIND_UNKN: it->desc = (struct btf_field_desc) {}; break; case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_FWD: case BTF_KIND_ARRAY: case BTF_KIND_CONST: case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: case BTF_KIND_DATASEC: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, name_off)} }; break; case BTF_KIND_ENUM: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, name_off)}, sizeof(struct btf_enum), 1, {offsetof(struct btf_enum, 
name_off)} }; break; case BTF_KIND_ENUM64: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, name_off)}, sizeof(struct btf_enum64), 1, {offsetof(struct btf_enum64, name_off)} }; break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, name_off)}, sizeof(struct btf_member), 1, {offsetof(struct btf_member, name_off)} }; break; case BTF_KIND_FUNC_PROTO: it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, name_off)}, sizeof(struct btf_param), 1, {offsetof(struct btf_param, name_off)} }; break; default: return -EINVAL; } break; default: return -EINVAL; } if (it->desc.m_sz) it->vlen = btf_vlen(t); it->p = t; return 0; } __u32 *btf_field_iter_next(struct btf_field_iter *it) { if (!it->p) return NULL; if (it->m_idx < 0) { if (it->off_idx < it->desc.t_off_cnt) return it->p + it->desc.t_offs[it->off_idx++]; /* move to per-member iteration */ it->m_idx = 0; it->p += sizeof(struct btf_type); it->off_idx = 0; } /* if type doesn't have members, stop */ if (it->desc.m_sz == 0) { it->p = NULL; return NULL; } if (it->off_idx >= it->desc.m_off_cnt) { /* exhausted this member's fields, go to the next member */ it->m_idx++; it->p += it->desc.m_sz; it->off_idx = 0; } if (it->m_idx < it->vlen) return it->p + it->desc.m_offs[it->off_idx++]; it->p = NULL; return NULL; } xdp-tools-1.5.4/lib/libbpf/src/bpf_helpers.h0000644000175100001660000003707414706536574020306 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_HELPERS__ #define __BPF_HELPERS__ /* * Note that bpf programs need to include either * vmlinux.h (auto-generated from BTF) or linux/types.h * in advance since bpf_helper_defs.h uses such types * as __u64. */ #include "bpf_helper_defs.h" #define __uint(name, val) int (*name)[val] #define __type(name, val) typeof(val) *name #define __array(name, val) typeof(val) *name[] #define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name /* * Helper macro to place programs, maps, license in * different sections in elf_bpf file. Section names * are interpreted by libbpf depending on the context (BPF programs, BPF maps, * extern variables, etc). * To allow use of SEC() with externs (e.g., for extern .maps declarations), * make sure __attribute__((unused)) doesn't trigger compilation warning. */ #if __GNUC__ && !__clang__ /* * Pragma macros are broken on GCC * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400 */ #define SEC(name) __attribute__((section(name), used)) #else #define SEC(name) \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ __attribute__((section(name), used)) \ _Pragma("GCC diagnostic pop") \ #endif /* Avoid 'linux/stddef.h' definition of '__always_inline'. */ #undef __always_inline #define __always_inline inline __attribute__((always_inline)) #ifndef __noinline #define __noinline __attribute__((noinline)) #endif #ifndef __weak #define __weak __attribute__((weak)) #endif /* * Use __hidden attribute to mark a non-static BPF subprogram effectively * static for BPF verifier's verification algorithm purposes, allowing more * extensive and permissive BPF verification process, taking into account * subprogram's caller context. 
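 * * Usage sketch (hypothetical subprogram): * * __hidden int clamp_positive(int v) * { * return v < 0 ? 0 : v; * }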
*/ #define __hidden __attribute__((visibility("hidden"))) /* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include * any system-level headers (such as stddef.h, linux/version.h, etc), and * commonly-used macros like NULL and KERNEL_VERSION aren't available through * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define * them on their own. So as a convenience, provide such definitions here. */ #ifndef NULL #define NULL ((void *)0) #endif #ifndef KERNEL_VERSION #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c))) #endif /* * Helper macros to manipulate data structures */ /* offsetof() definition that uses __builtin_offset() might not preserve field * offset CO-RE relocation properly, so force-redefine offsetof() using * old-school approach which works with CO-RE correctly */ #undef offsetof #define offsetof(type, member) ((unsigned long)&((type *)0)->member) /* redefined container_of() to ensure we use the above offsetof() macro */ #undef container_of #define container_of(ptr, type, member) \ ({ \ void *__mptr = (void *)(ptr); \ ((type *)(__mptr - offsetof(type, member))); \ }) /* * Compiler (optimization) barrier. */ #ifndef barrier #define barrier() asm volatile("" ::: "memory") #endif /* Variable-specific compiler (optimization) barrier. It's a no-op which makes * compiler believe that there is some black box modification of a given * variable and thus prevents compiler from making extra assumption about its * value and potential simplifications and optimizations on this variable. * * E.g., compiler might often delay or even omit 32-bit to 64-bit casting of * a variable, making some code patterns unverifiable. Putting barrier_var() * in place will ensure that cast is performed before the barrier_var() * invocation, because compiler has to pessimistically assume that embedded * asm section might perform some extra operations on that variable. * * This is a variable-specific variant of more global barrier(). */ #ifndef barrier_var #define barrier_var(var) asm volatile("" : "+r"(var)) #endif /* * Helper macro to throw a compilation error if __bpf_unreachable() gets * built into the resulting code. This works given BPF back end does not * implement __builtin_trap(). This is useful to assert that certain paths * of the program code are never used and hence eliminated by the compiler. * * For example, consider a switch statement that covers known cases used by * the program. __bpf_unreachable() can then reside in the default case. If * the program gets extended such that a case is not covered in the switch * statement, then it will throw a build error due to the default case not * being compiled out. */ #ifndef __bpf_unreachable # define __bpf_unreachable() __builtin_trap() #endif /* * Helper function to perform a tail call with a constant/immediate map slot. */ #if (defined(__clang__) && __clang_major__ >= 8) || (!defined(__clang__) && __GNUC__ > 12) #if defined(__bpf__) static __always_inline void bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) { if (!__builtin_constant_p(slot)) __bpf_unreachable(); /* * Provide a hard guarantee that LLVM won't optimize setting r2 (map * pointer) and r3 (constant map index) from _different paths_ ending * up at the _same_ call insn as otherwise we won't be able to use the * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel * given they mismatch. 
See also d2e4c1e6c294 ("bpf: Constant map key * tracking for prog array pokes") for details on verifier tracking. * * Note on clobber list: we need to stay in-line with BPF calling * convention, so even if we don't end up using r0, r4, r5, we need * to mark them as clobber so that LLVM doesn't end up using them * before / after the call. */ asm volatile("r1 = %[ctx]\n\t" "r2 = %[map]\n\t" "r3 = %[slot]\n\t" "call 12" :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) : "r0", "r1", "r2", "r3", "r4", "r5"); } #endif #endif enum libbpf_pin_type { LIBBPF_PIN_NONE, /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ LIBBPF_PIN_BY_NAME, }; enum libbpf_tristate { TRI_NO = 0, TRI_YES = 1, TRI_MODULE = 2, }; #define __kconfig __attribute__((section(".kconfig"))) #define __ksym __attribute__((section(".ksyms"))) #define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted"))) #define __kptr __attribute__((btf_type_tag("kptr"))) #define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr"))) #define __uptr __attribute__((btf_type_tag("uptr"))) #if defined (__clang__) #define bpf_ksym_exists(sym) ({ \ _Static_assert(!__builtin_constant_p(!!sym), \ #sym " should be marked as __weak"); \ !!sym; \ }) #elif __GNUC__ > 8 #define bpf_ksym_exists(sym) ({ \ _Static_assert(__builtin_has_attribute (*sym, __weak__), \ #sym " should be marked as __weak"); \ !!sym; \ }) #else #define bpf_ksym_exists(sym) !!sym #endif #define __arg_ctx __attribute__((btf_decl_tag("arg:ctx"))) #define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull"))) #define __arg_nullable __attribute((btf_decl_tag("arg:nullable"))) #define __arg_trusted __attribute((btf_decl_tag("arg:trusted"))) #define __arg_arena __attribute((btf_decl_tag("arg:arena"))) #ifndef ___bpf_concat #define ___bpf_concat(a, b) a ## b #endif #ifndef ___bpf_apply #define ___bpf_apply(fn, n) ___bpf_concat(fn, n) #endif #ifndef ___bpf_nth #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N #endif #ifndef ___bpf_narg #define ___bpf_narg(...) \ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #endif #define ___bpf_fill0(arr, p, x) do {} while (0) #define ___bpf_fill1(arr, p, x) arr[p] = x #define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args) #define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args) #define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args) #define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args) #define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args) #define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args) #define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args) #define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args) #define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args) #define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args) #define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args) #define ___bpf_fill(arr, args...) \ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args) /* * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values * in a structure. */ #define BPF_SEQ_PRINTF(seq, fmt, args...) 
\ ({ \ static const char ___fmt[] = fmt; \ unsigned long long ___param[___bpf_narg(args)]; \ \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ ___bpf_fill(___param, args); \ _Pragma("GCC diagnostic pop") \ \ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \ ___param, sizeof(___param)); \ }) /* * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of * an array of u64. */ #define BPF_SNPRINTF(out, out_size, fmt, args...) \ ({ \ static const char ___fmt[] = fmt; \ unsigned long long ___param[___bpf_narg(args)]; \ \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ ___bpf_fill(___param, args); \ _Pragma("GCC diagnostic pop") \ \ bpf_snprintf(out, out_size, ___fmt, \ ___param, sizeof(___param)); \ }) #ifdef BPF_NO_GLOBAL_DATA #define BPF_PRINTK_FMT_MOD #else #define BPF_PRINTK_FMT_MOD static const #endif #define __bpf_printk(fmt, ...) \ ({ \ BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \ bpf_trace_printk(____fmt, sizeof(____fmt), \ ##__VA_ARGS__); \ }) /* * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments * instead of an array of u64. */ #define __bpf_vprintk(fmt, args...) \ ({ \ static const char ___fmt[] = fmt; \ unsigned long long ___param[___bpf_narg(args)]; \ \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ ___bpf_fill(___param, args); \ _Pragma("GCC diagnostic pop") \ \ bpf_trace_vprintk(___fmt, sizeof(___fmt), \ ___param, sizeof(___param)); \ }) /* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args * Otherwise use __bpf_vprintk */ #define ___bpf_pick_printk(...) \ ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\ __bpf_printk /*1*/, __bpf_printk /*0*/) /* Helper macro to print out debug messages */ #define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) struct bpf_iter_num; extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym; extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym; extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym; #ifndef bpf_for_each /* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for * using BPF open-coded iterators without having to write mundane explicit * low-level loop logic. Instead, it provides for()-like generic construct * that can be used pretty naturally. E.g., for some hypothetical cgroup * iterator, you'd write: * * struct cgroup *cg, *parent_cg = <...>; * * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { * bpf_printk("Child cgroup id = %d", cg->cgroup_id); * if (cg->cgroup_id == 123) * break; * } * * I.e., it looks almost like high-level for each loop in other languages, * supports continue/break, and is verifiable by BPF verifier. * * For iterating integers, the difference between bpf_for_each(num, i, N, M) * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int * *`, not just `int`. So for integers bpf_for() is more convenient. * * Note: this macro relies on C99 feature of allowing to declare variables * inside for() loop, bound to for() loop lifetime. It also utilizes GCC * extension: __attribute__((cleanup())), supported by both GCC and * Clang. */ #define bpf_for_each(type, cur, args...) 
for ( \ /* initialize and define destructor */ \ struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ cleanup(bpf_iter_##type##_destroy))), \ /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ *___p __attribute__((unused)) = ( \ bpf_iter_##type##_new(&___it, ##args), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_##type##_destroy, (void *)0); \ /* iteration and termination check */ \ (((cur) = bpf_iter_##type##_next(&___it))); \ ) #endif /* bpf_for_each */ #ifndef bpf_for /* bpf_for(i, start, end) implements a for()-like looping construct that sets * provided integer variable *i* to values starting from *start* through, * but not including, *end*. It also proves to BPF verifier that *i* belongs * to range [start, end), so this can be used for accessing arrays without * extra checks. * * Note: *start* and *end* are assumed to be expressions with no side effects * and whose values do not change throughout bpf_for() loop execution. They do * not have to be statically known or constant, though. * * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() * loop bound variables and cleanup attribute, supported by GCC and Clang. */ #define bpf_for(i, start, end) for ( \ /* initialize and define destructor */ \ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ cleanup(bpf_iter_num_destroy))), \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ *___p __attribute__((unused)) = ( \ bpf_iter_num_new(&___it, (start), (end)), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_num_destroy, (void *)0); \ ({ \ /* iteration step */ \ int *___t = bpf_iter_num_next(&___it); \ /* termination and bounds check */ \ (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ }); \ ) #endif /* bpf_for */ #ifndef bpf_repeat /* bpf_repeat(N) performs N iterations without exposing iteration number * * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() * loop bound variables and cleanup attribute, supported by GCC and Clang. 
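 *
 * A minimal usage sketch (try_lock() is a hypothetical function, not
 * part of this header): bound a retry loop to at most 16 iterations
 * without maintaining an explicit counter:
 *
 *	bpf_repeat(16) {
 *		if (try_lock())
 *			break;
 *	}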
*/ #define bpf_repeat(N) for ( \ /* initialize and define destructor */ \ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ cleanup(bpf_iter_num_destroy))), \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ *___p __attribute__((unused)) = ( \ bpf_iter_num_new(&___it, 0, (N)), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_num_destroy, (void *)0); \ bpf_iter_num_next(&___it); \ /* nothing here */ \ ) #endif /* bpf_repeat */ #endif xdp-tools-1.5.4/lib/libbpf/src/linker.c0000644000175100001660000023505714706536574017275 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * BPF static linker * * Copyright (c) 2021 Facebook */ #include #include #include #include #include #include #include #include #include #include #include #include #include "libbpf.h" #include "btf.h" #include "libbpf_internal.h" #include "strset.h" #define BTF_EXTERN_SEC ".extern" struct src_sec { const char *sec_name; /* positional (not necessarily ELF) index in an array of sections */ int id; /* positional (not necessarily ELF) index of a matching section in a final object file */ int dst_id; /* section data offset in a matching output section */ int dst_off; /* whether section is omitted from the final ELF file */ bool skipped; /* whether section is an ephemeral section, not mapped to an ELF section */ bool ephemeral; /* ELF info */ size_t sec_idx; Elf_Scn *scn; Elf64_Shdr *shdr; Elf_Data *data; /* corresponding BTF DATASEC type ID */ int sec_type_id; }; struct src_obj { const char *filename; int fd; Elf *elf; /* Section header strings section index */ size_t shstrs_sec_idx; /* SYMTAB section index */ size_t symtab_sec_idx; struct btf *btf; struct btf_ext *btf_ext; /* List of sections (including ephemeral). Slot zero is unused. 
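 * Keeping slot zero unused lets a section id of 0 unambiguously mean
 * "no associated section"; see the .extern handling in
 * linker_append_elf_sym(), where sec_id == 0 means exactly that.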
*/ struct src_sec *secs; int sec_cnt; /* mapping of symbol indices from src to dst ELF */ int *sym_map; /* mapping from the src BTF type IDs to dst ones */ int *btf_type_map; }; /* single .BTF.ext data section */ struct btf_ext_sec_data { size_t rec_cnt; __u32 rec_sz; void *recs; }; struct glob_sym { /* ELF symbol index */ int sym_idx; /* associated section id for .ksyms, .kconfig, etc, but not .extern */ int sec_id; /* extern name offset in STRTAB */ int name_off; /* optional associated BTF type ID */ int btf_id; /* BTF type ID to which VAR/FUNC type is pointing to; used for * rewriting types when extern VAR/FUNC is resolved to a concrete * definition */ int underlying_btf_id; /* sec_var index in the corresponding dst_sec, if exists */ int var_idx; /* extern or resolved/global symbol */ bool is_extern; /* weak or strong symbol, never goes back from strong to weak */ bool is_weak; }; struct dst_sec { char *sec_name; /* positional (not necessarily ELF) index in an array of sections */ int id; bool ephemeral; /* ELF info */ size_t sec_idx; Elf_Scn *scn; Elf64_Shdr *shdr; Elf_Data *data; /* final output section size */ int sec_sz; /* final output contents of the section */ void *raw_data; /* corresponding STT_SECTION symbol index in SYMTAB */ int sec_sym_idx; /* section's DATASEC variable info, emitted on BTF finalization */ bool has_btf; int sec_var_cnt; struct btf_var_secinfo *sec_vars; /* section's .BTF.ext data */ struct btf_ext_sec_data func_info; struct btf_ext_sec_data line_info; struct btf_ext_sec_data core_relo_info; }; struct bpf_linker { char *filename; int fd; Elf *elf; Elf64_Ehdr *elf_hdr; bool swapped_endian; /* Output sections metadata */ struct dst_sec *secs; int sec_cnt; struct strset *strtab_strs; /* STRTAB unique strings */ size_t strtab_sec_idx; /* STRTAB section index */ size_t symtab_sec_idx; /* SYMTAB section index */ struct btf *btf; struct btf_ext *btf_ext; /* global (including extern) ELF symbols */ int glob_sym_cnt; struct glob_sym *glob_syms; }; #define pr_warn_elf(fmt, ...) 
\ libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1)) static int init_output_elf(struct bpf_linker *linker, const char *file); static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts, struct src_obj *obj); static int linker_sanity_check_elf(struct src_obj *obj); static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec); static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec); static int linker_sanity_check_btf(struct src_obj *obj); static int linker_sanity_check_btf_ext(struct src_obj *obj); static int linker_fixup_btf(struct src_obj *obj); static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj); static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj); static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, Elf64_Sym *sym, const char *sym_name, int src_sym_idx); static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj); static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj); static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj); static int finalize_btf(struct bpf_linker *linker); static int finalize_btf_ext(struct bpf_linker *linker); void bpf_linker__free(struct bpf_linker *linker) { int i; if (!linker) return; free(linker->filename); if (linker->elf) elf_end(linker->elf); if (linker->fd >= 0) close(linker->fd); strset__free(linker->strtab_strs); btf__free(linker->btf); btf_ext__free(linker->btf_ext); for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; free(sec->sec_name); free(sec->raw_data); free(sec->sec_vars); free(sec->func_info.recs); free(sec->line_info.recs); free(sec->core_relo_info.recs); } free(linker->secs); free(linker->glob_syms); free(linker); } struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts) { struct bpf_linker *linker; int err; if (!OPTS_VALID(opts, bpf_linker_opts)) return errno = EINVAL, NULL; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn_elf("libelf initialization failed"); return errno = EINVAL, NULL; } linker = calloc(1, sizeof(*linker)); if (!linker) return errno = ENOMEM, NULL; linker->fd = -1; err = init_output_elf(linker, filename); if (err) goto err_out; return linker; err_out: bpf_linker__free(linker); return errno = -err, NULL; } static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name) { struct dst_sec *secs = linker->secs, *sec; size_t new_cnt = linker->sec_cnt ? 
linker->sec_cnt + 1 : 2; secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs)); if (!secs) return NULL; /* zero out newly allocated memory */ memset(secs + linker->sec_cnt, 0, (new_cnt - linker->sec_cnt) * sizeof(*secs)); linker->secs = secs; linker->sec_cnt = new_cnt; sec = &linker->secs[new_cnt - 1]; sec->id = new_cnt - 1; sec->sec_name = strdup(sec_name); if (!sec->sec_name) return NULL; return sec; } static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx) { struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx]; Elf64_Sym *syms, *sym; size_t sym_cnt = symtab->sec_sz / sizeof(*sym); syms = libbpf_reallocarray(symtab->raw_data, sym_cnt + 1, sizeof(*sym)); if (!syms) return NULL; sym = &syms[sym_cnt]; memset(sym, 0, sizeof(*sym)); symtab->raw_data = syms; symtab->sec_sz += sizeof(*sym); symtab->shdr->sh_size += sizeof(*sym); symtab->data->d_size += sizeof(*sym); if (sym_idx) *sym_idx = sym_cnt; return sym; } static int init_output_elf(struct bpf_linker *linker, const char *file) { int err, str_off; Elf64_Sym *init_sym; struct dst_sec *sec; linker->filename = strdup(file); if (!linker->filename) return -ENOMEM; linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); if (linker->fd < 0) { err = -errno; pr_warn("failed to create '%s': %d\n", file, err); return err; } linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL); if (!linker->elf) { pr_warn_elf("failed to create ELF object"); return -EINVAL; } /* ELF header */ linker->elf_hdr = elf64_newehdr(linker->elf); if (!linker->elf_hdr) { pr_warn_elf("failed to create ELF header"); return -EINVAL; } linker->elf_hdr->e_machine = EM_BPF; linker->elf_hdr->e_type = ET_REL; /* Set unknown ELF endianness, assign later from input files */ linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE; /* STRTAB */ /* initialize strset with an empty string to conform to ELF */ linker->strtab_strs = strset__new(INT_MAX, "", sizeof("")); if (libbpf_get_error(linker->strtab_strs)) return libbpf_get_error(linker->strtab_strs); sec = add_dst_sec(linker, ".strtab"); if (!sec) return -ENOMEM; sec->scn = elf_newscn(linker->elf); if (!sec->scn) { pr_warn_elf("failed to create STRTAB section"); return -EINVAL; } sec->shdr = elf64_getshdr(sec->scn); if (!sec->shdr) return -EINVAL; sec->data = elf_newdata(sec->scn); if (!sec->data) { pr_warn_elf("failed to create STRTAB data"); return -EINVAL; } str_off = strset__add_str(linker->strtab_strs, sec->sec_name); if (str_off < 0) return str_off; sec->sec_idx = elf_ndxscn(sec->scn); linker->elf_hdr->e_shstrndx = sec->sec_idx; linker->strtab_sec_idx = sec->sec_idx; sec->shdr->sh_name = str_off; sec->shdr->sh_type = SHT_STRTAB; sec->shdr->sh_flags = SHF_STRINGS; sec->shdr->sh_offset = 0; sec->shdr->sh_link = 0; sec->shdr->sh_info = 0; sec->shdr->sh_addralign = 1; sec->shdr->sh_size = sec->sec_sz = 0; sec->shdr->sh_entsize = 0; /* SYMTAB */ sec = add_dst_sec(linker, ".symtab"); if (!sec) return -ENOMEM; sec->scn = elf_newscn(linker->elf); if (!sec->scn) { pr_warn_elf("failed to create SYMTAB section"); return -EINVAL; } sec->shdr = elf64_getshdr(sec->scn); if (!sec->shdr) return -EINVAL; sec->data = elf_newdata(sec->scn); if (!sec->data) { pr_warn_elf("failed to create SYMTAB data"); return -EINVAL; } /* Ensure libelf translates byte-order of symbol records */ sec->data->d_type = ELF_T_SYM; str_off = strset__add_str(linker->strtab_strs, sec->sec_name); if (str_off < 0) return str_off; sec->sec_idx = elf_ndxscn(sec->scn); linker->symtab_sec_idx = sec->sec_idx; sec->shdr->sh_name = str_off; 
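/* The remaining SYMTAB header fields follow the ELF spec: sh_link
 * must reference the string table that provides the symbol names, and
 * sh_entsize must be the fixed Elf64_Sym record size, otherwise
 * consumers cannot iterate the symbol records.
 */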
sec->shdr->sh_type = SHT_SYMTAB; sec->shdr->sh_flags = 0; sec->shdr->sh_offset = 0; sec->shdr->sh_link = linker->strtab_sec_idx; /* sh_info should be one greater than the index of the last local * symbol (i.e., binding is STB_LOCAL). But why and who cares? */ sec->shdr->sh_info = 0; sec->shdr->sh_addralign = 8; sec->shdr->sh_entsize = sizeof(Elf64_Sym); /* .BTF */ linker->btf = btf__new_empty(); err = libbpf_get_error(linker->btf); if (err) return err; /* add the special all-zero symbol */ init_sym = add_new_sym(linker, NULL); if (!init_sym) return -EINVAL; init_sym->st_name = 0; init_sym->st_info = 0; init_sym->st_other = 0; init_sym->st_shndx = SHN_UNDEF; init_sym->st_value = 0; init_sym->st_size = 0; return 0; } int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts) { struct src_obj obj = {}; int err = 0; if (!OPTS_VALID(opts, bpf_linker_file_opts)) return libbpf_err(-EINVAL); if (!linker->elf) return libbpf_err(-EINVAL); err = err ?: linker_load_obj_file(linker, filename, opts, &obj); err = err ?: linker_append_sec_data(linker, &obj); err = err ?: linker_append_elf_syms(linker, &obj); err = err ?: linker_append_elf_relos(linker, &obj); err = err ?: linker_append_btf(linker, &obj); err = err ?: linker_append_btf_ext(linker, &obj); /* free up src_obj resources */ free(obj.btf_type_map); btf__free(obj.btf); btf_ext__free(obj.btf_ext); free(obj.secs); free(obj.sym_map); if (obj.elf) elf_end(obj.elf); if (obj.fd >= 0) close(obj.fd); return libbpf_err(err); } static bool is_dwarf_sec_name(const char *name) { /* approximation, but the actual list is too long */ return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; } static bool is_ignored_sec(struct src_sec *sec) { Elf64_Shdr *shdr = sec->shdr; const char *name = sec->sec_name; /* no special handling of .strtab */ if (shdr->sh_type == SHT_STRTAB) return true; /* ignore .llvm_addrsig section as well */ if (shdr->sh_type == SHT_LLVM_ADDRSIG) return true; /* no subprograms will lead to an empty .text section, ignore it */ if (shdr->sh_type == SHT_PROGBITS && shdr->sh_size == 0 && strcmp(sec->sec_name, ".text") == 0) return true; /* DWARF sections */ if (is_dwarf_sec_name(sec->sec_name)) return true; if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { name += sizeof(".rel") - 1; /* DWARF section relocations */ if (is_dwarf_sec_name(name)) return true; /* .BTF and .BTF.ext don't need relocations */ if (strcmp(name, BTF_ELF_SEC) == 0 || strcmp(name, BTF_EXT_ELF_SEC) == 0) return true; } return false; } static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name) { struct src_sec *secs = obj->secs, *sec; size_t new_cnt = obj->sec_cnt ? 
obj->sec_cnt + 1 : 2; secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs)); if (!secs) return NULL; /* zero out newly allocated memory */ memset(secs + obj->sec_cnt, 0, (new_cnt - obj->sec_cnt) * sizeof(*secs)); obj->secs = secs; obj->sec_cnt = new_cnt; sec = &obj->secs[new_cnt - 1]; sec->id = new_cnt - 1; sec->sec_name = sec_name; return sec; } static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts, struct src_obj *obj) { int err = 0; Elf_Scn *scn; Elf_Data *data; Elf64_Ehdr *ehdr; Elf64_Shdr *shdr; struct src_sec *sec; unsigned char obj_byteorder; unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA]; #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ const unsigned char host_byteorder = ELFDATA2LSB; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ const unsigned char host_byteorder = ELFDATA2MSB; #else #error "Unknown __BYTE_ORDER__" #endif pr_debug("linker: adding object file '%s'...\n", filename); obj->filename = filename; obj->fd = open(filename, O_RDONLY | O_CLOEXEC); if (obj->fd < 0) { err = -errno; pr_warn("failed to open file '%s': %d\n", filename, err); return err; } obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); if (!obj->elf) { err = -errno; pr_warn_elf("failed to parse ELF file '%s'", filename); return err; } /* Sanity check ELF file high-level properties */ ehdr = elf64_getehdr(obj->elf); if (!ehdr) { err = -errno; pr_warn_elf("failed to get ELF header for %s", filename); return err; } /* Linker output endianness set by first input object */ obj_byteorder = ehdr->e_ident[EI_DATA]; if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) { err = -EOPNOTSUPP; pr_warn("unknown byte order of ELF file %s\n", filename); return err; } if (link_byteorder == ELFDATANONE) { linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder; linker->swapped_endian = obj_byteorder != host_byteorder; pr_debug("linker: set %s-endian output byte order\n", obj_byteorder == ELFDATA2MSB ? 
"big" : "little"); } else if (link_byteorder != obj_byteorder) { err = -EOPNOTSUPP; pr_warn("byte order mismatch with ELF file %s\n", filename); return err; } if (ehdr->e_type != ET_REL || ehdr->e_machine != EM_BPF || ehdr->e_ident[EI_CLASS] != ELFCLASS64) { err = -EOPNOTSUPP; pr_warn_elf("unsupported kind of ELF file %s", filename); return err; } if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) { err = -errno; pr_warn_elf("failed to get SHSTRTAB section index for %s", filename); return err; } scn = NULL; while ((scn = elf_nextscn(obj->elf, scn)) != NULL) { size_t sec_idx = elf_ndxscn(scn); const char *sec_name; shdr = elf64_getshdr(scn); if (!shdr) { err = -errno; pr_warn_elf("failed to get section #%zu header for %s", sec_idx, filename); return err; } sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name); if (!sec_name) { err = -errno; pr_warn_elf("failed to get section #%zu name for %s", sec_idx, filename); return err; } data = elf_getdata(scn, 0); if (!data) { err = -errno; pr_warn_elf("failed to get section #%zu (%s) data from %s", sec_idx, sec_name, filename); return err; } sec = add_src_sec(obj, sec_name); if (!sec) return -ENOMEM; sec->scn = scn; sec->shdr = shdr; sec->data = data; sec->sec_idx = elf_ndxscn(scn); if (is_ignored_sec(sec)) { sec->skipped = true; continue; } switch (shdr->sh_type) { case SHT_SYMTAB: if (obj->symtab_sec_idx) { err = -EOPNOTSUPP; pr_warn("multiple SYMTAB sections found, not supported\n"); return err; } obj->symtab_sec_idx = sec_idx; break; case SHT_STRTAB: /* we'll construct our own string table */ break; case SHT_PROGBITS: if (strcmp(sec_name, BTF_ELF_SEC) == 0) { obj->btf = btf__new(data->d_buf, shdr->sh_size); err = libbpf_get_error(obj->btf); if (err) { pr_warn("failed to parse .BTF from %s: %d\n", filename, err); return err; } sec->skipped = true; continue; } if (strcmp(sec_name, BTF_EXT_ELF_SEC) == 0) { obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size); err = libbpf_get_error(obj->btf_ext); if (err) { pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err); return err; } sec->skipped = true; continue; } /* data & code */ break; case SHT_NOBITS: /* BSS */ break; case SHT_REL: /* relocations */ break; default: pr_warn("unrecognized section #%zu (%s) in %s\n", sec_idx, sec_name, filename); err = -EINVAL; return err; } } err = err ?: linker_sanity_check_elf(obj); err = err ?: linker_sanity_check_btf(obj); err = err ?: linker_sanity_check_btf_ext(obj); err = err ?: linker_fixup_btf(obj); return err; } static int linker_sanity_check_elf(struct src_obj *obj) { struct src_sec *sec; int i, err; if (!obj->symtab_sec_idx) { pr_warn("ELF is missing SYMTAB section in %s\n", obj->filename); return -EINVAL; } if (!obj->shstrs_sec_idx) { pr_warn("ELF is missing section headers STRTAB section in %s\n", obj->filename); return -EINVAL; } for (i = 1; i < obj->sec_cnt; i++) { sec = &obj->secs[i]; if (sec->sec_name[0] == '\0') { pr_warn("ELF section #%zu has empty name in %s\n", sec->sec_idx, obj->filename); return -EINVAL; } if (is_dwarf_sec_name(sec->sec_name)) continue; if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) { pr_warn("ELF section #%zu alignment %llu is non pow-of-2 alignment in %s\n", sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign, obj->filename); return -EINVAL; } if (sec->shdr->sh_addralign != sec->data->d_align) { pr_warn("ELF section #%zu has inconsistent alignment addr=%llu != d=%llu in %s\n", sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign, (long long 
unsigned)sec->data->d_align, obj->filename); return -EINVAL; } if (sec->shdr->sh_size != sec->data->d_size) { pr_warn("ELF section #%zu has inconsistent section size sh=%llu != d=%llu in %s\n", sec->sec_idx, (long long unsigned)sec->shdr->sh_size, (long long unsigned)sec->data->d_size, obj->filename); return -EINVAL; } switch (sec->shdr->sh_type) { case SHT_SYMTAB: err = linker_sanity_check_elf_symtab(obj, sec); if (err) return err; break; case SHT_STRTAB: break; case SHT_PROGBITS: if (sec->shdr->sh_flags & SHF_EXECINSTR) { if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0) { pr_warn("ELF section #%zu has unexpected size alignment %llu in %s\n", sec->sec_idx, (long long unsigned)sec->shdr->sh_size, obj->filename); return -EINVAL; } } break; case SHT_NOBITS: break; case SHT_REL: err = linker_sanity_check_elf_relos(obj, sec); if (err) return err; break; case SHT_LLVM_ADDRSIG: break; default: pr_warn("ELF section #%zu (%s) has unrecognized type %zu in %s\n", sec->sec_idx, sec->sec_name, (size_t)sec->shdr->sh_type, obj->filename); return -EINVAL; } } return 0; } static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec) { struct src_sec *link_sec; Elf64_Sym *sym; int i, n; if (sec->shdr->sh_entsize != sizeof(Elf64_Sym)) return -EINVAL; if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0) return -EINVAL; if (!sec->shdr->sh_link || sec->shdr->sh_link >= obj->sec_cnt) { pr_warn("ELF SYMTAB section #%zu points to missing STRTAB section #%zu in %s\n", sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); return -EINVAL; } link_sec = &obj->secs[sec->shdr->sh_link]; if (link_sec->shdr->sh_type != SHT_STRTAB) { pr_warn("ELF SYMTAB section #%zu points to invalid STRTAB section #%zu in %s\n", sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); return -EINVAL; } n = sec->shdr->sh_size / sec->shdr->sh_entsize; sym = sec->data->d_buf; for (i = 0; i < n; i++, sym++) { int sym_type = ELF64_ST_TYPE(sym->st_info); int sym_bind = ELF64_ST_BIND(sym->st_info); int sym_vis = ELF64_ST_VISIBILITY(sym->st_other); if (i == 0) { if (sym->st_name != 0 || sym->st_info != 0 || sym->st_other != 0 || sym->st_shndx != 0 || sym->st_value != 0 || sym->st_size != 0) { pr_warn("ELF sym #0 is invalid in %s\n", obj->filename); return -EINVAL; } continue; } if (sym_bind != STB_LOCAL && sym_bind != STB_GLOBAL && sym_bind != STB_WEAK) { pr_warn("ELF sym #%d in section #%zu has unsupported symbol binding %d\n", i, sec->sec_idx, sym_bind); return -EINVAL; } if (sym_vis != STV_DEFAULT && sym_vis != STV_HIDDEN) { pr_warn("ELF sym #%d in section #%zu has unsupported symbol visibility %d\n", i, sec->sec_idx, sym_vis); return -EINVAL; } if (sym->st_shndx == 0) { if (sym_type != STT_NOTYPE || sym_bind == STB_LOCAL || sym->st_value != 0 || sym->st_size != 0) { pr_warn("ELF sym #%d is invalid extern symbol in %s\n", i, obj->filename); return -EINVAL; } continue; } if (sym->st_shndx < SHN_LORESERVE && sym->st_shndx >= obj->sec_cnt) { pr_warn("ELF sym #%d in section #%zu points to missing section #%zu in %s\n", i, sec->sec_idx, (size_t)sym->st_shndx, obj->filename); return -EINVAL; } if (sym_type == STT_SECTION) { if (sym->st_value != 0) return -EINVAL; continue; } } return 0; } static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec) { struct src_sec *link_sec, *sym_sec; Elf64_Rel *relo; int i, n; if (sec->shdr->sh_entsize != sizeof(Elf64_Rel)) return -EINVAL; if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0) return -EINVAL; /* SHT_REL's sh_link should point to SYMTAB */ if 
(sec->shdr->sh_link != obj->symtab_sec_idx) { pr_warn("ELF relo section #%zu points to invalid SYMTAB section #%zu in %s\n", sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); return -EINVAL; } /* SHT_REL's sh_info points to relocated section */ if (!sec->shdr->sh_info || sec->shdr->sh_info >= obj->sec_cnt) { pr_warn("ELF relo section #%zu points to missing section #%zu in %s\n", sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename); return -EINVAL; } link_sec = &obj->secs[sec->shdr->sh_info]; /* .rel<secname> -> <secname> pattern is followed */ if (strncmp(sec->sec_name, ".rel", sizeof(".rel") - 1) != 0 || strcmp(sec->sec_name + sizeof(".rel") - 1, link_sec->sec_name) != 0) { pr_warn("ELF relo section #%zu has invalid name in %s\n", sec->sec_idx, obj->filename); return -EINVAL; } /* don't further validate relocations for ignored sections */ if (link_sec->skipped) return 0; /* relocatable section is data or instructions */ if (link_sec->shdr->sh_type != SHT_PROGBITS && link_sec->shdr->sh_type != SHT_NOBITS) { pr_warn("ELF relo section #%zu points to invalid section #%zu in %s\n", sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename); return -EINVAL; } /* check sanity of each relocation */ n = sec->shdr->sh_size / sec->shdr->sh_entsize; relo = sec->data->d_buf; sym_sec = &obj->secs[obj->symtab_sec_idx]; for (i = 0; i < n; i++, relo++) { size_t sym_idx = ELF64_R_SYM(relo->r_info); size_t sym_type = ELF64_R_TYPE(relo->r_info); if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32 && sym_type != R_BPF_64_ABS64 && sym_type != R_BPF_64_ABS32) { pr_warn("ELF relo #%d in section #%zu has unexpected type %zu in %s\n", i, sec->sec_idx, sym_type, obj->filename); return -EINVAL; } if (!sym_idx || sym_idx * sizeof(Elf64_Sym) >= sym_sec->shdr->sh_size) { pr_warn("ELF relo #%d in section #%zu points to invalid symbol #%zu in %s\n", i, sec->sec_idx, sym_idx, obj->filename); return -EINVAL; } if (link_sec->shdr->sh_flags & SHF_EXECINSTR) { if (relo->r_offset % sizeof(struct bpf_insn) != 0) { pr_warn("ELF relo #%d in section #%zu has insn-misaligned offset %zu in %s\n", i, sec->sec_idx, (size_t)relo->r_offset, obj->filename); return -EINVAL; } } } return 0; } static int check_btf_type_id(__u32 *type_id, void *ctx) { struct btf *btf = ctx; if (*type_id >= btf__type_cnt(btf)) return -EINVAL; return 0; } static int check_btf_str_off(__u32 *str_off, void *ctx) { struct btf *btf = ctx; const char *s; s = btf__str_by_offset(btf, *str_off); if (!s) return -EINVAL; return 0; } static int linker_sanity_check_btf(struct src_obj *obj) { struct btf_type *t; int i, n, err; if (!obj->btf) return 0; n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { struct btf_field_iter it; __u32 *type_id, *str_off; t = btf_type_by_id(obj->btf, i); err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) return err; while ((type_id = btf_field_iter_next(&it))) { if (*type_id >= n) return -EINVAL; } err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (err) return err; while ((str_off = btf_field_iter_next(&it))) { if (!btf__str_by_offset(obj->btf, *str_off)) return -EINVAL; } } return 0; } static int linker_sanity_check_btf_ext(struct src_obj *obj) { int err = 0; if (!obj->btf_ext) return 0; /* can't use .BTF.ext without .BTF */ if (!obj->btf) return -EINVAL; err = err ?: btf_ext_visit_type_ids(obj->btf_ext, check_btf_type_id, obj->btf); err = err ?: btf_ext_visit_str_offs(obj->btf_ext, check_btf_str_off, obj->btf); if (err) return err; return 0; } static int init_sec(struct bpf_linker *linker, struct dst_sec *dst_sec, struct src_sec
*src_sec) { Elf_Scn *scn; Elf_Data *data; Elf64_Shdr *shdr; int name_off; dst_sec->sec_sz = 0; dst_sec->sec_idx = 0; dst_sec->ephemeral = src_sec->ephemeral; /* ephemeral sections are just thin section shells lacking most parts */ if (src_sec->ephemeral) return 0; scn = elf_newscn(linker->elf); if (!scn) return -ENOMEM; data = elf_newdata(scn); if (!data) return -ENOMEM; shdr = elf64_getshdr(scn); if (!shdr) return -ENOMEM; dst_sec->scn = scn; dst_sec->shdr = shdr; dst_sec->data = data; dst_sec->sec_idx = elf_ndxscn(scn); name_off = strset__add_str(linker->strtab_strs, src_sec->sec_name); if (name_off < 0) return name_off; shdr->sh_name = name_off; shdr->sh_type = src_sec->shdr->sh_type; shdr->sh_flags = src_sec->shdr->sh_flags; shdr->sh_size = 0; /* sh_link and sh_info have different meaning for different types of * sections, so we leave it up to the caller code to fill them in, if * necessary */ shdr->sh_link = 0; shdr->sh_info = 0; shdr->sh_addralign = src_sec->shdr->sh_addralign; shdr->sh_entsize = src_sec->shdr->sh_entsize; data->d_type = src_sec->data->d_type; data->d_size = 0; data->d_buf = NULL; data->d_align = src_sec->data->d_align; data->d_off = 0; return 0; } static struct dst_sec *find_dst_sec_by_name(struct bpf_linker *linker, const char *sec_name) { struct dst_sec *sec; int i; for (i = 1; i < linker->sec_cnt; i++) { sec = &linker->secs[i]; if (strcmp(sec->sec_name, sec_name) == 0) return sec; } return NULL; } static bool secs_match(struct dst_sec *dst, struct src_sec *src) { if (dst->ephemeral || src->ephemeral) return true; if (dst->shdr->sh_type != src->shdr->sh_type) { pr_warn("sec %s types mismatch\n", dst->sec_name); return false; } if (dst->shdr->sh_flags != src->shdr->sh_flags) { pr_warn("sec %s flags mismatch\n", dst->sec_name); return false; } if (dst->shdr->sh_entsize != src->shdr->sh_entsize) { pr_warn("sec %s entsize mismatch\n", dst->sec_name); return false; } return true; } static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec) { if (dst_sec->sec_sz != src_sec->shdr->sh_size) return false; if (memcmp(dst_sec->raw_data, src_sec->data->d_buf, dst_sec->sec_sz) != 0) return false; return true; } static bool is_exec_sec(struct dst_sec *sec) { if (!sec || sec->ephemeral) return false; return (sec->shdr->sh_type == SHT_PROGBITS) && (sec->shdr->sh_flags & SHF_EXECINSTR); } static void exec_sec_bswap(void *raw_data, int size) { const int insn_cnt = size / sizeof(struct bpf_insn); struct bpf_insn *insn = raw_data; int i; for (i = 0; i < insn_cnt; i++, insn++) bpf_insn_bswap(insn); } static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src) { void *tmp; size_t dst_align, src_align; size_t dst_align_sz, dst_final_sz; int err; /* Ephemeral source section doesn't contribute anything to ELF * section data. */ if (src->ephemeral) return 0; /* Some sections (like .maps) can contain both externs (and thus be * ephemeral) and non-externs (map definitions). So it's possible that * it has to be "upgraded" from ephemeral to non-ephemeral when the * first non-ephemeral entity appears. In such case, we add ELF * section, data, etc. 
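 *
 * (The append offset computed below is rounded up to the source
 * section's alignment: with, say, dst->sec_sz == 10 and a source
 * alignment of 8, new data lands at offset 16 and the six padding
 * bytes in between are zeroed.)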
*/ if (dst->ephemeral) { err = init_sec(linker, dst, src); if (err) return err; } dst_align = dst->shdr->sh_addralign; src_align = src->shdr->sh_addralign; if (dst_align == 0) dst_align = 1; if (dst_align < src_align) dst_align = src_align; dst_align_sz = (dst->sec_sz + dst_align - 1) / dst_align * dst_align; /* no need to re-align final size */ dst_final_sz = dst_align_sz + src->shdr->sh_size; if (src->shdr->sh_type != SHT_NOBITS) { tmp = realloc(dst->raw_data, dst_final_sz); /* If dst_align_sz == 0, realloc() behaves in a special way: * 1. When dst->raw_data is NULL it returns: * "either NULL or a pointer suitable to be passed to free()" [1]. * 2. When dst->raw_data is not-NULL it frees dst->raw_data and returns NULL, * thus invalidating any "pointer suitable to be passed to free()" obtained * at step (1). * * The dst_align_sz > 0 check avoids error exit after (2), otherwise * dst->raw_data would be freed again in bpf_linker__free(). * * [1] man 3 realloc */ if (!tmp && dst_align_sz > 0) return -ENOMEM; dst->raw_data = tmp; /* pad dst section, if it's alignment forced size increase */ memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz); /* now copy src data at a properly aligned offset */ memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size); /* convert added bpf insns to native byte-order */ if (linker->swapped_endian && is_exec_sec(dst)) exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size); } dst->sec_sz = dst_final_sz; dst->shdr->sh_size = dst_final_sz; dst->data->d_size = dst_final_sz; dst->shdr->sh_addralign = dst_align; dst->data->d_align = dst_align; src->dst_off = dst_align_sz; return 0; } static bool is_data_sec(struct src_sec *sec) { if (!sec || sec->skipped) return false; /* ephemeral sections are data sections, e.g., .kconfig, .ksyms */ if (sec->ephemeral) return true; return sec->shdr->sh_type == SHT_PROGBITS || sec->shdr->sh_type == SHT_NOBITS; } static bool is_relo_sec(struct src_sec *sec) { if (!sec || sec->skipped || sec->ephemeral) return false; return sec->shdr->sh_type == SHT_REL; } static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj) { int i, err; for (i = 1; i < obj->sec_cnt; i++) { struct src_sec *src_sec; struct dst_sec *dst_sec; src_sec = &obj->secs[i]; if (!is_data_sec(src_sec)) continue; dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name); if (!dst_sec) { dst_sec = add_dst_sec(linker, src_sec->sec_name); if (!dst_sec) return -ENOMEM; err = init_sec(linker, dst_sec, src_sec); if (err) { pr_warn("failed to init section '%s'\n", src_sec->sec_name); return err; } } else { if (!secs_match(dst_sec, src_sec)) { pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name); return -1; } /* "license" and "version" sections are deduped */ if (strcmp(src_sec->sec_name, "license") == 0 || strcmp(src_sec->sec_name, "version") == 0) { if (!sec_content_is_same(dst_sec, src_sec)) { pr_warn("non-identical contents of section '%s' are not supported\n", src_sec->sec_name); return -EINVAL; } src_sec->skipped = true; src_sec->dst_id = dst_sec->id; continue; } } /* record mapped section index */ src_sec->dst_id = dst_sec->id; err = extend_sec(linker, dst_sec, src_sec); if (err) return err; } return 0; } static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj) { struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx]; Elf64_Sym *sym = symtab->data->d_buf; int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize, err; int str_sec_idx = symtab->shdr->sh_link; const char 
*sym_name; obj->sym_map = calloc(n + 1, sizeof(*obj->sym_map)); if (!obj->sym_map) return -ENOMEM; for (i = 0; i < n; i++, sym++) { /* We already validated all-zero symbol #0 and we already * appended it preventively to the final SYMTAB, so skip it. */ if (i == 0) continue; sym_name = elf_strptr(obj->elf, str_sec_idx, sym->st_name); if (!sym_name) { pr_warn("can't fetch symbol name for symbol #%d in '%s'\n", i, obj->filename); return -EINVAL; } err = linker_append_elf_sym(linker, obj, sym, sym_name, i); if (err) return err; } return 0; } static Elf64_Sym *get_sym_by_idx(struct bpf_linker *linker, size_t sym_idx) { struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx]; Elf64_Sym *syms = symtab->raw_data; return &syms[sym_idx]; } static struct glob_sym *find_glob_sym(struct bpf_linker *linker, const char *sym_name) { struct glob_sym *glob_sym; const char *name; int i; for (i = 0; i < linker->glob_sym_cnt; i++) { glob_sym = &linker->glob_syms[i]; name = strset__data(linker->strtab_strs) + glob_sym->name_off; if (strcmp(name, sym_name) == 0) return glob_sym; } return NULL; } static struct glob_sym *add_glob_sym(struct bpf_linker *linker) { struct glob_sym *syms, *sym; syms = libbpf_reallocarray(linker->glob_syms, linker->glob_sym_cnt + 1, sizeof(*linker->glob_syms)); if (!syms) return NULL; sym = &syms[linker->glob_sym_cnt]; memset(sym, 0, sizeof(*sym)); sym->var_idx = -1; linker->glob_syms = syms; linker->glob_sym_cnt++; return sym; } static bool glob_sym_btf_matches(const char *sym_name, bool exact, const struct btf *btf1, __u32 id1, const struct btf *btf2, __u32 id2) { const struct btf_type *t1, *t2; bool is_static1, is_static2; const char *n1, *n2; int i, n; recur: n1 = n2 = NULL; t1 = skip_mods_and_typedefs(btf1, id1, &id1); t2 = skip_mods_and_typedefs(btf2, id2, &id2); /* check if only one side is FWD, otherwise handle with common logic */ if (!exact && btf_is_fwd(t1) != btf_is_fwd(t2)) { n1 = btf__str_by_offset(btf1, t1->name_off); n2 = btf__str_by_offset(btf2, t2->name_off); if (strcmp(n1, n2) != 0) { pr_warn("global '%s': incompatible forward declaration names '%s' and '%s'\n", sym_name, n1, n2); return false; } /* validate if FWD kind matches concrete kind */ if (btf_is_fwd(t1)) { if (btf_kflag(t1) && btf_is_union(t2)) return true; if (!btf_kflag(t1) && btf_is_struct(t2)) return true; pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n", sym_name, btf_kflag(t1) ? "union" : "struct", btf_kind_str(t2)); } else { if (btf_kflag(t2) && btf_is_union(t1)) return true; if (!btf_kflag(t2) && btf_is_struct(t1)) return true; pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n", sym_name, btf_kflag(t2) ? 
"union" : "struct", btf_kind_str(t1)); } return false; } if (btf_kind(t1) != btf_kind(t2)) { pr_warn("global '%s': incompatible BTF kinds %s and %s\n", sym_name, btf_kind_str(t1), btf_kind_str(t2)); return false; } switch (btf_kind(t1)) { case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FWD: case BTF_KIND_FUNC: case BTF_KIND_VAR: n1 = btf__str_by_offset(btf1, t1->name_off); n2 = btf__str_by_offset(btf2, t2->name_off); if (strcmp(n1, n2) != 0) { pr_warn("global '%s': incompatible %s names '%s' and '%s'\n", sym_name, btf_kind_str(t1), n1, n2); return false; } break; default: break; } switch (btf_kind(t1)) { case BTF_KIND_UNKN: /* void */ case BTF_KIND_FWD: return true; case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* ignore encoding for int and enum values for enum */ if (t1->size != t2->size) { pr_warn("global '%s': incompatible %s '%s' size %u and %u\n", sym_name, btf_kind_str(t1), n1, t1->size, t2->size); return false; } return true; case BTF_KIND_PTR: /* just validate overall shape of the referenced type, so no * contents comparison for struct/union, and allowed fwd vs * struct/union */ exact = false; id1 = t1->type; id2 = t2->type; goto recur; case BTF_KIND_ARRAY: /* ignore index type and array size */ id1 = btf_array(t1)->type; id2 = btf_array(t2)->type; goto recur; case BTF_KIND_FUNC: /* extern and global linkages are compatible */ is_static1 = btf_func_linkage(t1) == BTF_FUNC_STATIC; is_static2 = btf_func_linkage(t2) == BTF_FUNC_STATIC; if (is_static1 != is_static2) { pr_warn("global '%s': incompatible func '%s' linkage\n", sym_name, n1); return false; } id1 = t1->type; id2 = t2->type; goto recur; case BTF_KIND_VAR: /* extern and global linkages are compatible */ is_static1 = btf_var(t1)->linkage == BTF_VAR_STATIC; is_static2 = btf_var(t2)->linkage == BTF_VAR_STATIC; if (is_static1 != is_static2) { pr_warn("global '%s': incompatible var '%s' linkage\n", sym_name, n1); return false; } id1 = t1->type; id2 = t2->type; goto recur; case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m1, *m2; if (!exact) return true; if (btf_vlen(t1) != btf_vlen(t2)) { pr_warn("global '%s': incompatible number of %s fields %u and %u\n", sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2)); return false; } n = btf_vlen(t1); m1 = btf_members(t1); m2 = btf_members(t2); for (i = 0; i < n; i++, m1++, m2++) { n1 = btf__str_by_offset(btf1, m1->name_off); n2 = btf__str_by_offset(btf2, m2->name_off); if (strcmp(n1, n2) != 0) { pr_warn("global '%s': incompatible field #%d names '%s' and '%s'\n", sym_name, i, n1, n2); return false; } if (m1->offset != m2->offset) { pr_warn("global '%s': incompatible field #%d ('%s') offsets\n", sym_name, i, n1); return false; } if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type)) return false; } return true; } case BTF_KIND_FUNC_PROTO: { const struct btf_param *m1, *m2; if (btf_vlen(t1) != btf_vlen(t2)) { pr_warn("global '%s': incompatible number of %s params %u and %u\n", sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2)); return false; } n = btf_vlen(t1); m1 = btf_params(t1); m2 = btf_params(t2); for (i = 0; i < n; i++, m1++, m2++) { /* ignore func arg names */ if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type)) return false; } /* now check return type as well */ id1 = t1->type; id2 = t2->type; goto recur; } /* skip_mods_and_typedefs() make this impossible */ case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: 
case BTF_KIND_RESTRICT: /* DATASECs are never compared with each other */ case BTF_KIND_DATASEC: default: pr_warn("global '%s': unsupported BTF kind %s\n", sym_name, btf_kind_str(t1)); return false; } } static bool map_defs_match(const char *sym_name, const struct btf *main_btf, const struct btf_map_def *main_def, const struct btf_map_def *main_inner_def, const struct btf *extra_btf, const struct btf_map_def *extra_def, const struct btf_map_def *extra_inner_def) { const char *reason; if (main_def->map_type != extra_def->map_type) { reason = "type"; goto mismatch; } /* check key type/size match */ if (main_def->key_size != extra_def->key_size) { reason = "key_size"; goto mismatch; } if (!!main_def->key_type_id != !!extra_def->key_type_id) { reason = "key type"; goto mismatch; } if ((main_def->parts & MAP_DEF_KEY_TYPE) && !glob_sym_btf_matches(sym_name, true /*exact*/, main_btf, main_def->key_type_id, extra_btf, extra_def->key_type_id)) { reason = "key type"; goto mismatch; } /* validate value type/size match */ if (main_def->value_size != extra_def->value_size) { reason = "value_size"; goto mismatch; } if (!!main_def->value_type_id != !!extra_def->value_type_id) { reason = "value type"; goto mismatch; } if ((main_def->parts & MAP_DEF_VALUE_TYPE) && !glob_sym_btf_matches(sym_name, true /*exact*/, main_btf, main_def->value_type_id, extra_btf, extra_def->value_type_id)) { reason = "value type"; goto mismatch; } if (main_def->max_entries != extra_def->max_entries) { reason = "max_entries"; goto mismatch; } if (main_def->map_flags != extra_def->map_flags) { reason = "map_flags"; goto mismatch; } if (main_def->numa_node != extra_def->numa_node) { reason = "numa_node"; goto mismatch; } if (main_def->pinning != extra_def->pinning) { reason = "pinning"; goto mismatch; } if ((main_def->parts & MAP_DEF_INNER_MAP) != (extra_def->parts & MAP_DEF_INNER_MAP)) { reason = "inner map"; goto mismatch; } if (main_def->parts & MAP_DEF_INNER_MAP) { char inner_map_name[128]; snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", sym_name); return map_defs_match(inner_map_name, main_btf, main_inner_def, NULL, extra_btf, extra_inner_def, NULL); } return true; mismatch: pr_warn("global '%s': map %s mismatch\n", sym_name, reason); return false; } static bool glob_map_defs_match(const char *sym_name, struct bpf_linker *linker, struct glob_sym *glob_sym, struct src_obj *obj, Elf64_Sym *sym, int btf_id) { struct btf_map_def dst_def = {}, dst_inner_def = {}; struct btf_map_def src_def = {}, src_inner_def = {}; const struct btf_type *t; int err; t = btf__type_by_id(obj->btf, btf_id); if (!btf_is_var(t)) { pr_warn("global '%s': invalid map definition type [%d]\n", sym_name, btf_id); return false; } t = skip_mods_and_typedefs(obj->btf, t->type, NULL); err = parse_btf_map_def(sym_name, obj->btf, t, true /*strict*/, &src_def, &src_inner_def); if (err) { pr_warn("global '%s': invalid map definition\n", sym_name); return false; } /* re-parse existing map definition */ t = btf__type_by_id(linker->btf, glob_sym->btf_id); t = skip_mods_and_typedefs(linker->btf, t->type, NULL); err = parse_btf_map_def(sym_name, linker->btf, t, true /*strict*/, &dst_def, &dst_inner_def); if (err) { /* this should not happen, because we already validated it */ pr_warn("global '%s': invalid dst map definition\n", sym_name); return false; } /* Currently extern map definition has to be complete and match * concrete map definition exactly. This restriction might be lifted * in the future.
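 *
 * For example (a hypothetical declaration, not taken from this file),
 * an extern map such as
 *
 *	extern struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(max_entries, 16);
 *	} shared_map SEC(".maps");
 *
 * only resolves against a concrete definition that spells out exactly
 * the same map type, key/value types, max_entries, flags and pinning.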
*/ return map_defs_match(sym_name, linker->btf, &dst_def, &dst_inner_def, obj->btf, &src_def, &src_inner_def); } static bool glob_syms_match(const char *sym_name, struct bpf_linker *linker, struct glob_sym *glob_sym, struct src_obj *obj, Elf64_Sym *sym, size_t sym_idx, int btf_id) { const struct btf_type *src_t; /* if we are dealing with externs, BTF types describing both global * and extern VARs/FUNCs should be completely present in all files */ if (!glob_sym->btf_id || !btf_id) { pr_warn("BTF info is missing for global symbol '%s'\n", sym_name); return false; } src_t = btf__type_by_id(obj->btf, btf_id); if (!btf_is_var(src_t) && !btf_is_func(src_t)) { pr_warn("only extern variables and functions are supported, but got '%s' for '%s'\n", btf_kind_str(src_t), sym_name); return false; } /* deal with .maps definitions specially */ if (glob_sym->sec_id && strcmp(linker->secs[glob_sym->sec_id].sec_name, MAPS_ELF_SEC) == 0) return glob_map_defs_match(sym_name, linker, glob_sym, obj, sym, btf_id); if (!glob_sym_btf_matches(sym_name, true /*exact*/, linker->btf, glob_sym->btf_id, obj->btf, btf_id)) return false; return true; } static bool btf_is_non_static(const struct btf_type *t) { return (btf_is_var(t) && btf_var(t)->linkage != BTF_VAR_STATIC) || (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_STATIC); } static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name, int *out_btf_sec_id, int *out_btf_id) { int i, j, n, m, btf_id = 0; const struct btf_type *t; const struct btf_var_secinfo *vi; const char *name; if (!obj->btf) { pr_warn("failed to find BTF info for object '%s'\n", obj->filename); return -EINVAL; } n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { t = btf__type_by_id(obj->btf, i); /* some global and extern FUNCs and VARs might not be associated with any * DATASEC, so try to detect them in the same pass */ if (btf_is_non_static(t)) { name = btf__str_by_offset(obj->btf, t->name_off); if (strcmp(name, sym_name) != 0) continue; /* remember and still try to find DATASEC */ btf_id = i; continue; } if (!btf_is_datasec(t)) continue; vi = btf_var_secinfos(t); for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { t = btf__type_by_id(obj->btf, vi->type); name = btf__str_by_offset(obj->btf, t->name_off); if (strcmp(name, sym_name) != 0) continue; if (btf_is_var(t) && btf_var(t)->linkage == BTF_VAR_STATIC) continue; if (btf_is_func(t) && btf_func_linkage(t) == BTF_FUNC_STATIC) continue; if (btf_id && btf_id != vi->type) { pr_warn("global/extern '%s' BTF is ambiguous: both types #%d and #%u match\n", sym_name, btf_id, vi->type); return -EINVAL; } *out_btf_sec_id = i; *out_btf_id = vi->type; return 0; } } /* free-floating extern or global FUNC */ if (btf_id) { *out_btf_sec_id = 0; *out_btf_id = btf_id; return 0; } pr_warn("failed to find BTF info for global/extern symbol '%s'\n", sym_name); return -ENOENT; } static struct src_sec *find_src_sec_by_name(struct src_obj *obj, const char *sec_name) { struct src_sec *sec; int i; for (i = 1; i < obj->sec_cnt; i++) { sec = &obj->secs[i]; if (strcmp(sec->sec_name, sec_name) == 0) return sec; } return NULL; } static int complete_extern_btf_info(struct btf *dst_btf, int dst_id, struct btf *src_btf, int src_id) { struct btf_type *dst_t = btf_type_by_id(dst_btf, dst_id); struct btf_type *src_t = btf_type_by_id(src_btf, src_id); struct btf_param *src_p, *dst_p; const char *s; int i, n, off; /* We already made sure that source and destination types (FUNC or * VAR) match in terms of types and argument names. 
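 * "Completing" here concretely means: flip the extern's linkage to
 * BTF_VAR_GLOBAL_ALLOCATED (for VARs) or BTF_FUNC_GLOBAL (for FUNCs),
 * and copy argument names into the extern's FUNC_PROTO, since BTF
 * dedup would otherwise treat the named and unnamed copies as
 * different types.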
*/ if (btf_is_var(dst_t)) { btf_var(dst_t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; return 0; } dst_t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_GLOBAL, 0); /* now onto FUNC_PROTO types */ src_t = btf_type_by_id(src_btf, src_t->type); dst_t = btf_type_by_id(dst_btf, dst_t->type); /* Fill in all the argument names, which for extern FUNCs are missing. * We'll end up with two copies of FUNCs/VARs for externs, but that * will be taken care of by BTF dedup at the very end. * It might be that BTF types for extern in one file has less/more BTF * information (e.g., FWD instead of full STRUCT/UNION information), * but that should be (in most cases, subject to BTF dedup rules) * handled and resolved by BTF dedup algorithm as well, so we won't * worry about it. Our only job is to make sure that argument names * are populated on both sides, otherwise BTF dedup will pedantically * consider them different. */ src_p = btf_params(src_t); dst_p = btf_params(dst_t); for (i = 0, n = btf_vlen(dst_t); i < n; i++, src_p++, dst_p++) { if (!src_p->name_off) continue; /* src_btf has more complete info, so add name to dst_btf */ s = btf__str_by_offset(src_btf, src_p->name_off); off = btf__add_str(dst_btf, s); if (off < 0) return off; dst_p->name_off = off; } return 0; } static void sym_update_bind(Elf64_Sym *sym, int sym_bind) { sym->st_info = ELF64_ST_INFO(sym_bind, ELF64_ST_TYPE(sym->st_info)); } static void sym_update_type(Elf64_Sym *sym, int sym_type) { sym->st_info = ELF64_ST_INFO(ELF64_ST_BIND(sym->st_info), sym_type); } static void sym_update_visibility(Elf64_Sym *sym, int sym_vis) { /* libelf doesn't provide setters for ST_VISIBILITY, * but it is stored in the lower 2 bits of st_other */ sym->st_other &= ~0x03; sym->st_other |= sym_vis; } static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, Elf64_Sym *sym, const char *sym_name, int src_sym_idx) { struct src_sec *src_sec = NULL; struct dst_sec *dst_sec = NULL; struct glob_sym *glob_sym = NULL; int name_off, sym_type, sym_bind, sym_vis, err; int btf_sec_id = 0, btf_id = 0; size_t dst_sym_idx; Elf64_Sym *dst_sym; bool sym_is_extern; sym_type = ELF64_ST_TYPE(sym->st_info); sym_bind = ELF64_ST_BIND(sym->st_info); sym_vis = ELF64_ST_VISIBILITY(sym->st_other); sym_is_extern = sym->st_shndx == SHN_UNDEF; if (sym_is_extern) { if (!obj->btf) { pr_warn("externs without BTF info are not supported\n"); return -ENOTSUP; } } else if (sym->st_shndx < SHN_LORESERVE) { src_sec = &obj->secs[sym->st_shndx]; if (src_sec->skipped) return 0; dst_sec = &linker->secs[src_sec->dst_id]; /* allow only one STT_SECTION symbol per section */ if (sym_type == STT_SECTION && dst_sec->sec_sym_idx) { obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx; return 0; } } if (sym_bind == STB_LOCAL) goto add_sym; /* find matching BTF info */ err = find_glob_sym_btf(obj, sym, sym_name, &btf_sec_id, &btf_id); if (err) return err; if (sym_is_extern && btf_sec_id) { const char *sec_name = NULL; const struct btf_type *t; t = btf__type_by_id(obj->btf, btf_sec_id); sec_name = btf__str_by_offset(obj->btf, t->name_off); /* Clang puts unannotated extern vars into * '.extern' BTF DATASEC. Treat them the same * as unannotated extern funcs (which are * currently not put into any DATASECs). * Those don't have associated src_sec/dst_sec. 
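 * E.g. (illustrative): "extern const int LINUX_KERNEL_VERSION __kconfig;"
 * is placed into the .kconfig DATASEC, while a bare "extern int foo;"
 * lands in .extern.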
*/ if (strcmp(sec_name, BTF_EXTERN_SEC) != 0) { src_sec = find_src_sec_by_name(obj, sec_name); if (!src_sec) { pr_warn("failed to find matching ELF sec '%s'\n", sec_name); return -ENOENT; } dst_sec = &linker->secs[src_sec->dst_id]; } } glob_sym = find_glob_sym(linker, sym_name); if (glob_sym) { /* Preventively resolve to existing symbol. This is * needed for further relocation symbol remapping in * the next step of linking. */ obj->sym_map[src_sym_idx] = glob_sym->sym_idx; /* If both symbols are non-externs, at least one of * them has to be STB_WEAK, otherwise they are in * conflict with each other. */ if (!sym_is_extern && !glob_sym->is_extern && !glob_sym->is_weak && sym_bind != STB_WEAK) { pr_warn("conflicting non-weak symbol #%d (%s) definition in '%s'\n", src_sym_idx, sym_name, obj->filename); return -EINVAL; } if (!glob_syms_match(sym_name, linker, glob_sym, obj, sym, src_sym_idx, btf_id)) return -EINVAL; dst_sym = get_sym_by_idx(linker, glob_sym->sym_idx); /* If the new symbol is strong, then force dst_sym to be strong as * well; this way a mix of weak and non-weak extern * definitions will end up being strong. */ if (sym_bind == STB_GLOBAL) { /* We still need to preserve type (NOTYPE or * OBJECT/FUNC, depending on whether the symbol is * extern or not) */ sym_update_bind(dst_sym, STB_GLOBAL); glob_sym->is_weak = false; } /* Non-default visibility is "contaminating", with stricter * visibility overwriting more permissive ones, even if more * permissive visibility comes from just an extern definition. * Currently only STV_DEFAULT and STV_HIDDEN are allowed and * ensured by ELF symbol sanity checks above. */ if (sym_vis > ELF64_ST_VISIBILITY(dst_sym->st_other)) sym_update_visibility(dst_sym, sym_vis); /* If the new symbol is extern, then regardless of whether the * existing symbol is extern or a resolved global, just * keep the existing one untouched. */ if (sym_is_extern) return 0; /* If the existing symbol is a strong resolved symbol, bail out, * because we lost the resolution battle and have nothing to * contribute. We already checked above that there is no * strong-strong conflict. We also already tightened binding * and visibility, so nothing else to contribute at that point. */ if (!glob_sym->is_extern && sym_bind == STB_WEAK) return 0; /* At this point, the new symbol is strong non-extern, * so overwrite glob_sym with new symbol information. * Preserve binding and visibility. */ sym_update_type(dst_sym, sym_type); dst_sym->st_shndx = dst_sec->sec_idx; dst_sym->st_value = src_sec->dst_off + sym->st_value; dst_sym->st_size = sym->st_size; /* see comment below about dst_sec->id vs dst_sec->sec_idx */ glob_sym->sec_id = dst_sec->id; glob_sym->is_extern = false; if (complete_extern_btf_info(linker->btf, glob_sym->btf_id, obj->btf, btf_id)) return -EINVAL; /* request updating VAR's/FUNC's underlying BTF type when appending BTF type */ glob_sym->underlying_btf_id = 0; obj->sym_map[src_sym_idx] = glob_sym->sym_idx; return 0; } add_sym: name_off = strset__add_str(linker->strtab_strs, sym_name); if (name_off < 0) return name_off; dst_sym = add_new_sym(linker, &dst_sym_idx); if (!dst_sym) return -ENOMEM; dst_sym->st_name = name_off; dst_sym->st_info = sym->st_info; dst_sym->st_other = sym->st_other; dst_sym->st_shndx = dst_sec ? dst_sec->sec_idx : sym->st_shndx; dst_sym->st_value = (src_sec ?
src_sec->dst_off : 0) + sym->st_value; dst_sym->st_size = sym->st_size; obj->sym_map[src_sym_idx] = dst_sym_idx; if (sym_type == STT_SECTION && dst_sym) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } if (sym_bind != STB_LOCAL) { glob_sym = add_glob_sym(linker); if (!glob_sym) return -ENOMEM; glob_sym->sym_idx = dst_sym_idx; /* we use dst_sec->id (and not dst_sec->sec_idx), because * ephemeral sections (.kconfig, .ksyms, etc) don't have * sec_idx (as they don't have corresponding ELF section), but * still have id. .extern doesn't have even ephemeral section * associated with it, so dst_sec->id == dst_sec->sec_idx == 0. */ glob_sym->sec_id = dst_sec ? dst_sec->id : 0; glob_sym->name_off = name_off; /* we will fill btf_id in during BTF merging step */ glob_sym->btf_id = 0; glob_sym->is_extern = sym_is_extern; glob_sym->is_weak = sym_bind == STB_WEAK; } return 0; } static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj) { struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx]; int i, err; for (i = 1; i < obj->sec_cnt; i++) { struct src_sec *src_sec, *src_linked_sec; struct dst_sec *dst_sec, *dst_linked_sec; Elf64_Rel *src_rel, *dst_rel; int j, n; src_sec = &obj->secs[i]; if (!is_relo_sec(src_sec)) continue; /* shdr->sh_info points to relocatable section */ src_linked_sec = &obj->secs[src_sec->shdr->sh_info]; if (src_linked_sec->skipped) continue; dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name); if (!dst_sec) { dst_sec = add_dst_sec(linker, src_sec->sec_name); if (!dst_sec) return -ENOMEM; err = init_sec(linker, dst_sec, src_sec); if (err) { pr_warn("failed to init section '%s'\n", src_sec->sec_name); return err; } } else if (!secs_match(dst_sec, src_sec)) { pr_warn("sections %s are not compatible\n", src_sec->sec_name); return -1; } /* shdr->sh_link points to SYMTAB */ dst_sec->shdr->sh_link = linker->symtab_sec_idx; /* shdr->sh_info points to relocated section */ dst_linked_sec = &linker->secs[src_linked_sec->dst_id]; dst_sec->shdr->sh_info = dst_linked_sec->sec_idx; src_sec->dst_id = dst_sec->id; err = extend_sec(linker, dst_sec, src_sec); if (err) return err; src_rel = src_sec->data->d_buf; dst_rel = dst_sec->raw_data + src_sec->dst_off; n = src_sec->shdr->sh_size / src_sec->shdr->sh_entsize; for (j = 0; j < n; j++, src_rel++, dst_rel++) { size_t src_sym_idx, dst_sym_idx, sym_type; Elf64_Sym *src_sym; src_sym_idx = ELF64_R_SYM(src_rel->r_info); src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx; dst_sym_idx = obj->sym_map[src_sym_idx]; dst_rel->r_offset += src_linked_sec->dst_off; sym_type = ELF64_R_TYPE(src_rel->r_info); dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type); if (ELF64_ST_TYPE(src_sym->st_info) == STT_SECTION) { struct src_sec *sec = &obj->secs[src_sym->st_shndx]; struct bpf_insn *insn; if (src_linked_sec->shdr->sh_flags & SHF_EXECINSTR) { /* calls to the very first static function inside * .text section at offset 0 will * reference section symbol, not the * function symbol. 
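 * (Worked example with assumed numbers: if this object's .text
 * got appended at byte offset 80 of the combined section, a
 * call insn relocated through the section symbol must have its
 * imm grown by 80 / sizeof(struct bpf_insn) == 10 instructions.)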
Fix that up, * otherwise it won't be possible to * relocate calls to two different * static functions with the same name * (from two different object files) */ insn = dst_linked_sec->raw_data + dst_rel->r_offset; if (insn->code == (BPF_JMP | BPF_CALL)) insn->imm += sec->dst_off / sizeof(struct bpf_insn); else insn->imm += sec->dst_off; } else { pr_warn("relocation against STT_SECTION in non-exec section is not supported!\n"); return -EINVAL; } } } } return 0; } static Elf64_Sym *find_sym_by_name(struct src_obj *obj, size_t sec_idx, int sym_type, const char *sym_name) { struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx]; Elf64_Sym *sym = symtab->data->d_buf; int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize; int str_sec_idx = symtab->shdr->sh_link; const char *name; for (i = 0; i < n; i++, sym++) { if (sym->st_shndx != sec_idx) continue; if (ELF64_ST_TYPE(sym->st_info) != sym_type) continue; name = elf_strptr(obj->elf, str_sec_idx, sym->st_name); if (!name) return NULL; if (strcmp(sym_name, name) != 0) continue; return sym; } return NULL; } static int linker_fixup_btf(struct src_obj *obj) { const char *sec_name; struct src_sec *sec; int i, j, n, m; if (!obj->btf) return 0; n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { struct btf_var_secinfo *vi; struct btf_type *t; t = btf_type_by_id(obj->btf, i); if (btf_kind(t) != BTF_KIND_DATASEC) continue; sec_name = btf__str_by_offset(obj->btf, t->name_off); sec = find_src_sec_by_name(obj, sec_name); if (sec) { /* record actual section size, unless ephemeral */ if (sec->shdr) t->size = sec->shdr->sh_size; } else { /* BTF can have some sections that are not represented * in ELF, e.g., .kconfig, .ksyms, .extern, which are used * for special extern variables. * * For all but one such special (ephemeral) * sections, we pre-create "section shells" to be able * to keep track of extra per-section metadata later * (e.g., those BTF extern variables). * * .extern is even more special, though, because it * contains extern variables that need to be resolved * by the static linker, not by libbpf or the kernel. When such * externs are resolved, we are going to remove them * from .extern BTF section and might end up not * needing it at all. Each resolved extern should have * a matching non-extern VAR/FUNC in other sections. * * We do support leaving some of the externs * unresolved, though, to support cases of building * libraries, which will later be linked against final * BPF applications. So if at finalization we still * see unresolved externs, we'll create .extern * section on our own.
*/ if (strcmp(sec_name, BTF_EXTERN_SEC) == 0) continue; sec = add_src_sec(obj, sec_name); if (!sec) return -ENOMEM; sec->ephemeral = true; sec->sec_idx = 0; /* will match UNDEF shndx in ELF */ } /* remember ELF section and its BTF type ID match */ sec->sec_type_id = i; /* fix up variable offsets */ vi = btf_var_secinfos(t); for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type); const char *var_name; int var_linkage; Elf64_Sym *sym; /* could be a variable or function */ if (!btf_is_var(vt)) continue; var_name = btf__str_by_offset(obj->btf, vt->name_off); var_linkage = btf_var(vt)->linkage; /* no need to patch up static or extern vars */ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED) continue; sym = find_sym_by_name(obj, sec->sec_idx, STT_OBJECT, var_name); if (!sym) { pr_warn("failed to find symbol for variable '%s' in section '%s'\n", var_name, sec_name); return -ENOENT; } vi->offset = sym->st_value; } } return 0; } static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) { const struct btf_type *t; int i, j, n, start_id, id, err; const char *name; if (!obj->btf) return 0; start_id = btf__type_cnt(linker->btf); n = btf__type_cnt(obj->btf); obj->btf_type_map = calloc(n + 1, sizeof(int)); if (!obj->btf_type_map) return -ENOMEM; for (i = 1; i < n; i++) { struct glob_sym *glob_sym = NULL; t = btf__type_by_id(obj->btf, i); /* DATASECs are handled specially below */ if (btf_kind(t) == BTF_KIND_DATASEC) continue; if (btf_is_non_static(t)) { /* there should be glob_sym already */ name = btf__str_by_offset(obj->btf, t->name_off); glob_sym = find_glob_sym(linker, name); /* VARs without corresponding glob_sym are those that * belong to skipped/deduplicated sections (i.e., * license and version), so just skip them */ if (!glob_sym) continue; /* linker_append_elf_sym() might have requested * updating underlying type ID, if extern was resolved * to strong symbol or weak got upgraded to non-weak */ if (glob_sym->underlying_btf_id == 0) glob_sym->underlying_btf_id = -t->type; /* globals from previous object files that match our * VAR/FUNC already have a corresponding associated * BTF type, so just make sure to use it */ if (glob_sym->btf_id) { /* reuse existing BTF type for global var/func */ obj->btf_type_map[i] = glob_sym->btf_id; continue; } } id = btf__add_type(linker->btf, obj->btf, t); if (id < 0) { pr_warn("failed to append BTF type #%d from file '%s'\n", i, obj->filename); return id; } obj->btf_type_map[i] = id; /* record just appended BTF type for var/func */ if (glob_sym) { glob_sym->btf_id = id; glob_sym->underlying_btf_id = -t->type; } } /* remap all the types except DATASECs */ n = btf__type_cnt(linker->btf); for (i = start_id; i < n; i++) { struct btf_type *dst_t = btf_type_by_id(linker->btf, i); struct btf_field_iter it; __u32 *type_id; err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS); if (err) return err; while ((type_id = btf_field_iter_next(&it))) { int new_id = obj->btf_type_map[*type_id]; /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. 
*/ if (new_id == 0 && *type_id != 0) { pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id); return -EINVAL; } *type_id = obj->btf_type_map[*type_id]; } } /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's * actual type), if necessary */ for (i = 0; i < linker->glob_sym_cnt; i++) { struct glob_sym *glob_sym = &linker->glob_syms[i]; struct btf_type *glob_t; if (glob_sym->underlying_btf_id >= 0) continue; glob_sym->underlying_btf_id = obj->btf_type_map[-glob_sym->underlying_btf_id]; glob_t = btf_type_by_id(linker->btf, glob_sym->btf_id); glob_t->type = glob_sym->underlying_btf_id; } /* append DATASEC info */ for (i = 1; i < obj->sec_cnt; i++) { struct src_sec *src_sec; struct dst_sec *dst_sec; const struct btf_var_secinfo *src_var; struct btf_var_secinfo *dst_var; src_sec = &obj->secs[i]; if (!src_sec->sec_type_id || src_sec->skipped) continue; dst_sec = &linker->secs[src_sec->dst_id]; /* Mark section as having BTF regardless of the presence of * variables. In some cases compiler might generate empty BTF * with no variables information. E.g., when promoting local * array/structure variable initial values and BPF object * file otherwise has no read-only static variables in * .rodata. We need to preserve such empty BTF and just set * correct section size. */ dst_sec->has_btf = true; t = btf__type_by_id(obj->btf, src_sec->sec_type_id); src_var = btf_var_secinfos(t); n = btf_vlen(t); for (j = 0; j < n; j++, src_var++) { void *sec_vars = dst_sec->sec_vars; int new_id = obj->btf_type_map[src_var->type]; struct glob_sym *glob_sym = NULL; t = btf_type_by_id(linker->btf, new_id); if (btf_is_non_static(t)) { name = btf__str_by_offset(linker->btf, t->name_off); glob_sym = find_glob_sym(linker, name); if (glob_sym->sec_id != dst_sec->id) { pr_warn("global '%s': section mismatch %d vs %d\n", name, glob_sym->sec_id, dst_sec->id); return -EINVAL; } } /* If there is already a member (VAR or FUNC) mapped * to the same type, don't add a duplicate entry. * This will happen when multiple object files define * the same extern VARs/FUNCs. */ if (glob_sym && glob_sym->var_idx >= 0) { __s64 sz; /* FUNCs don't have size, nothing to update */ if (btf_is_func(t)) continue; dst_var = &dst_sec->sec_vars[glob_sym->var_idx]; /* Because underlying BTF type might have * changed, so might its size have changed, so * re-calculate and update it in sec_var. 
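 * A plausible example (not taken from this code): a weak
 * 'int arr[4];' overridden by a strong 'int arr[16];' leaves the
 * copied secinfo recording 16 bytes while the resolved type is now
 * 64 bytes; the recalculation below repairs exactly that.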
*/ sz = btf__resolve_size(linker->btf, glob_sym->underlying_btf_id); if (sz < 0) { pr_warn("global '%s': failed to resolve size of underlying type: %d\n", name, (int)sz); return -EINVAL; } dst_var->size = sz; continue; } sec_vars = libbpf_reallocarray(sec_vars, dst_sec->sec_var_cnt + 1, sizeof(*dst_sec->sec_vars)); if (!sec_vars) return -ENOMEM; dst_sec->sec_vars = sec_vars; dst_sec->sec_var_cnt++; dst_var = &dst_sec->sec_vars[dst_sec->sec_var_cnt - 1]; dst_var->type = obj->btf_type_map[src_var->type]; dst_var->size = src_var->size; dst_var->offset = src_sec->dst_off + src_var->offset; if (glob_sym) glob_sym->var_idx = dst_sec->sec_var_cnt - 1; } } return 0; } static void *add_btf_ext_rec(struct btf_ext_sec_data *ext_data, const void *src_rec) { void *tmp; tmp = libbpf_reallocarray(ext_data->recs, ext_data->rec_cnt + 1, ext_data->rec_sz); if (!tmp) return NULL; ext_data->recs = tmp; tmp += ext_data->rec_cnt * ext_data->rec_sz; memcpy(tmp, src_rec, ext_data->rec_sz); ext_data->rec_cnt++; return tmp; } static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj) { const struct btf_ext_info_sec *ext_sec; const char *sec_name, *s; struct src_sec *src_sec; struct dst_sec *dst_sec; int rec_sz, str_off, i; if (!obj->btf_ext) return 0; rec_sz = obj->btf_ext->func_info.rec_size; for_each_btf_ext_sec(&obj->btf_ext->func_info, ext_sec) { struct bpf_func_info_min *src_rec, *dst_rec; sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); src_sec = find_src_sec_by_name(obj, sec_name); if (!src_sec) { pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); return -EINVAL; } dst_sec = &linker->secs[src_sec->dst_id]; if (dst_sec->func_info.rec_sz == 0) dst_sec->func_info.rec_sz = rec_sz; if (dst_sec->func_info.rec_sz != rec_sz) { pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); return -EINVAL; } for_each_btf_ext_rec(&obj->btf_ext->func_info, ext_sec, i, src_rec) { dst_rec = add_btf_ext_rec(&dst_sec->func_info, src_rec); if (!dst_rec) return -ENOMEM; dst_rec->insn_off += src_sec->dst_off; dst_rec->type_id = obj->btf_type_map[dst_rec->type_id]; } } rec_sz = obj->btf_ext->line_info.rec_size; for_each_btf_ext_sec(&obj->btf_ext->line_info, ext_sec) { struct bpf_line_info_min *src_rec, *dst_rec; sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); src_sec = find_src_sec_by_name(obj, sec_name); if (!src_sec) { pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); return -EINVAL; } dst_sec = &linker->secs[src_sec->dst_id]; if (dst_sec->line_info.rec_sz == 0) dst_sec->line_info.rec_sz = rec_sz; if (dst_sec->line_info.rec_sz != rec_sz) { pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); return -EINVAL; } for_each_btf_ext_rec(&obj->btf_ext->line_info, ext_sec, i, src_rec) { dst_rec = add_btf_ext_rec(&dst_sec->line_info, src_rec); if (!dst_rec) return -ENOMEM; dst_rec->insn_off += src_sec->dst_off; s = btf__str_by_offset(obj->btf, src_rec->file_name_off); str_off = btf__add_str(linker->btf, s); if (str_off < 0) return -ENOMEM; dst_rec->file_name_off = str_off; s = btf__str_by_offset(obj->btf, src_rec->line_off); str_off = btf__add_str(linker->btf, s); if (str_off < 0) return -ENOMEM; dst_rec->line_off = str_off; /* dst_rec->line_col is fine */ } } rec_sz = obj->btf_ext->core_relo_info.rec_size; for_each_btf_ext_sec(&obj->btf_ext->core_relo_info, ext_sec) { struct bpf_core_relo *src_rec, *dst_rec; sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); src_sec = 
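/* Same rebase-and-re-intern scheme as in the func/line info loops
 * above: each record copied below has its insn_off shifted by
 * src_sec->dst_off (made-up numbers: insn_off 16 from an object
 * placed at dst_off 128 becomes 144), its type_id remapped through
 * obj->btf_type_map, and its access string re-added into the
 * linker's merged BTF string section. */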
find_src_sec_by_name(obj, sec_name); if (!src_sec) { pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); return -EINVAL; } dst_sec = &linker->secs[src_sec->dst_id]; if (dst_sec->core_relo_info.rec_sz == 0) dst_sec->core_relo_info.rec_sz = rec_sz; if (dst_sec->core_relo_info.rec_sz != rec_sz) { pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); return -EINVAL; } for_each_btf_ext_rec(&obj->btf_ext->core_relo_info, ext_sec, i, src_rec) { dst_rec = add_btf_ext_rec(&dst_sec->core_relo_info, src_rec); if (!dst_rec) return -ENOMEM; dst_rec->insn_off += src_sec->dst_off; dst_rec->type_id = obj->btf_type_map[dst_rec->type_id]; s = btf__str_by_offset(obj->btf, src_rec->access_str_off); str_off = btf__add_str(linker->btf, s); if (str_off < 0) return -ENOMEM; dst_rec->access_str_off = str_off; /* dst_rec->kind is fine */ } } return 0; } int bpf_linker__finalize(struct bpf_linker *linker) { struct dst_sec *sec; size_t strs_sz; const void *strs; int err, i; if (!linker->elf) return libbpf_err(-EINVAL); err = finalize_btf(linker); if (err) return libbpf_err(err); /* Finalize strings */ strs_sz = strset__data_size(linker->strtab_strs); strs = strset__data(linker->strtab_strs); sec = &linker->secs[linker->strtab_sec_idx]; sec->data->d_align = 1; sec->data->d_off = 0LL; sec->data->d_buf = (void *)strs; sec->data->d_type = ELF_T_BYTE; sec->data->d_size = strs_sz; sec->shdr->sh_size = strs_sz; for (i = 1; i < linker->sec_cnt; i++) { sec = &linker->secs[i]; /* STRTAB is handled specially above */ if (sec->sec_idx == linker->strtab_sec_idx) continue; /* special ephemeral sections (.ksyms, .kconfig, etc) */ if (!sec->scn) continue; /* restore sections with bpf insns to target byte-order */ if (linker->swapped_endian && is_exec_sec(sec)) exec_sec_bswap(sec->raw_data, sec->sec_sz); sec->data->d_buf = sec->raw_data; } /* Finalize ELF layout */ if (elf_update(linker->elf, ELF_C_NULL) < 0) { err = -errno; pr_warn_elf("failed to finalize ELF layout"); return libbpf_err(err); } /* Write out final ELF contents */ if (elf_update(linker->elf, ELF_C_WRITE) < 0) { err = -errno; pr_warn_elf("failed to write ELF contents"); return libbpf_err(err); } elf_end(linker->elf); close(linker->fd); linker->elf = NULL; linker->fd = -1; return 0; } static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name, size_t align, const void *raw_data, size_t raw_sz) { Elf_Scn *scn; Elf_Data *data; Elf64_Shdr *shdr; int name_off; name_off = strset__add_str(linker->strtab_strs, sec_name); if (name_off < 0) return name_off; scn = elf_newscn(linker->elf); if (!scn) return -ENOMEM; data = elf_newdata(scn); if (!data) return -ENOMEM; shdr = elf64_getshdr(scn); if (!shdr) return -EINVAL; shdr->sh_name = name_off; shdr->sh_type = SHT_PROGBITS; shdr->sh_flags = 0; shdr->sh_size = raw_sz; shdr->sh_link = 0; shdr->sh_info = 0; shdr->sh_addralign = align; shdr->sh_entsize = 0; data->d_type = ELF_T_BYTE; data->d_size = raw_sz; data->d_buf = (void *)raw_data; data->d_align = align; data->d_off = 0; return 0; } static int finalize_btf(struct bpf_linker *linker) { enum btf_endianness link_endianness; LIBBPF_OPTS(btf_dedup_opts, opts); struct btf *btf = linker->btf; const void *raw_data; int i, j, id, err; __u32 raw_sz; /* bail out if no BTF data was produced */ if (btf__type_cnt(linker->btf) == 1) return 0; for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; if (!sec->has_btf) continue; id = btf__add_datasec(btf, sec->sec_name, sec->sec_sz); if (id < 0) { pr_warn("failed 
to add consolidated BTF type for datasec '%s': %d\n", sec->sec_name, id); return id; } for (j = 0; j < sec->sec_var_cnt; j++) { struct btf_var_secinfo *vi = &sec->sec_vars[j]; if (btf__add_datasec_var_info(btf, vi->type, vi->offset, vi->size)) return -EINVAL; } } err = finalize_btf_ext(linker); if (err) { pr_warn(".BTF.ext generation failed: %d\n", err); return err; } opts.btf_ext = linker->btf_ext; err = btf__dedup(linker->btf, &opts); if (err) { pr_warn("BTF dedup failed: %d\n", err); return err; } /* Set .BTF and .BTF.ext output byte order */ link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; btf__set_endianness(linker->btf, link_endianness); if (linker->btf_ext) btf_ext__set_endianness(linker->btf_ext, link_endianness); /* Emit .BTF section */ raw_data = btf__raw_data(linker->btf, &raw_sz); if (!raw_data) return -ENOMEM; err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz); if (err) { pr_warn("failed to write out .BTF ELF section: %d\n", err); return err; } /* Emit .BTF.ext section */ if (linker->btf_ext) { raw_data = btf_ext__raw_data(linker->btf_ext, &raw_sz); if (!raw_data) return -ENOMEM; err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz); if (err) { pr_warn("failed to write out .BTF.ext ELF section: %d\n", err); return err; } } return 0; } static int emit_btf_ext_data(struct bpf_linker *linker, void *output, const char *sec_name, struct btf_ext_sec_data *sec_data) { struct btf_ext_info_sec *sec_info; void *cur = output; int str_off; size_t sz; if (!sec_data->rec_cnt) return 0; str_off = btf__add_str(linker->btf, sec_name); if (str_off < 0) return -ENOMEM; sec_info = cur; sec_info->sec_name_off = str_off; sec_info->num_info = sec_data->rec_cnt; cur += sizeof(struct btf_ext_info_sec); sz = sec_data->rec_cnt * sec_data->rec_sz; memcpy(cur, sec_data->recs, sz); cur += sz; return cur - output; } static int finalize_btf_ext(struct bpf_linker *linker) { size_t funcs_sz = 0, lines_sz = 0, core_relos_sz = 0, total_sz = 0; size_t func_rec_sz = 0, line_rec_sz = 0, core_relo_rec_sz = 0; struct btf_ext_header *hdr; void *data, *cur; int i, err, sz; /* validate that all sections have the same .BTF.ext record sizes * and calculate total data size for each type of data (func info, * line info, core relos) */ for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; if (sec->func_info.rec_cnt) { if (func_rec_sz == 0) func_rec_sz = sec->func_info.rec_sz; if (func_rec_sz != sec->func_info.rec_sz) { pr_warn("mismatch in func_info record size %zu != %u\n", func_rec_sz, sec->func_info.rec_sz); return -EINVAL; } funcs_sz += sizeof(struct btf_ext_info_sec) + func_rec_sz * sec->func_info.rec_cnt; } if (sec->line_info.rec_cnt) { if (line_rec_sz == 0) line_rec_sz = sec->line_info.rec_sz; if (line_rec_sz != sec->line_info.rec_sz) { pr_warn("mismatch in line_info record size %zu != %u\n", line_rec_sz, sec->line_info.rec_sz); return -EINVAL; } lines_sz += sizeof(struct btf_ext_info_sec) + line_rec_sz * sec->line_info.rec_cnt; } if (sec->core_relo_info.rec_cnt) { if (core_relo_rec_sz == 0) core_relo_rec_sz = sec->core_relo_info.rec_sz; if (core_relo_rec_sz != sec->core_relo_info.rec_sz) { pr_warn("mismatch in core_relo_info record size %zu != %u\n", core_relo_rec_sz, sec->core_relo_info.rec_sz); return -EINVAL; } core_relos_sz += sizeof(struct btf_ext_info_sec) + core_relo_rec_sz * sec->core_relo_info.rec_cnt; } } if (!funcs_sz && !lines_sz && !core_relos_sz) return 0; total_sz += sizeof(struct 
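/* The blob assembled below follows the .BTF.ext wire layout: one
 * struct btf_ext_header, then, for each kind that is present, a
 * __u32 record-size prefix followed by per-section btf_ext_info_sec
 * headers and their raw records; all header offsets are relative to
 * the end of the header itself, as noted below. */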
btf_ext_header); if (funcs_sz) { funcs_sz += sizeof(__u32); /* record size prefix */ total_sz += funcs_sz; } if (lines_sz) { lines_sz += sizeof(__u32); /* record size prefix */ total_sz += lines_sz; } if (core_relos_sz) { core_relos_sz += sizeof(__u32); /* record size prefix */ total_sz += core_relos_sz; } cur = data = calloc(1, total_sz); if (!data) return -ENOMEM; hdr = cur; hdr->magic = BTF_MAGIC; hdr->version = BTF_VERSION; hdr->flags = 0; hdr->hdr_len = sizeof(struct btf_ext_header); cur += sizeof(struct btf_ext_header); /* All offsets are in bytes relative to the end of this header */ hdr->func_info_off = 0; hdr->func_info_len = funcs_sz; hdr->line_info_off = funcs_sz; hdr->line_info_len = lines_sz; hdr->core_relo_off = funcs_sz + lines_sz; hdr->core_relo_len = core_relos_sz; if (funcs_sz) { *(__u32 *)cur = func_rec_sz; cur += sizeof(__u32); for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->func_info); if (sz < 0) { err = sz; goto out; } cur += sz; } } if (lines_sz) { *(__u32 *)cur = line_rec_sz; cur += sizeof(__u32); for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->line_info); if (sz < 0) { err = sz; goto out; } cur += sz; } } if (core_relos_sz) { *(__u32 *)cur = core_relo_rec_sz; cur += sizeof(__u32); for (i = 1; i < linker->sec_cnt; i++) { struct dst_sec *sec = &linker->secs[i]; sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->core_relo_info); if (sz < 0) { err = sz; goto out; } cur += sz; } } linker->btf_ext = btf_ext__new(data, total_sz); err = libbpf_get_error(linker->btf_ext); if (err) { linker->btf_ext = NULL; pr_warn("failed to parse final .BTF.ext data: %d\n", err); goto out; } out: free(data); return err; } xdp-tools-1.5.4/lib/libbpf/src/features.c0000644000175100001660000004223714706536574017623 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ #include #include #include "bpf.h" #include "libbpf.h" #include "libbpf_common.h" #include "libbpf_internal.h" #include "str_error.h" static inline __u64 ptr_to_u64(const void *ptr) { return (__u64)(unsigned long)ptr; } int probe_fd(int fd) { if (fd >= 0) close(fd); return fd >= 0; } static int probe_kern_prog_name(int token_fd) { const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; union bpf_attr attr; int ret; memset(&attr, 0, attr_sz); attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; attr.license = ptr_to_u64("GPL"); attr.insns = ptr_to_u64(insns); attr.insn_cnt = (__u32)ARRAY_SIZE(insns); attr.prog_token_fd = token_fd; if (token_fd) attr.prog_flags |= BPF_F_TOKEN_FD; libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name)); /* make sure loading with name works */ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS); return probe_fd(ret); } static int probe_kern_global_data(int token_fd) { char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd, .map_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd, .prog_flags = token_fd ? 
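/* Typical feature-probe shape used throughout this file: build the
 * smallest program exercising one capability (here, a store through
 * a map value, with the freshly created map's fd patched into
 * insns[0].imm below) and treat a successful BPF_PROG_LOAD as proof
 * of kernel support. */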
BPF_F_TOKEN_FD : 0, ); int ret, map, insn_cnt = ARRAY_SIZE(insns); map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", __func__, cp, -ret); return ret; } insns[0].imm = map; ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts); close(map); return probe_fd(ret); } static int probe_kern_btf(int token_fd) { static const char strs[] = "\0int"; __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_func(int token_fd) { static const char strs[] = "\0int\0x\0a"; /* void x(int a) {} */ __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* FUNC_PROTO */ /* [2] */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), BTF_PARAM_ENC(7, 1), /* FUNC x */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_func_global(int token_fd) { static const char strs[] = "\0int\0x\0a"; /* static void x(int a) {} */ __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* FUNC_PROTO */ /* [2] */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), BTF_PARAM_ENC(7, 1), /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_datasec(int token_fd) { static const char strs[] = "\0x\0.data"; /* static int a; */ __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* VAR x */ /* [2] */ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), BTF_VAR_STATIC, /* DATASEC val */ /* [3] */ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), BTF_VAR_SECINFO_ENC(2, 0, 4), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_qmark_datasec(int token_fd) { static const char strs[] = "\0x\0?.data"; /* static int a; */ __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* VAR x */ /* [2] */ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), BTF_VAR_STATIC, /* DATASEC ?.data */ /* [3] */ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), BTF_VAR_SECINFO_ENC(2, 0, 4), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_float(int token_fd) { static const char strs[] = "\0float"; __u32 types[] = { /* float */ BTF_TYPE_FLOAT_ENC(1, 4), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_decl_tag(int token_fd) { static const char strs[] = "\0tag"; __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* VAR x */ /* [2] */ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), BTF_VAR_STATIC, /* attr */ BTF_TYPE_DECL_TAG_ENC(1, 2, -1), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_btf_type_tag(int token_fd) { static const char strs[] = "\0tag"; __u32 types[] = 
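/* BTF-kind probes hand-encode a minimal type section with the
 * BTF_TYPE_*_ENC() macros and submit it via libbpf__load_raw_btf();
 * the kernel accepting the blob implies it understands the kind
 * under test (BTF_KIND_TYPE_TAG in this case). */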
{ /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ /* attr */ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */ /* ptr */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */ }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_array_mmap(int token_fd) { LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0), .token_fd = token_fd, ); int fd; fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts); return probe_fd(fd); } static int probe_kern_exp_attach_type(int token_fd) { LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; int fd, insn_cnt = ARRAY_SIZE(insns); /* use any valid combination of program type and (optional) * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) * to see if kernel supports expected_attach_type field for * BPF_PROG_LOAD command */ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts); return probe_fd(fd); } static int probe_kern_probe_read_kernel(int token_fd) { LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); struct bpf_insn insns[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), BPF_EXIT_INSN(), }; int fd, insn_cnt = ARRAY_SIZE(insns); fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); return probe_fd(fd); } static int probe_prog_bind_map(int token_fd) { char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd, .map_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", __func__, cp, -ret); return ret; } prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts); if (prog < 0) { close(map); return 0; } ret = bpf_prog_bind_map(prog, map, NULL); close(map); close(prog); return ret >= 0; } static int probe_module_btf(int token_fd) { static const char strs[] = "\0int"; __u32 types[] = { /* int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), }; struct bpf_btf_info info; __u32 len = sizeof(info); char name[16]; int fd, err; fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd); if (fd < 0) return 0; /* BTF not supported at all */ memset(&info, 0, sizeof(info)); info.name = ptr_to_u64(name); info.name_len = sizeof(name); /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer; * kernel's module BTF support coincides with support for * name/name_len fields in struct bpf_btf_info. 
*/ err = bpf_btf_get_info_by_fd(fd, &info, &len); close(fd); return !err; } static int probe_perf_link(int token_fd) { struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); int prog_fd, link_fd, err; prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts); if (prog_fd < 0) return -errno; /* use invalid perf_event FD to get EBADF, if link is supported; * otherwise EINVAL should be returned */ link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL); err = -errno; /* close() can clobber errno */ if (link_fd >= 0) close(link_fd); close(prog_fd); return link_fd < 0 && err == -EBADF; } static int probe_uprobe_multi_link(int token_fd) { LIBBPF_OPTS(bpf_prog_load_opts, load_opts, .expected_attach_type = BPF_TRACE_UPROBE_MULTI, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); LIBBPF_OPTS(bpf_link_create_opts, link_opts); struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; int prog_fd, link_fd, err; unsigned long offset = 0; prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, ARRAY_SIZE(insns), &load_opts); if (prog_fd < 0) return -errno; /* Creating uprobe in '/' binary should fail with -EBADF. */ link_opts.uprobe_multi.path = "/"; link_opts.uprobe_multi.offsets = &offset; link_opts.uprobe_multi.cnt = 1; link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts); err = -errno; /* close() can clobber errno */ if (link_fd >= 0 || err != -EBADF) { if (link_fd >= 0) close(link_fd); close(prog_fd); return 0; } /* Initial multi-uprobe support in kernel didn't handle PID filtering * correctly (it was doing thread filtering, not process filtering). * So now we'll detect if PID filtering logic was fixed, and, if not, * we'll pretend multi-uprobes are not supported. * Multi-uprobes are used in USDT attachment logic, and we need to be * conservative here, because multi-uprobe selection happens early at * load time, while the use of PID filtering is known late at * attachment time, at which point it's too late to undo multi-uprobe * selection. * * Creating uprobe with pid == -1 for (invalid) '/' binary will fail * early with -EINVAL on kernels with fixed PID filtering logic; * otherwise -ESRCH would be returned if passed correct binary path * (but we'll just get -EBADF, of course). */ link_opts.uprobe_multi.pid = -1; /* invalid PID */ link_opts.uprobe_multi.path = "/"; /* invalid path */ link_opts.uprobe_multi.offsets = &offset; link_opts.uprobe_multi.cnt = 1; link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts); err = -errno; /* close() can clobber errno */ if (link_fd >= 0) close(link_fd); close(prog_fd); return link_fd < 0 && err == -EINVAL; } static int probe_kern_bpf_cookie(int token_fd) { struct bpf_insn insns[] = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie), BPF_EXIT_INSN(), }; LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd, .prog_flags = token_fd ?
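/* Helper-support probes follow the same load-success pattern: the
 * two-insn program above calls bpf_get_attach_cookie(), so the
 * verifier rejects the load on kernels predating BPF cookie
 * support. */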
BPF_F_TOKEN_FD : 0, ); int ret, insn_cnt = ARRAY_SIZE(insns); ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); return probe_fd(ret); } static int probe_kern_btf_enum64(int token_fd) { static const char strs[] = "\0enum64"; __u32 types[] = { BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8), }; return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd)); } static int probe_kern_arg_ctx_tag(int token_fd) { static const char strs[] = "\0a\0b\0arg:ctx\0"; const __u32 types[] = { /* [1] INT */ BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4), /* [2] PTR -> VOID */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [3] FUNC_PROTO `int(void *a)` */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1), BTF_PARAM_ENC(1 /* "a" */, 2), /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */ BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3), /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1), BTF_PARAM_ENC(3 /* "b" */, 2), /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */ BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5), /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */ BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0), }; const struct bpf_insn insns[] = { /* main prog */ BPF_CALL_REL(+1), BPF_EXIT_INSN(), /* global subprog */ BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */ BPF_EXIT_INSN(), }; const struct bpf_func_info_min func_infos[] = { { 0, 4 }, /* main prog -> FUNC 'a' */ { 2, 6 }, /* subprog -> FUNC 'b' */ }; LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd, .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0, ); int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns); btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd); if (btf_fd < 0) return 0; opts.prog_btf_fd = btf_fd; opts.func_info = &func_infos; opts.func_info_cnt = ARRAY_SIZE(func_infos); opts.func_info_rec_size = sizeof(func_infos[0]); prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx", "GPL", insns, insn_cnt, &opts); close(btf_fd); return probe_fd(prog_fd); } typedef int (*feature_probe_fn)(int /* token_fd */); static struct kern_feature_cache feature_cache; static struct kern_feature_desc { const char *desc; feature_probe_fn probe; } feature_probes[__FEAT_CNT] = { [FEAT_PROG_NAME] = { "BPF program name", probe_kern_prog_name, }, [FEAT_GLOBAL_DATA] = { "global variables", probe_kern_global_data, }, [FEAT_BTF] = { "minimal BTF", probe_kern_btf, }, [FEAT_BTF_FUNC] = { "BTF functions", probe_kern_btf_func, }, [FEAT_BTF_GLOBAL_FUNC] = { "BTF global function", probe_kern_btf_func_global, }, [FEAT_BTF_DATASEC] = { "BTF data section and variable", probe_kern_btf_datasec, }, [FEAT_ARRAY_MMAP] = { "ARRAY map mmap()", probe_kern_array_mmap, }, [FEAT_EXP_ATTACH_TYPE] = { "BPF_PROG_LOAD expected_attach_type attribute", probe_kern_exp_attach_type, }, [FEAT_PROBE_READ_KERN] = { "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, }, [FEAT_PROG_BIND_MAP] = { "BPF_PROG_BIND_MAP support", probe_prog_bind_map, }, [FEAT_MODULE_BTF] = { "module BTF support", probe_module_btf, }, [FEAT_BTF_FLOAT] = { "BTF_KIND_FLOAT support", probe_kern_btf_float, }, [FEAT_PERF_LINK] = { "BPF perf link support", probe_perf_link, }, [FEAT_BTF_DECL_TAG] = { "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, }, [FEAT_BTF_TYPE_TAG] = { "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag, }, [FEAT_MEMCG_ACCOUNT] = { 
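/* Each entry pairs a human-readable description with its probe;
 * feat_supported() below runs a given probe at most once and caches
 * the verdict in feature_cache via READ_ONCE()/WRITE_ONCE(). */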
"memcg-based memory accounting", probe_memcg_account, }, [FEAT_BPF_COOKIE] = { "BPF cookie support", probe_kern_bpf_cookie, }, [FEAT_BTF_ENUM64] = { "BTF_KIND_ENUM64 support", probe_kern_btf_enum64, }, [FEAT_SYSCALL_WRAPPER] = { "Kernel using syscall wrapper", probe_kern_syscall_wrapper, }, [FEAT_UPROBE_MULTI_LINK] = { "BPF multi-uprobe link support", probe_uprobe_multi_link, }, [FEAT_ARG_CTX_TAG] = { "kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag, }, [FEAT_BTF_QMARK_DATASEC] = { "BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec, }, }; bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id) { struct kern_feature_desc *feat = &feature_probes[feat_id]; int ret; /* assume global feature cache, unless custom one is provided */ if (!cache) cache = &feature_cache; if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) { ret = feat->probe(cache->token_fd); if (ret > 0) { WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED); } else if (ret == 0) { WRITE_ONCE(cache->res[feat_id], FEAT_MISSING); } else { pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); WRITE_ONCE(cache->res[feat_id], FEAT_MISSING); } } return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED; } xdp-tools-1.5.4/lib/libbpf/src/strset.c0000644000175100001660000001043614706536574017325 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2021 Facebook */ #include #include #include #include #include #include "hashmap.h" #include "libbpf_internal.h" #include "strset.h" struct strset { void *strs_data; size_t strs_data_len; size_t strs_data_cap; size_t strs_data_max_len; /* lookup index for each unique string in strings set */ struct hashmap *strs_hash; }; static size_t strset_hash_fn(long key, void *ctx) { const struct strset *s = ctx; const char *str = s->strs_data + key; return str_hash(str); } static bool strset_equal_fn(long key1, long key2, void *ctx) { const struct strset *s = ctx; const char *str1 = s->strs_data + key1; const char *str2 = s->strs_data + key2; return strcmp(str1, str2) == 0; } struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz) { struct strset *set = calloc(1, sizeof(*set)); struct hashmap *hash; int err = -ENOMEM; if (!set) return ERR_PTR(-ENOMEM); hash = hashmap__new(strset_hash_fn, strset_equal_fn, set); if (IS_ERR(hash)) goto err_out; set->strs_data_max_len = max_data_sz; set->strs_hash = hash; if (init_data) { long off; set->strs_data = malloc(init_data_sz); if (!set->strs_data) goto err_out; memcpy(set->strs_data, init_data, init_data_sz); set->strs_data_len = init_data_sz; set->strs_data_cap = init_data_sz; for (off = 0; off < set->strs_data_len; off += strlen(set->strs_data + off) + 1) { /* hashmap__add() returns EEXIST if string with the same * content already is in the hash map */ err = hashmap__add(hash, off, off); if (err == -EEXIST) continue; /* duplicate */ if (err) goto err_out; } } return set; err_out: strset__free(set); return ERR_PTR(err); } void strset__free(struct strset *set) { if (IS_ERR_OR_NULL(set)) return; hashmap__free(set->strs_hash); free(set->strs_data); free(set); } size_t strset__data_size(const struct strset *set) { return set->strs_data_len; } const char *strset__data(const struct strset *set) { return set->strs_data; } static void *strset_add_str_mem(struct strset *set, size_t add_sz) { return libbpf_add_mem(&set->strs_data, &set->strs_data_cap, 1, set->strs_data_len, set->strs_data_max_len, add_sz); } /* Find string offset that corresponds 
to a given string *s*. * Returns: * - >0 offset into string data, if string is found; * - -ENOENT, if string is not in the string data; * - <0, on any other error. */ int strset__find_str(struct strset *set, const char *s) { long old_off, new_off, len; void *p; /* see strset__add_str() for why we do this */ len = strlen(s) + 1; p = strset_add_str_mem(set, len); if (!p) return -ENOMEM; new_off = set->strs_data_len; memcpy(p, s, len); if (hashmap__find(set->strs_hash, new_off, &old_off)) return old_off; return -ENOENT; } /* Add a string s to the string data. If the string already exists, return its * offset within string data. * Returns: * - > 0 offset into string data, on success; * - < 0, on error. */ int strset__add_str(struct strset *set, const char *s) { long old_off, new_off, len; void *p; int err; /* Hashmap keys are always offsets within set->strs_data, so to even * look up some string from the "outside", we need to first append it * at the end, so that it can be addressed with an offset. Luckily, * until set->strs_data_len is incremented, that string is just a piece * of garbage for the rest of the code, so no harm, no foul. On the * other hand, if the string is unique, it's already appended and * ready to be used, only a simple set->strs_data_len increment away. */ len = strlen(s) + 1; p = strset_add_str_mem(set, len); if (!p) return -ENOMEM; new_off = set->strs_data_len; memcpy(p, s, len); /* Now attempt to add the string, but only if the string with the same * contents doesn't exist already (HASHMAP_ADD strategy). If such * string exists, we'll get its offset in old_off (that's old_key). */ err = hashmap__insert(set->strs_hash, new_off, new_off, HASHMAP_ADD, &old_off, NULL); if (err == -EEXIST) return old_off; /* duplicated string, return existing offset */ if (err) return err; set->strs_data_len += len; /* new unique string, adjust data length */ return new_off; } xdp-tools-1.5.4/lib/libbpf/src/.gitignore0000644000175100001660000000006714706536574017624 0ustar runnerdocker*.o *.a /libbpf.pc /libbpf.so* /staticobjs /sharedobjs xdp-tools-1.5.4/lib/libbpf/src/libbpf_errno.c0000644000175100001660000000440314706536574020441 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * Copyright (C) 2013-2015 Alexei Starovoitov * Copyright (C) 2015 Wang Nan * Copyright (C) 2015 Huawei Inc. * Copyright (C) 2017 Nicira, Inc. 
*/ #undef _GNU_SOURCE #include #include #include "libbpf.h" #include "libbpf_internal.h" /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) static const char *libbpf_strerror_table[NR_ERRNO] = { [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", [ERRCODE_OFFSET(RELOC)] = "Relocation failed", [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing", }; int libbpf_strerror(int err, char *buf, size_t size) { int ret; if (!buf || !size) return libbpf_err(-EINVAL); err = err > 0 ? err : -err; if (err < __LIBBPF_ERRNO__START) { ret = strerror_r(err, buf, size); buf[size - 1] = '\0'; return libbpf_err_errno(ret); } if (err < __LIBBPF_ERRNO__END) { const char *msg; msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; ret = snprintf(buf, size, "%s", msg); buf[size - 1] = '\0'; /* The length of the buf and msg is positive. * A negative number may be returned only when the * size exceeds INT_MAX. Not likely to appear. */ if (ret >= size) return libbpf_err(-ERANGE); return 0; } ret = snprintf(buf, size, "Unknown libbpf error %d", err); buf[size - 1] = '\0'; if (ret >= size) return libbpf_err(-ERANGE); return libbpf_err(-ENOENT); } xdp-tools-1.5.4/lib/libbpf/src/relo_core.h0000644000175100001660000000640714706536574017762 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2019 Facebook */ #ifndef __RELO_CORE_H #define __RELO_CORE_H #include struct bpf_core_cand { const struct btf *btf; __u32 id; }; /* dynamically sized list of type IDs and its associated struct btf */ struct bpf_core_cand_list { struct bpf_core_cand *cands; int len; }; #define BPF_CORE_SPEC_MAX_LEN 64 /* represents BPF CO-RE field or array element accessor */ struct bpf_core_accessor { __u32 type_id; /* struct/union type or array element type */ __u32 idx; /* field index or array index */ const char *name; /* field name or NULL for array accessor */ }; struct bpf_core_spec { const struct btf *btf; /* high-level spec: named fields and array indices only */ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; /* original unresolved (no skip_mods_or_typedefs) root type ID */ __u32 root_type_id; /* CO-RE relocation kind */ enum bpf_core_relo_kind relo_kind; /* high-level spec length */ int len; /* raw, low-level spec: 1-to-1 with accessor spec string */ int raw_spec[BPF_CORE_SPEC_MAX_LEN]; /* raw spec length */ int raw_len; /* field bit offset represented by spec */ __u32 bit_offset; }; struct bpf_core_relo_res { /* expected value in the instruction, unless validate == false */ __u64 orig_val; /* new value that needs to be patched up to */ __u64 new_val; /* relocation unsuccessful, poison instruction, but don't fail load */ bool poison; /* some relocations can't be validated against 
orig_val */ bool validate; /* for field byte offset relocations or the forms: * *(T *)(rX + ) = rY * rX = *(T *)(rY + ), * we remember original and resolved field size to adjust direct * memory loads of pointers and integers; this is necessary for 32-bit * host kernel architectures, but also allows to automatically * relocate fields that were resized from, e.g., u32 to u64, etc. */ bool fail_memsz_adjust; __u32 orig_sz; __u32 orig_type_id; __u32 new_sz; __u32 new_type_id; }; int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id, int level); int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id); int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id, bool behind_ptr, int level); int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id); size_t bpf_core_essential_name_len(const char *name); int bpf_core_calc_relo_insn(const char *prog_name, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, struct bpf_core_cand_list *cands, struct bpf_core_spec *specs_scratch, struct bpf_core_relo_res *targ_res); int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, int insn_idx, const struct bpf_core_relo *relo, int relo_idx, const struct bpf_core_relo_res *res); int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, const struct bpf_core_relo *relo, struct bpf_core_spec *spec); int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec); #endif xdp-tools-1.5.4/lib/libbpf/src/bpf_gen_internal.h0000644000175100001660000000430314706536574021276 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2021 Facebook */ #ifndef __BPF_GEN_INTERNAL_H #define __BPF_GEN_INTERNAL_H #include "bpf.h" struct ksym_relo_desc { const char *name; int kind; int insn_idx; bool is_weak; bool is_typeless; bool is_ld64; }; struct ksym_desc { const char *name; int ref; int kind; union { /* used for kfunc */ int off; /* used for typeless ksym */ bool typeless; }; int insn; bool is_ld64; }; struct bpf_gen { struct gen_loader_opts *opts; void *data_start; void *data_cur; void *insn_start; void *insn_cur; bool swapped_endian; ssize_t cleanup_label; __u32 nr_progs; __u32 nr_maps; int log_level; int error; struct ksym_relo_desc *relos; int relo_cnt; struct bpf_core_relo *core_relos; int core_relo_cnt; char attach_target[128]; int attach_kind; struct ksym_desc *ksyms; __u32 nr_ksyms; int fd_array; int nr_fd_array; }; void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps); int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); void bpf_gen__map_create(struct bpf_gen *gen, enum bpf_map_type map_type, const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries, struct bpf_map_create_opts *map_attr, int map_idx); void bpf_gen__prog_load(struct bpf_gen *gen, enum bpf_prog_type prog_type, const char *prog_name, const char *license, struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *load_attr, int prog_idx); void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size); void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void 
bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, bool is_typeless, bool is_ld64, int kind, int insn_idx); void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo); void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx); #endif xdp-tools-1.5.4/lib/libbpf/src/relo_core.c0000644000175100001660000014500314706536574017751 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2019 Facebook */ #ifdef __KERNEL__ #include #include #include #include #include "relo_core.h" static const char *btf_kind_str(const struct btf_type *t) { return btf_type_str(t); } static bool is_ldimm64_insn(struct bpf_insn *insn) { return insn->code == (BPF_LD | BPF_IMM | BPF_DW); } static const struct btf_type * skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id) { return btf_type_skip_modifiers(btf, id, res_id); } static const char *btf__name_by_offset(const struct btf *btf, u32 offset) { return btf_name_by_offset(btf, offset); } static s64 btf__resolve_size(const struct btf *btf, u32 type_id) { const struct btf_type *t; int size; t = btf_type_by_id(btf, type_id); t = btf_resolve_size(btf, t, &size); if (IS_ERR(t)) return PTR_ERR(t); return size; } enum libbpf_print_level { LIBBPF_WARN, LIBBPF_INFO, LIBBPF_DEBUG, }; #undef pr_warn #undef pr_info #undef pr_debug #define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) #define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) #define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) #define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__) #else #include #include #include #include #include #include "libbpf.h" #include "bpf.h" #include "btf.h" #include "str_error.h" #include "libbpf_internal.h" #endif static bool is_flex_arr(const struct btf *btf, const struct bpf_core_accessor *acc, const struct btf_array *arr) { const struct btf_type *t; /* not a flexible array, if not inside a struct or has non-zero size */ if (!acc->name || arr->nelems > 0) return false; /* has to be the last member of enclosing struct */ t = btf_type_by_id(btf, acc->type_id); return acc->idx == btf_vlen(t) - 1; } static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) { switch (kind) { case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off"; case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz"; case BPF_CORE_FIELD_EXISTS: return "field_exists"; case BPF_CORE_FIELD_SIGNED: return "signed"; case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64"; case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64"; case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id"; case BPF_CORE_TYPE_ID_TARGET: return "target_type_id"; case BPF_CORE_TYPE_EXISTS: return "type_exists"; case BPF_CORE_TYPE_MATCHES: return "type_matches"; case BPF_CORE_TYPE_SIZE: return "type_size"; case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists"; case BPF_CORE_ENUMVAL_VALUE: return "enumval_value"; default: return "unknown"; } } static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) { switch (kind) { case BPF_CORE_FIELD_BYTE_OFFSET: case BPF_CORE_FIELD_BYTE_SIZE: case BPF_CORE_FIELD_EXISTS: case BPF_CORE_FIELD_SIGNED: case BPF_CORE_FIELD_LSHIFT_U64: case BPF_CORE_FIELD_RSHIFT_U64: return true; default: return false; } } static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) { switch (kind) { 
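/* The CO-RE relocation kinds split into three families, dispatched
 * through these is_*_based() helpers: field-based kinds (byte
 * offset/size, existence, signedness, bitfield shifts) above,
 * type-based kinds (type IDs, existence, match, size) here, and
 * enumval-based kinds (enumerator existence and value) below. */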
case BPF_CORE_TYPE_ID_LOCAL: case BPF_CORE_TYPE_ID_TARGET: case BPF_CORE_TYPE_EXISTS: case BPF_CORE_TYPE_MATCHES: case BPF_CORE_TYPE_SIZE: return true; default: return false; } } static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) { switch (kind) { case BPF_CORE_ENUMVAL_EXISTS: case BPF_CORE_ENUMVAL_VALUE: return true; default: return false; } } int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id, int level) { const struct btf_type *local_type, *targ_type; int depth = 32; /* max recursion depth */ /* caller made sure that names match (ignoring flavor suffix) */ local_type = btf_type_by_id(local_btf, local_id); targ_type = btf_type_by_id(targ_btf, targ_id); if (!btf_kind_core_compat(local_type, targ_type)) return 0; recur: depth--; if (depth < 0) return -EINVAL; local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); if (!local_type || !targ_type) return -EINVAL; if (!btf_kind_core_compat(local_type, targ_type)) return 0; switch (btf_kind(local_type)) { case BTF_KIND_UNKN: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_ENUM: case BTF_KIND_FWD: case BTF_KIND_ENUM64: return 1; case BTF_KIND_INT: /* just reject deprecated bitfield-like integers; all other * integers are by default compatible between each other */ return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; case BTF_KIND_PTR: local_id = local_type->type; targ_id = targ_type->type; goto recur; case BTF_KIND_ARRAY: local_id = btf_array(local_type)->type; targ_id = btf_array(targ_type)->type; goto recur; case BTF_KIND_FUNC_PROTO: { struct btf_param *local_p = btf_params(local_type); struct btf_param *targ_p = btf_params(targ_type); __u16 local_vlen = btf_vlen(local_type); __u16 targ_vlen = btf_vlen(targ_type); int i, err; if (local_vlen != targ_vlen) return 0; for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { if (level <= 0) return -EINVAL; skip_mods_and_typedefs(local_btf, local_p->type, &local_id); skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, level - 1); if (err <= 0) return err; } /* tail recurse for return type check */ skip_mods_and_typedefs(local_btf, local_type->type, &local_id); skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); goto recur; } default: pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", btf_kind_str(local_type), local_id, targ_id); return 0; } } /* * Turn bpf_core_relo into a low- and high-level spec representation, * validating correctness along the way, as well as calculating resulting * field bit offset, specified by accessor string. Low-level spec captures * every single level of nestedness, including traversing anonymous * struct/union members. High-level one only captures semantically meaningful * "turning points": named fields and array indices. * E.g., for this case: * * struct sample { * int __unimportant; * struct { * int __1; * int __2; * int a[7]; * }; * }; * * struct sample *s = ...; * * int *x = &s->a[3]; // access string = '0:1:2:3' * * Low-level spec has 1:1 mapping with each element of access string (it's * just a parsed access string representation): [0, 1, 2, 3]. * * High-level spec will capture only 3 points: * - initial zero-index access by pointer (&s->...
is the same as &s[0]...); * - field 'a' access (corresponds to '2' in low-level spec); * - array element #3 access (corresponds to '3' in low-level spec). * * Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE, * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their * spec and raw_spec are kept empty. * * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access * string to specify the enumerator's value index that needs to be relocated. */ int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, const struct bpf_core_relo *relo, struct bpf_core_spec *spec) { int access_idx, parsed_len, i; struct bpf_core_accessor *acc; const struct btf_type *t; const char *name, *spec_str; __u32 id, name_off; __s64 sz; spec_str = btf__name_by_offset(btf, relo->access_str_off); if (str_is_empty(spec_str) || *spec_str == ':') return -EINVAL; memset(spec, 0, sizeof(*spec)); spec->btf = btf; spec->root_type_id = relo->type_id; spec->relo_kind = relo->kind; /* type-based relocations don't have a field access string */ if (core_relo_is_type_based(relo->kind)) { if (strcmp(spec_str, "0")) return -EINVAL; return 0; } /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ while (*spec_str) { if (*spec_str == ':') ++spec_str; if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1) return -EINVAL; if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) return -E2BIG; spec_str += parsed_len; spec->raw_spec[spec->raw_len++] = access_idx; } if (spec->raw_len == 0) return -EINVAL; t = skip_mods_and_typedefs(btf, relo->type_id, &id); if (!t) return -EINVAL; access_idx = spec->raw_spec[0]; acc = &spec->spec[0]; acc->type_id = id; acc->idx = access_idx; spec->len++; if (core_relo_is_enumval_based(relo->kind)) { if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) return -EINVAL; /* record enumerator name in the first accessor */ name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off : btf_enum64(t)[access_idx].name_off; acc->name = btf__name_by_offset(btf, name_off); return 0; } if (!core_relo_is_field_based(relo->kind)) return -EINVAL; sz = btf__resolve_size(btf, id); if (sz < 0) return sz; spec->bit_offset = access_idx * sz * 8; for (i = 1; i < spec->raw_len; i++) { t = skip_mods_and_typedefs(btf, id, &id); if (!t) return -EINVAL; access_idx = spec->raw_spec[i]; acc = &spec->spec[spec->len]; if (btf_is_composite(t)) { const struct btf_member *m; __u32 bit_offset; if (access_idx >= btf_vlen(t)) return -EINVAL; bit_offset = btf_member_bit_offset(t, access_idx); spec->bit_offset += bit_offset; m = btf_members(t) + access_idx; if (m->name_off) { name = btf__name_by_offset(btf, m->name_off); if (str_is_empty(name)) return -EINVAL; acc->type_id = id; acc->idx = access_idx; acc->name = name; spec->len++; } id = m->type; } else if (btf_is_array(t)) { const struct btf_array *a = btf_array(t); bool flex; t = skip_mods_and_typedefs(btf, a->type, &id); if (!t) return -EINVAL; flex = is_flex_arr(btf, acc - 1, a); if (!flex && access_idx >= a->nelems) return -EINVAL; spec->spec[spec->len].type_id = id; spec->spec[spec->len].idx = access_idx; spec->len++; sz = btf__resolve_size(btf, id); if (sz < 0) return sz; spec->bit_offset += access_idx * sz * 8; } else { pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t)); return -EINVAL; } } return 0; } /* Check two types for compatibility for the purpose of field access * relocation.
const/volatile/restrict and typedefs are skipped to ensure we * are relocating semantically compatible entities: * - any two STRUCTs/UNIONs are compatible and can be mixed; * - any two FWDs are compatible, if their names match (modulo flavor suffix); * - any two PTRs are always compatible; * - for ENUMs, names should be the same (ignoring flavor suffix) or at * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; * - for INT, size and signedness are ignored; * - any two FLOATs are always compatible; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * - everything else shouldn't be ever a target of relocation. * These rules are not set in stone and probably will be adjusted as we get * more experience with using BPF CO-RE relocations. */ static int bpf_core_fields_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id) { const struct btf_type *local_type, *targ_type; recur: local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); if (!local_type || !targ_type) return -EINVAL; if (btf_is_composite(local_type) && btf_is_composite(targ_type)) return 1; if (!btf_kind_core_compat(local_type, targ_type)) return 0; switch (btf_kind(local_type)) { case BTF_KIND_PTR: case BTF_KIND_FLOAT: return 1; case BTF_KIND_FWD: case BTF_KIND_ENUM64: case BTF_KIND_ENUM: { const char *local_name, *targ_name; size_t local_len, targ_len; local_name = btf__name_by_offset(local_btf, local_type->name_off); targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); local_len = bpf_core_essential_name_len(local_name); targ_len = bpf_core_essential_name_len(targ_name); /* one of them is anonymous or both w/ same flavor-less names */ return local_len == 0 || targ_len == 0 || (local_len == targ_len && strncmp(local_name, targ_name, local_len) == 0); } case BTF_KIND_INT: /* just reject deprecated bitfield-like integers; all other * integers are by default compatible between each other */ return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; case BTF_KIND_ARRAY: local_id = btf_array(local_type)->type; targ_id = btf_array(targ_type)->type; goto recur; default: return 0; } } /* * Given single high-level named field accessor in local type, find * corresponding high-level accessor for a target type. Along the way, * maintain low-level spec for target as well. Also keep updating target * bit offset. * * Searching is performed through recursive exhaustive enumeration of all * fields of a struct/union. If there are any anonymous (embedded) * structs/unions, they are recursively searched as well. If field with * desired name is found, check compatibility between local and target types, * before returning result. * * 1 is returned, if field is found. * 0 is returned if no compatible field is found. * <0 is returned on error. 
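 *
 * A hedged illustration (not part of the original comment; the type and
 * field names are made up): matching a local accessor for field 'a'
 * against
 *
 *	struct target { struct { int x; int a; }; };
 *
 * first descends into the anonymous struct (speculatively pushing raw
 * index 0), then matches member 'a' at index 1 one level down, so the
 * target's raw spec grows by two entries while its high-level spec gains
 * a single named accessor for 'a'.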
*/ static int bpf_core_match_member(const struct btf *local_btf, const struct bpf_core_accessor *local_acc, const struct btf *targ_btf, __u32 targ_id, struct bpf_core_spec *spec, __u32 *next_targ_id) { const struct btf_type *local_type, *targ_type; const struct btf_member *local_member, *m; const char *local_name, *targ_name; __u32 local_id; int i, n, found; targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); if (!targ_type) return -EINVAL; if (!btf_is_composite(targ_type)) return 0; local_id = local_acc->type_id; local_type = btf_type_by_id(local_btf, local_id); local_member = btf_members(local_type) + local_acc->idx; local_name = btf__name_by_offset(local_btf, local_member->name_off); n = btf_vlen(targ_type); m = btf_members(targ_type); for (i = 0; i < n; i++, m++) { __u32 bit_offset; bit_offset = btf_member_bit_offset(targ_type, i); /* too deep struct/union/array nesting */ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) return -E2BIG; /* speculate this member will be the good one */ spec->bit_offset += bit_offset; spec->raw_spec[spec->raw_len++] = i; targ_name = btf__name_by_offset(targ_btf, m->name_off); if (str_is_empty(targ_name)) { /* embedded struct/union, we need to go deeper */ found = bpf_core_match_member(local_btf, local_acc, targ_btf, m->type, spec, next_targ_id); if (found) /* either found or error */ return found; } else if (strcmp(local_name, targ_name) == 0) { /* matching named field */ struct bpf_core_accessor *targ_acc; targ_acc = &spec->spec[spec->len++]; targ_acc->type_id = targ_id; targ_acc->idx = i; targ_acc->name = targ_name; *next_targ_id = m->type; found = bpf_core_fields_are_compat(local_btf, local_member->type, targ_btf, m->type); if (!found) spec->len--; /* pop accessor */ return found; } /* member turned out not to be what we looked for */ spec->bit_offset -= bit_offset; spec->raw_len--; } return 0; } /* * Try to match local spec to a target type and, if successful, produce full * target spec (high-level, low-level + bit offset). 
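 *
 * Illustrative note (an assumption about a plausible target layout, not
 * from the original comment): reusing the 'struct sample' example above,
 * the local spec for 's->a[3]' is 0:1:2:3; if a target flavor defines
 * 'a' directly as its second top-level member, the matched target spec
 * could be the shorter 0:1:3 -- raw specs may differ between local and
 * target as long as the named accessors and array indices line up and
 * the involved types stay compatible.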
*/ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, const struct btf *targ_btf, __u32 targ_id, struct bpf_core_spec *targ_spec) { const struct btf_type *targ_type; const struct bpf_core_accessor *local_acc; struct bpf_core_accessor *targ_acc; int i, sz, matched; __u32 name_off; memset(targ_spec, 0, sizeof(*targ_spec)); targ_spec->btf = targ_btf; targ_spec->root_type_id = targ_id; targ_spec->relo_kind = local_spec->relo_kind; if (core_relo_is_type_based(local_spec->relo_kind)) { if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES) return bpf_core_types_match(local_spec->btf, local_spec->root_type_id, targ_btf, targ_id); else return bpf_core_types_are_compat(local_spec->btf, local_spec->root_type_id, targ_btf, targ_id); } local_acc = &local_spec->spec[0]; targ_acc = &targ_spec->spec[0]; if (core_relo_is_enumval_based(local_spec->relo_kind)) { size_t local_essent_len, targ_essent_len; const char *targ_name; /* has to resolve to an enum */ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); if (!btf_is_any_enum(targ_type)) return 0; local_essent_len = bpf_core_essential_name_len(local_acc->name); for (i = 0; i < btf_vlen(targ_type); i++) { if (btf_is_enum(targ_type)) name_off = btf_enum(targ_type)[i].name_off; else name_off = btf_enum64(targ_type)[i].name_off; targ_name = btf__name_by_offset(targ_spec->btf, name_off); targ_essent_len = bpf_core_essential_name_len(targ_name); if (targ_essent_len != local_essent_len) continue; if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { targ_acc->type_id = targ_id; targ_acc->idx = i; targ_acc->name = targ_name; targ_spec->len++; targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; targ_spec->raw_len++; return 1; } } return 0; } if (!core_relo_is_field_based(local_spec->relo_kind)) return -EINVAL; for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); if (!targ_type) return -EINVAL; if (local_acc->name) { matched = bpf_core_match_member(local_spec->btf, local_acc, targ_btf, targ_id, targ_spec, &targ_id); if (matched <= 0) return matched; } else { /* for i=0, targ_id is already treated as array element * type (because it's the original struct), for others * we should find array element type first */ if (i > 0) { const struct btf_array *a; bool flex; if (!btf_is_array(targ_type)) return 0; a = btf_array(targ_type); flex = is_flex_arr(targ_btf, targ_acc - 1, a); if (!flex && local_acc->idx >= a->nelems) return 0; if (!skip_mods_and_typedefs(targ_btf, a->type, &targ_id)) return -EINVAL; } /* too deep struct/union/array nesting */ if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) return -E2BIG; targ_acc->type_id = targ_id; targ_acc->idx = local_acc->idx; targ_acc->name = NULL; targ_spec->len++; targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; targ_spec->raw_len++; sz = btf__resolve_size(targ_btf, targ_id); if (sz < 0) return sz; targ_spec->bit_offset += local_acc->idx * sz * 8; } } return 1; } static int bpf_core_calc_field_relo(const char *prog_name, const struct bpf_core_relo *relo, const struct bpf_core_spec *spec, __u64 *val, __u32 *field_sz, __u32 *type_id, bool *validate) { const struct bpf_core_accessor *acc; const struct btf_type *t; __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; const struct btf_member *m; const struct btf_type *mt; bool bitfield; __s64 sz; *field_sz = 0; if (relo->kind == BPF_CORE_FIELD_EXISTS) { *val = spec ? 
1 : 0; return 0; } if (!spec) return -EUCLEAN; /* request instruction poisoning */ acc = &spec->spec[spec->len - 1]; t = btf_type_by_id(spec->btf, acc->type_id); /* a[n] accessor needs special handling */ if (!acc->name) { if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { *val = spec->bit_offset / 8; /* remember field size for load/store mem size */ sz = btf__resolve_size(spec->btf, acc->type_id); if (sz < 0) return -EINVAL; *field_sz = sz; *type_id = acc->type_id; } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) { sz = btf__resolve_size(spec->btf, acc->type_id); if (sz < 0) return -EINVAL; *val = sz; } else { pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n", prog_name, relo->kind, relo->insn_off / 8); return -EINVAL; } if (validate) *validate = true; return 0; } m = btf_members(t) + acc->idx; mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); bit_off = spec->bit_offset; bit_sz = btf_member_bitfield_size(t, acc->idx); bitfield = bit_sz > 0; if (bitfield) { byte_sz = mt->size; byte_off = bit_off / 8 / byte_sz * byte_sz; /* figure out smallest int size necessary for bitfield load */ while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { if (byte_sz >= 8) { /* bitfield can't be read with 64-bit read */ pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n", prog_name, relo->kind, relo->insn_off / 8); return -E2BIG; } byte_sz *= 2; byte_off = bit_off / 8 / byte_sz * byte_sz; } } else { sz = btf__resolve_size(spec->btf, field_type_id); if (sz < 0) return -EINVAL; byte_sz = sz; byte_off = spec->bit_offset / 8; bit_sz = byte_sz * 8; } /* for bitfields, all the relocatable aspects are ambiguous and we * might disagree with compiler, so turn off validation of expected * value, except for signedness */ if (validate) *validate = !bitfield; switch (relo->kind) { case BPF_CORE_FIELD_BYTE_OFFSET: *val = byte_off; if (!bitfield) { *field_sz = byte_sz; *type_id = field_type_id; } break; case BPF_CORE_FIELD_BYTE_SIZE: *val = byte_sz; break; case BPF_CORE_FIELD_SIGNED: *val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) || (btf_is_int(mt) && (btf_int_encoding(mt) & BTF_INT_SIGNED)); if (validate) *validate = true; /* signedness is never ambiguous */ break; case BPF_CORE_FIELD_LSHIFT_U64: #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ *val = 64 - (bit_off + bit_sz - byte_off * 8); #else *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); #endif break; case BPF_CORE_FIELD_RSHIFT_U64: *val = 64 - bit_sz; if (validate) *validate = true; /* right shift is never ambiguous */ break; case BPF_CORE_FIELD_EXISTS: default: return -EOPNOTSUPP; } return 0; } static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, const struct bpf_core_spec *spec, __u64 *val, bool *validate) { __s64 sz; /* by default, always check expected value in bpf_insn */ if (validate) *validate = true; /* type-based relos return zero when target type is not found */ if (!spec) { *val = 0; return 0; } switch (relo->kind) { case BPF_CORE_TYPE_ID_TARGET: *val = spec->root_type_id; /* type ID, embedded in bpf_insn, might change during linking, * so enforcing it is pointless */ if (validate) *validate = false; break; case BPF_CORE_TYPE_EXISTS: case BPF_CORE_TYPE_MATCHES: *val = 1; break; case BPF_CORE_TYPE_SIZE: sz = btf__resolve_size(spec->btf, spec->root_type_id); if (sz < 0) return -EINVAL; *val = sz; break; case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */ default: return -EOPNOTSUPP; } return 0; } static int 
bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, const struct bpf_core_spec *spec, __u64 *val) { const struct btf_type *t; switch (relo->kind) { case BPF_CORE_ENUMVAL_EXISTS: *val = spec ? 1 : 0; break; case BPF_CORE_ENUMVAL_VALUE: if (!spec) return -EUCLEAN; /* request instruction poisoning */ t = btf_type_by_id(spec->btf, spec->spec[0].type_id); if (btf_is_enum(t)) *val = btf_enum(t)[spec->spec[0].idx].val; else *val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx); break; default: return -EOPNOTSUPP; } return 0; } /* Calculate original and target relocation values, given local and target * specs and relocation kind. These values are calculated for each candidate. * If there are multiple candidates, resulting values should all be consistent * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity. * If instruction has to be poisoned, *poison will be set to true. */ static int bpf_core_calc_relo(const char *prog_name, const struct bpf_core_relo *relo, int relo_idx, const struct bpf_core_spec *local_spec, const struct bpf_core_spec *targ_spec, struct bpf_core_relo_res *res) { int err = -EOPNOTSUPP; res->orig_val = 0; res->new_val = 0; res->poison = false; res->validate = true; res->fail_memsz_adjust = false; res->orig_sz = res->new_sz = 0; res->orig_type_id = res->new_type_id = 0; if (core_relo_is_field_based(relo->kind)) { err = bpf_core_calc_field_relo(prog_name, relo, local_spec, &res->orig_val, &res->orig_sz, &res->orig_type_id, &res->validate); err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec, &res->new_val, &res->new_sz, &res->new_type_id, NULL); if (err) goto done; /* Validate if it's safe to adjust load/store memory size. * Adjustments are performed only if original and new memory * sizes differ. */ res->fail_memsz_adjust = false; if (res->orig_sz != res->new_sz) { const struct btf_type *orig_t, *new_t; orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id); new_t = btf_type_by_id(targ_spec->btf, res->new_type_id); /* There are two use cases in which it's safe to * adjust load/store's mem size: * - reading a 32-bit kernel pointer, while on the BPF * side pointers are always 64-bit; in this case * it's safe to "downsize" instruction size due to * pointer being treated as unsigned integer with * zero-extended upper 32-bits; * - reading unsigned integers, again because * zero-extension preserves the value correctly. * * In all other cases it's incorrect to attempt to * load/store field because read value will be * incorrect, so we poison relocated instruction.
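 *
 * A hedged example (not in the original comment): if the BPF-side type
 * recorded a field as an 8-byte unsigned long while the target kernel's
 * BTF has it as a 4-byte unsigned int, the 8-byte BPF_DW load can be
 * narrowed to a 4-byte BPF_W load; sub-64-bit loads zero-extend into the
 * full destination register, so the observed value is unchanged.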
*/ if (btf_is_ptr(orig_t) && btf_is_ptr(new_t)) goto done; if (btf_is_int(orig_t) && btf_is_int(new_t) && btf_int_encoding(orig_t) != BTF_INT_SIGNED && btf_int_encoding(new_t) != BTF_INT_SIGNED) goto done; /* mark as invalid mem size adjustment, but this will * only be checked for LDX/STX/ST insns */ res->fail_memsz_adjust = true; } } else if (core_relo_is_type_based(relo->kind)) { err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate); err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL); } else if (core_relo_is_enumval_based(relo->kind)) { err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); } done: if (err == -EUCLEAN) { /* EUCLEAN is used to signal instruction poisoning request */ res->poison = true; err = 0; } else if (err == -EOPNOTSUPP) { /* EOPNOTSUPP means unknown/unsupported relocation */ pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind, relo->insn_off / 8); } return err; } /* * Turn instruction for which CO-RE relocation failed into invalid one with * distinct signature. */ static void bpf_core_poison_insn(const char *prog_name, int relo_idx, int insn_idx, struct bpf_insn *insn) { pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n", prog_name, relo_idx, insn_idx); insn->code = BPF_JMP | BPF_CALL; insn->dst_reg = 0; insn->src_reg = 0; insn->off = 0; /* if this instruction is reachable (not dead code), * verifier will complain with the following message: * invalid func unknown#195896080 */ insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ } static int insn_bpf_size_to_bytes(struct bpf_insn *insn) { switch (BPF_SIZE(insn->code)) { case BPF_DW: return 8; case BPF_W: return 4; case BPF_H: return 2; case BPF_B: return 1; default: return -1; } } static int insn_bytes_to_bpf_size(__u32 sz) { switch (sz) { case 8: return BPF_DW; case 4: return BPF_W; case 2: return BPF_H; case 1: return BPF_B; default: return -1; } } /* * Patch relocatable BPF instruction. * * Patched value is determined by relocation kind and target specification. * For existence relocations target spec will be NULL if field/type is not found. * Expected insn->imm value is determined using relocation kind and local * spec, and is checked before patching instruction. If actual insn->imm value * is wrong, bail out with error. * * Currently supported classes of BPF instruction are: * 1. rX = <imm> (assignment with immediate operand); * 2. rX += <imm> (arithmetic operations with immediate operand); * 3. rX = <imm64> (load with 64-bit immediate value); * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64}; * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64}; * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
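 *
 * A hedged, concrete sketch of class 4 (register numbers and offsets are
 * made up): a field byte offset relocation may arrive as
 *
 *	r1 = *(u32 *)(r0 + 16)
 *
 * compiled against local BTF; if the target kernel places that field at
 * byte offset 24, patching rewrites insn->off so the instruction becomes
 *
 *	r1 = *(u32 *)(r0 + 24)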
*/ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, int insn_idx, const struct bpf_core_relo *relo, int relo_idx, const struct bpf_core_relo_res *res) { __u64 orig_val, new_val; __u8 class; class = BPF_CLASS(insn->code); if (res->poison) { poison: /* poison second part of ldimm64 to avoid confusing error from * verifier about "unknown opcode 00" */ if (is_ldimm64_insn(insn)) bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1); bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn); return 0; } orig_val = res->orig_val; new_val = res->new_val; switch (class) { case BPF_ALU: case BPF_ALU64: if (BPF_SRC(insn->code) != BPF_K) return -EINVAL; if (res->validate && insn->imm != orig_val) { pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n", prog_name, relo_idx, insn_idx, insn->imm, (unsigned long long)orig_val, (unsigned long long)new_val); return -EINVAL; } orig_val = insn->imm; insn->imm = new_val; pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n", prog_name, relo_idx, insn_idx, (unsigned long long)orig_val, (unsigned long long)new_val); break; case BPF_LDX: case BPF_ST: case BPF_STX: if (res->validate && insn->off != orig_val) { pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n", prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val, (unsigned long long)new_val); return -EINVAL; } if (new_val > SHRT_MAX) { pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n", prog_name, relo_idx, insn_idx, (unsigned long long)new_val); return -ERANGE; } if (res->fail_memsz_adjust) { pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. " "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n", prog_name, relo_idx, insn_idx); goto poison; } orig_val = insn->off; insn->off = new_val; pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n", prog_name, relo_idx, insn_idx, (unsigned long long)orig_val, (unsigned long long)new_val); if (res->new_sz != res->orig_sz) { int insn_bytes_sz, insn_bpf_sz; insn_bytes_sz = insn_bpf_size_to_bytes(insn); if (insn_bytes_sz != res->orig_sz) { pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n", prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); return -EINVAL; } insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); if (insn_bpf_sz < 0) { pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n", prog_name, relo_idx, insn_idx, res->new_sz); return -EINVAL; } insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz); } break; case BPF_LD: { __u64 imm; if (!is_ldimm64_insn(insn) || insn[0].src_reg != 0 || insn[0].off != 0 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n", prog_name, relo_idx, insn_idx); return -EINVAL; } imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32); if (res->validate && imm != orig_val) { pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n", prog_name, relo_idx, insn_idx, (unsigned long long)imm, (unsigned long long)orig_val, (unsigned long long)new_val); return -EINVAL; } insn[0].imm = new_val; 
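/* ldimm64 spans two struct bpf_insn slots: the low 32 bits of the
 * relocated value were written to insn[0].imm above; the high 32 bits go
 * into the second slot's imm just below.
 */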
insn[1].imm = new_val >> 32; pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n", prog_name, relo_idx, insn_idx, (unsigned long long)imm, (unsigned long long)new_val); break; } default: pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n", prog_name, relo_idx, insn_idx, insn->code, insn->src_reg, insn->dst_reg, insn->off, insn->imm); return -EINVAL; } return 0; } /* Output spec definition in the format: * [<type-id>] (<type-name>) + <raw-spec> => <bit-offset>@<spec>, * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b */ int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec) { const struct btf_type *t; const char *s; __u32 type_id; int i, len = 0; #define append_buf(fmt, args...) \ ({ \ int r; \ r = snprintf(buf, buf_sz, fmt, ##args); \ len += r; \ if (r >= buf_sz) \ r = buf_sz; \ buf += r; \ buf_sz -= r; \ }) type_id = spec->root_type_id; t = btf_type_by_id(spec->btf, type_id); s = btf__name_by_offset(spec->btf, t->name_off); append_buf("<%s> [%u] %s %s", core_relo_kind_str(spec->relo_kind), type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s); if (core_relo_is_type_based(spec->relo_kind)) return len; if (core_relo_is_enumval_based(spec->relo_kind)) { t = skip_mods_and_typedefs(spec->btf, type_id, NULL); if (btf_is_enum(t)) { const struct btf_enum *e; const char *fmt_str; e = btf_enum(t) + spec->raw_spec[0]; s = btf__name_by_offset(spec->btf, e->name_off); fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u"; append_buf(fmt_str, s, e->val); } else { const struct btf_enum64 *e; const char *fmt_str; e = btf_enum64(t) + spec->raw_spec[0]; s = btf__name_by_offset(spec->btf, e->name_off); fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu"; append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e)); } return len; } if (core_relo_is_field_based(spec->relo_kind)) { for (i = 0; i < spec->len; i++) { if (spec->spec[i].name) append_buf(".%s", spec->spec[i].name); else if (i > 0 || spec->spec[i].idx > 0) append_buf("[%u]", spec->spec[i].idx); } append_buf(" ("); for (i = 0; i < spec->raw_len; i++) append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); if (spec->bit_offset % 8) append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8); else append_buf(" @ offset %u)", spec->bit_offset / 8); return len; } return len; #undef append_buf } /* * Calculate CO-RE relocation target result. * * The outline and important points of the algorithm: * 1. For given local type, find corresponding candidate target types. * Candidate type is a type with the same "essential" name, ignoring * everything after last triple underscore (___). E.g., `sample`, * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates * for each other. Names with triple underscore are referred to as * "flavors" and are useful, among other things, to allow specifying/supporting * incompatible variations of the same kernel struct, which * might differ between different kernel versions and/or build * configurations. * * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C * converter, when deduplicated BTF of a kernel still contains more than * one distinct type with the same name. In that case, ___2, ___3, etc * are appended starting from second name conflict. But such flavors are * also useful when defined "locally", in the BPF program, to extract the same * data from incompatible changes between different kernel * versions/configurations.
For instance, to handle field renames between * kernel versions, one can use two flavors of the struct name with the * same common name and use conditional relocations to extract that field, * depending on target kernel version. * 2. For each candidate type, try to match local specification to this * candidate target type. Matching involves finding corresponding * high-level spec accessors, meaning that all named fields should match, * as well as all array accesses should be within the actual bounds. Also, * types should be compatible (see bpf_core_fields_are_compat for details). * 3. It is supported and expected that there might be multiple flavors * matching the spec. As long as all the specs resolve to the same set of * offsets across all candidates, there is no error. If there is any * ambiguity, CO-RE relocation will fail. This is necessary to accommodate * imperfection of BTF deduplication, which can cause slight duplication of * the same BTF type, if some directly or indirectly referenced (by * pointer) type gets resolved to different actual types in different * object files. If such a situation occurs, deduplicated BTF will end up * with two (or more) structurally identical types, which differ only in * types they refer to through pointer. This should be OK in most cases and * is not an error. * 4. Candidate types search is performed by linearly scanning through all * types in target BTF. It is anticipated that this is overall more * efficient memory-wise and not significantly worse (if not better) * CPU-wise compared to prebuilding a map from all local type names to * a list of candidate type names. It's also sped up by caching resolved * list of matching candidates per each local "root" type ID, that has at * least one bpf_core_relo associated with it. This list is shared * between multiple relocations for the same type ID and is updated as some * of the candidates are pruned due to structural incompatibility. */ int bpf_core_calc_relo_insn(const char *prog_name, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, struct bpf_core_cand_list *cands, struct bpf_core_spec *specs_scratch, struct bpf_core_relo_res *targ_res) { struct bpf_core_spec *local_spec = &specs_scratch[0]; struct bpf_core_spec *cand_spec = &specs_scratch[1]; struct bpf_core_spec *targ_spec = &specs_scratch[2]; struct bpf_core_relo_res cand_res; const struct btf_type *local_type; const char *local_name; __u32 local_id; char spec_buf[256]; int i, j, err; local_id = relo->type_id; local_type = btf_type_by_id(local_btf, local_id); local_name = btf__name_by_offset(local_btf, local_type->name_off); if (!local_name) return -EINVAL; err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec); if (err) { const char *spec_str; spec_str = btf__name_by_offset(local_btf, relo->access_str_off); pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", prog_name, relo_idx, local_id, btf_kind_str(local_type), str_is_empty(local_name) ? 
"" : local_name, spec_str ?: "", err); return -EINVAL; } bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec); pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf); /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { /* bpf_insn's imm value could get out of sync during linking */ memset(targ_res, 0, sizeof(*targ_res)); targ_res->validate = false; targ_res->poison = false; targ_res->orig_val = local_spec->root_type_id; targ_res->new_val = local_spec->root_type_id; return 0; } /* libbpf doesn't support candidate search for anonymous types */ if (str_is_empty(local_name)) { pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n", prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); return -EOPNOTSUPP; } for (i = 0, j = 0; i < cands->len; i++) { err = bpf_core_spec_match(local_spec, cands->cands[i].btf, cands->cands[i].id, cand_spec); if (err < 0) { bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec); pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n", prog_name, relo_idx, i, spec_buf, err); return err; } bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec); pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name, relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf); if (err == 0) continue; err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res); if (err) return err; if (j == 0) { *targ_res = cand_res; *targ_spec = *cand_spec; } else if (cand_spec->bit_offset != targ_spec->bit_offset) { /* if there are many field relo candidates, they * should all resolve to the same bit offset */ pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", prog_name, relo_idx, cand_spec->bit_offset, targ_spec->bit_offset); return -EINVAL; } else if (cand_res.poison != targ_res->poison || cand_res.new_val != targ_res->new_val) { /* all candidates should result in the same relocation * decision and value, otherwise it's dangerous to * proceed due to ambiguity */ pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n", prog_name, relo_idx, cand_res.poison ? "failure" : "success", (unsigned long long)cand_res.new_val, targ_res->poison ? "failure" : "success", (unsigned long long)targ_res->new_val); return -EINVAL; } cands->cands[j++] = cands->cands[i]; } /* * For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field * existence checks or kernel version/config checks, it's expected * that we might not find any candidates. In this case, if field * wasn't found in any candidate, the list of candidates shouldn't * change at all, we'll just handle relocating appropriately, * depending on relo's kind. */ if (j > 0) cands->len = j; /* * If no candidates were found, it might be both a programmer error, * as well as expected case, depending whether instruction w/ * relocation is guarded in some way that makes it unreachable (dead * code) if relocation can't be resolved. This is handled in * bpf_core_patch_insn() uniformly by replacing that instruction with * BPF helper call insn (using invalid helper ID). If that instruction * is indeed unreachable, then it will be ignored and eliminated by * verifier. If it was an error, then verifier will complain and point * to a specific instruction number in its log. 
*/ if (j == 0) { pr_debug("prog '%s': relo #%d: no matching targets found\n", prog_name, relo_idx); /* calculate single target relo result explicitly */ err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res); if (err) return err; } return 0; } static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off, const struct btf *targ_btf, size_t targ_name_off) { const char *local_n, *targ_n; size_t local_len, targ_len; local_n = btf__name_by_offset(local_btf, local_name_off); targ_n = btf__name_by_offset(targ_btf, targ_name_off); if (str_is_empty(targ_n)) return str_is_empty(local_n); targ_len = bpf_core_essential_name_len(targ_n); local_len = bpf_core_essential_name_len(local_n); return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0; } static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t, const struct btf *targ_btf, const struct btf_type *targ_t) { __u16 local_vlen = btf_vlen(local_t); __u16 targ_vlen = btf_vlen(targ_t); int i, j; if (local_t->size != targ_t->size) return 0; if (local_vlen > targ_vlen) return 0; /* iterate over the local enum's variants and make sure each has * a symbolic name correspondent in the target */ for (i = 0; i < local_vlen; i++) { bool matched = false; __u32 local_n_off, targ_n_off; local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off : btf_enum64(local_t)[i].name_off; for (j = 0; j < targ_vlen; j++) { targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off : btf_enum64(targ_t)[j].name_off; if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) { matched = true; break; } } if (!matched) return 0; } return 1; } static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t, const struct btf *targ_btf, const struct btf_type *targ_t, bool behind_ptr, int level) { const struct btf_member *local_m = btf_members(local_t); __u16 local_vlen = btf_vlen(local_t); __u16 targ_vlen = btf_vlen(targ_t); int i, j, err; if (local_vlen > targ_vlen) return 0; /* check that all local members have a match in the target */ for (i = 0; i < local_vlen; i++, local_m++) { const struct btf_member *targ_m = btf_members(targ_t); bool matched = false; for (j = 0; j < targ_vlen; j++, targ_m++) { if (!bpf_core_names_match(local_btf, local_m->name_off, targ_btf, targ_m->name_off)) continue; err = __bpf_core_types_match(local_btf, local_m->type, targ_btf, targ_m->type, behind_ptr, level - 1); if (err < 0) return err; if (err > 0) { matched = true; break; } } if (!matched) return 0; } return 1; } /* Check that two types "match". This function assumes that root types were * already checked for name match. * * The matching relation is defined as follows: * - modifiers and typedefs are stripped (and, hence, effectively ignored) * - generally speaking types need to be of same kind (struct vs. struct, union * vs. union, etc.) * - exceptions are struct/union behind a pointer which could also match a * forward declaration of a struct or union, respectively, and enum vs. 
* enum64 (see below) * Then, depending on type: * - integers: * - match if size and signedness match * - arrays & pointers: * - target types are recursively matched * - structs & unions: * - local members need to exist in target with the same name * - for each member we recursively check match unless it is already behind a * pointer, in which case we only check matching names and compatible kind * - enums: * - local variants have to have a match in target by symbolic name (but not * numeric value) * - size has to match (but enum may match enum64 and vice versa) * - function pointers: * - number and position of arguments in local type has to match target * - for each argument and the return value we recursively check match */ int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id, bool behind_ptr, int level) { const struct btf_type *local_t, *targ_t; int depth = 32; /* max recursion depth */ __u16 local_k, targ_k; if (level <= 0) return -EINVAL; recur: depth--; if (depth < 0) return -EINVAL; local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id); targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); if (!local_t || !targ_t) return -EINVAL; /* While the name check happens after typedefs are skipped, root-level * typedefs would still be name-matched as that's the contract with * callers. */ if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off)) return 0; local_k = btf_kind(local_t); targ_k = btf_kind(targ_t); switch (local_k) { case BTF_KIND_UNKN: return local_k == targ_k; case BTF_KIND_FWD: { bool local_f = BTF_INFO_KFLAG(local_t->info); if (behind_ptr) { if (local_k == targ_k) return local_f == BTF_INFO_KFLAG(targ_t->info); /* for forward declarations kflag dictates whether the * target is a struct (0) or union (1) */ return (targ_k == BTF_KIND_STRUCT && !local_f) || (targ_k == BTF_KIND_UNION && local_f); } else { if (local_k != targ_k) return 0; /* match if the forward declaration is for the same kind */ return local_f == BTF_INFO_KFLAG(targ_t->info); } } case BTF_KIND_ENUM: case BTF_KIND_ENUM64: if (!btf_is_any_enum(targ_t)) return 0; return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t); case BTF_KIND_STRUCT: case BTF_KIND_UNION: if (behind_ptr) { bool targ_f = BTF_INFO_KFLAG(targ_t->info); if (local_k == targ_k) return 1; if (targ_k != BTF_KIND_FWD) return 0; return (local_k == BTF_KIND_UNION) == targ_f; } else { if (local_k != targ_k) return 0; return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t, behind_ptr, level); } case BTF_KIND_INT: { __u8 local_sgn; __u8 targ_sgn; if (local_k != targ_k) return 0; local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED; targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED; return local_t->size == targ_t->size && local_sgn == targ_sgn; } case BTF_KIND_PTR: if (local_k != targ_k) return 0; behind_ptr = true; local_id = local_t->type; targ_id = targ_t->type; goto recur; case BTF_KIND_ARRAY: { const struct btf_array *local_array = btf_array(local_t); const struct btf_array *targ_array = btf_array(targ_t); if (local_k != targ_k) return 0; if (local_array->nelems != targ_array->nelems) return 0; local_id = local_array->type; targ_id = targ_array->type; goto recur; } case BTF_KIND_FUNC_PROTO: { struct btf_param *local_p = btf_params(local_t); struct btf_param *targ_p = btf_params(targ_t); __u16 local_vlen = btf_vlen(local_t); __u16 targ_vlen = btf_vlen(targ_t); int i, err; if (local_k != targ_k) return 0; if (local_vlen 
!= targ_vlen) return 0; for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { err = __bpf_core_types_match(local_btf, local_p->type, targ_btf, targ_p->type, behind_ptr, level - 1); if (err <= 0) return err; } /* tail recurse for return type check */ local_id = local_t->type; targ_id = targ_t->type; goto recur; } default: pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", btf_kind_str(local_t), local_id, targ_id); return 0; } } xdp-tools-1.5.4/lib/libbpf/src/libbpf_common.h0000644000175100001660000000655514706536574020613 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Common user-facing libbpf helpers. * * Copyright (c) 2019 Facebook */ #ifndef __LIBBPF_LIBBPF_COMMON_H #define __LIBBPF_LIBBPF_COMMON_H #include <string.h> #include "libbpf_version.h" #ifndef LIBBPF_API #define LIBBPF_API __attribute__((visibility("default"))) #endif #define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg))) /* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */ #define LIBBPF_DEPRECATED_SINCE(major, minor, msg) \ __LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor \ (LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg)) #define __LIBBPF_CURRENT_VERSION_GEQ(major, minor) \ (LIBBPF_MAJOR_VERSION > (major) || \ (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor))) /* Add checks for other versions below when planning deprecation of API symbols * with the LIBBPF_DEPRECATED_SINCE macro. */ #if __LIBBPF_CURRENT_VERSION_GEQ(1, 0) #define __LIBBPF_MARK_DEPRECATED_1_0(X) X #else #define __LIBBPF_MARK_DEPRECATED_1_0(X) #endif /* This set of internal macros allows doing "function overloading" based on * number of arguments provided by the user in a backwards-compatible way during the * transition to libbpf 1.0 * It's an ugly but necessary evil that will be cleaned up when we get to 1.0. * See bpf_prog_load() overload for example. */ #define ___libbpf_cat(A, B) A ## B #define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM) #define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N #define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1) #define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__) /* Helper macro to declare and initialize libbpf options struct * * This dance with uninitialized declaration, followed by memset to zero, * followed by assignment using compound literal syntax is done to preserve * ability to use a nice struct field initialization syntax and **hopefully** * have all the padding bytes initialized to zero. It's not guaranteed though, * when copying literal, that compiler won't copy garbage in literal's padding * bytes, but that's the best way I've found and it seems to work in practice. * * Macro declares opts struct of given type and name, zero-initializes it, * including any extra padding, with memset() and then assigns initial * values provided by users in struct initializer-syntax as varargs. */ #define LIBBPF_OPTS(TYPE, NAME, ...) \ struct TYPE NAME = ({ \ memset(&NAME, 0, sizeof(struct TYPE)); \ (struct TYPE) { \ .sz = sizeof(struct TYPE), \ __VA_ARGS__ \ }; \ }) /* Helper macro to clear and optionally reinitialize libbpf options struct * * Small helper macro to reset all fields and to reinitialize the common * structure size member. Values provided by users in struct initializer- * syntax as varargs can be provided as well to reinitialize options struct * specific members. */ #define LIBBPF_OPTS_RESET(NAME, ...)
\ do { \ typeof(NAME) ___##NAME = ({ \ memset(&___##NAME, 0, sizeof(NAME)); \ (typeof(NAME)) { \ .sz = sizeof(NAME), \ __VA_ARGS__ \ }; \ }); \ memcpy(&NAME, &___##NAME, sizeof(NAME)); \ } while (0) #endif /* __LIBBPF_LIBBPF_COMMON_H */ xdp-tools-1.5.4/lib/libbpf/src/libbpf.h0000644000175100001660000022241614706536574017247 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Common eBPF ELF object loading operations. * * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> * Copyright (C) 2015 Huawei Inc. */ #ifndef __LIBBPF_LIBBPF_H #define __LIBBPF_LIBBPF_H #include <stdarg.h> #include <stdio.h> #include <stdint.h> #include <stdbool.h> #include <stddef.h> // for size_t #include <linux/bpf.h> #include "libbpf_common.h" #include "libbpf_legacy.h" #ifdef __cplusplus extern "C" { #endif LIBBPF_API __u32 libbpf_major_version(void); LIBBPF_API __u32 libbpf_minor_version(void); LIBBPF_API const char *libbpf_version_string(void); enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, /* Something wrong in libelf */ LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START, LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */ LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */ LIBBPF_ERRNO__ENDIAN, /* Endian mismatch */ LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */ LIBBPF_ERRNO__RELOC, /* Relocation failed */ LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */ LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */ LIBBPF_ERRNO__PROG2BIG, /* Program too big */ LIBBPF_ERRNO__KVER, /* Incorrect kernel version */ LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */ LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */ LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */ LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */ __LIBBPF_ERRNO__END, }; LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); /** * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type * value into a textual representation. * @param t The attach type. * @return Pointer to a static string identifying the attach type. NULL is * returned for unknown **bpf_attach_type** values. */ LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t); /** * @brief **libbpf_bpf_link_type_str()** converts the provided link type value * into a textual representation. * @param t The link type. * @return Pointer to a static string identifying the link type. NULL is * returned for unknown **bpf_link_type** values. */ LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t); /** * @brief **libbpf_bpf_map_type_str()** converts the provided map type value * into a textual representation. * @param t The map type. * @return Pointer to a static string identifying the map type. NULL is * returned for unknown **bpf_map_type** values. */ LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t); /** * @brief **libbpf_bpf_prog_type_str()** converts the provided program type * value into a textual representation. * @param t The program type. * @return Pointer to a static string identifying the program type. NULL is * returned for unknown **bpf_prog_type** values. */ LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t); enum libbpf_print_level { LIBBPF_WARN, LIBBPF_INFO, LIBBPF_DEBUG, }; typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, const char *, va_list ap); /** * @brief **libbpf_set_print()** sets user-provided log callback function to * be used for libbpf warnings and informational messages.
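 * A minimal callback sketch (illustrative; the function name is the
 * caller's own):
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 *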
If the user callback * is not set, messages are logged to stderr by default. The verbosity of these * messages can be controlled by setting the environment variable * LIBBPF_LOG_LEVEL to either warn, info, or debug. * @param fn The log print function. If NULL, libbpf won't print anything. * @return Pointer to old print function. * * This function is thread-safe. */ LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn); /* Hide internal to user */ struct bpf_object; struct bpf_object_open_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; /* object name override, if provided: * - for object open from file, this will override setting object * name from file path's base name; * - for object open from memory buffer, this will specify an object * name and will override default "<addr>-<buf-size>" name; */ const char *object_name; /* parse map definitions non-strictly, allowing extra attributes/data */ bool relaxed_maps; /* maps that set the 'pinning' attribute in their definition will have * their pin_path attribute set to a file in this directory, and be * auto-pinned to that path on load; defaults to "/sys/fs/bpf". */ const char *pin_root_path; __u32 :32; /* stub out now removed attach_prog_fd */ /* Additional kernel config content that augments and overrides * system Kconfig for CONFIG_xxx externs. */ const char *kconfig; /* Path to the custom BTF to be used for BPF CO-RE relocations. * This custom BTF completely replaces the use of vmlinux BTF * for the purpose of CO-RE relocations. * NOTE: any other BPF feature (e.g., fentry/fexit programs, * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux. */ const char *btf_custom_path; /* Pointer to a buffer for storing kernel logs for applicable BPF * commands. A valid kernel_log_size has to be specified as well, and both * are passed through to the bpf() syscall. Keep in mind that kernel might * fail operation with -ENOSPC error if provided buffer is too small * to contain entire log output. * See the comment below for kernel_log_level for interaction between * log_buf and log_level settings. * * If specified, this log buffer will be passed for: * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden * with bpf_program__set_log() on per-program level, to get * BPF verifier log output. * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get * BTF sanity checking log. * * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite * previous contents, so if you need more fine-grained control, set * per-program buffer with bpf_program__set_log_buf() to preserve each * individual program's verification log. Keep using kernel_log_buf * for BTF verification log, if necessary. */ char *kernel_log_buf; size_t kernel_log_size; /* * Log level can be set independently from log buffer. Log_level=0 * means that libbpf will attempt loading BTF or program without any * logging requested, but will retry with either its own or custom log * buffer, if provided, and log_level=1 on any error. * And vice versa, setting log_level>0 will request BTF or prog * loading with verbose log from the first attempt (and as such also * for successfully loaded BTF or program), and the actual log buffer * could be either libbpf's own auto-allocated log buffer, if * kernel_log_buf is NULL, or user-provided custom kernel_log_buf. * If user didn't provide custom log buffer, libbpf will emit captured * logs through its print callback. */ __u32 kernel_log_level; /* Path to BPF FS mount point to derive BPF token from.
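 * An illustrative way to set it (the path shown is the default BPF FS
 * mount point; the object file name is an example):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		    .bpf_token_path = "/sys/fs/bpf");
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *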
* * Created BPF token will be used for all bpf() syscall operations * that accept BPF token (e.g., map creation, BTF and program loads, * etc) automatically within instantiated BPF object. * * If bpf_token_path is not specified, libbpf will consult * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be * taken as a value of bpf_token_path option and will force libbpf to * either create BPF token from provided custom BPF FS path, or will * disable implicit BPF token creation, if envvar value is an empty * string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if both are * set at the same time. * * Setting bpf_token_path option to empty string disables libbpf's * automatic attempt to create BPF token from default BPF FS mount * point (/sys/fs/bpf), in case this default behavior is undesirable. */ const char *bpf_token_path; size_t :0; }; #define bpf_object_open_opts__last_field bpf_token_path /** * @brief **bpf_object__open()** creates a bpf_object by opening * the BPF ELF object file pointed to by the passed path and loading it * into memory. * @param path BPF object file path. * @return pointer to the new bpf_object; or NULL is returned on error, * error code is stored in errno */ LIBBPF_API struct bpf_object *bpf_object__open(const char *path); /** * @brief **bpf_object__open_file()** creates a bpf_object by opening * the BPF ELF object file pointed to by the passed path and loading it * into memory. * @param path BPF object file path * @param opts options for how to load the bpf object, this parameter is * optional and can be set to NULL * @return pointer to the new bpf_object; or NULL is returned on error, * error code is stored in errno */ LIBBPF_API struct bpf_object * bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts); /** * @brief **bpf_object__open_mem()** creates a bpf_object by reading * the BPF objects raw bytes from a memory buffer containing a valid * BPF ELF object file. * @param obj_buf pointer to the buffer containing ELF file bytes * @param obj_buf_sz number of bytes in the buffer * @param opts options for how to load the bpf object * @return pointer to the new bpf_object; or NULL is returned on error, * error code is stored in errno */ LIBBPF_API struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); /** * @brief **bpf_object__load()** loads BPF object into kernel. * @param obj Pointer to a valid BPF object instance returned by * **bpf_object__open*()** APIs * @return 0, on success; negative error code, otherwise, error code is * stored in errno */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); /** * @brief **bpf_object__close()** closes a BPF object and releases all * resources. * @param obj Pointer to a valid BPF object */ LIBBPF_API void bpf_object__close(struct bpf_object *obj); /** * @brief **bpf_object__pin_maps()** pins each map contained within * the BPF object at the passed directory. * @param obj Pointer to a valid BPF object * @param path A directory where maps should be pinned. * @return 0, on success; negative error code, otherwise * * If `path` is NULL `bpf_map__pin` (which is being used on each map) * will use the pin_path attribute of each map. In this case, maps that * don't have a pin_path set will be ignored. */ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); /** * @brief **bpf_object__unpin_maps()** unpins each map contained within * the BPF object found in the passed directory. 
* @param obj Pointer to a valid BPF object * @param path A directory where pinned maps should be searched for. * @return 0, on success; negative error code, otherwise * * If `path` is NULL `bpf_map__unpin` (which is being used on each map) * will use the pin_path attribute of each map. In this case, maps that * don't have a pin_path set will be ignored. */ LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path); LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version); /** * @brief **bpf_object__token_fd** is an accessor for BPF token FD associated * with BPF object. * @param obj Pointer to a valid BPF object * @return BPF token FD or -1, if it wasn't set */ LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj); struct btf; LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj); LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); LIBBPF_API struct bpf_program * bpf_object__find_program_by_name(const struct bpf_object *obj, const char *name); LIBBPF_API int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type); LIBBPF_API int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type); LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, enum bpf_attach_type attach_type); /* Accessors of bpf_program */ struct bpf_program; LIBBPF_API struct bpf_program * bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog); #define bpf_object__for_each_program(pos, obj) \ for ((pos) = bpf_object__next_program((obj), NULL); \ (pos) != NULL; \ (pos) = bpf_object__next_program((obj), (pos))) LIBBPF_API struct bpf_program * bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog); LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog); LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog); LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach); struct bpf_insn; /** * @brief **bpf_program__insns()** gives read-only access to BPF program's * underlying BPF instructions. * @param prog BPF program for which to return instructions * @return a pointer to an array of BPF instructions that belong to the * specified BPF program * * Returned pointer is always valid and not NULL. Number of `struct bpf_insn` * pointed to can be fetched using **bpf_program__insn_cnt()** API. * * Keep in mind, libbpf can modify and append/delete BPF program's * instructions as it processes BPF object file and prepares everything for * uploading into the kernel. 
So depending on the point in BPF object * lifetime, **bpf_program__insns()** can return different sets of * instructions. As an example, during BPF object load phase BPF program * instructions will be CO-RE-relocated, BPF subprogram instructions will be * appended, ldimm64 instructions will have FDs embedded, etc. So instructions * returned before **bpf_object__load()** and after it might be quite * different. */ LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog); /** * @brief **bpf_program__set_insns()** can set BPF program's underlying * BPF instructions. * * WARNING: This is a very advanced libbpf API and users need to know * what they are doing. This should be used from prog_prepare_load_fn * callback only. * * @param prog BPF program for which to set instructions * @param new_insns a pointer to an array of BPF instructions * @param new_insn_cnt number of `struct bpf_insn`'s that form * specified BPF program * @return 0, on success; negative error code, otherwise */ LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog, struct bpf_insn *new_insns, size_t new_insn_cnt); /** * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s * that form specified BPF program. * @param prog BPF program for which to return number of BPF instructions * * See **bpf_program__insns()** documentation for notes on how libbpf can * change instructions and their count during different phases of * **bpf_object** lifetime. */ LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog); LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); /** * @brief **bpf_program__pin()** pins the BPF program to a file * in the BPF FS specified by a path. This increments the program's * reference count, allowing it to stay loaded after the process * which loaded it has exited. * * @param prog BPF program to pin, must already be loaded * @param path file path in a BPF file system * @return 0, on success; negative error code, otherwise */ LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path); /** * @brief **bpf_program__unpin()** unpins the BPF program from a file * in the BPFFS specified by a path. This decrements the program's * reference count. * * The file pinning the BPF program can also be unlinked by a different * process in which case this function will return an error. * * @param prog BPF program to unpin * @param path file path to the pin in a BPF file system * @return 0, on success; negative error code, otherwise */ LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path); LIBBPF_API void bpf_program__unload(struct bpf_program *prog); struct bpf_link; LIBBPF_API struct bpf_link *bpf_link__open(const char *path); LIBBPF_API int bpf_link__fd(const struct bpf_link *link); LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link); /** * @brief **bpf_link__pin()** pins the BPF link to a file * in the BPF FS specified by a path. This increments the link's * reference count, allowing it to stay loaded after the process * which loaded it has exited. * * @param link BPF link to pin, must already be loaded * @param path file path in a BPF file system * @return 0, on success; negative error code, otherwise */ LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path); /** * @brief **bpf_link__unpin()** unpins the BPF link from a file * in the BPFFS specified by a path. This decrements the link's * reference count.
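 *
 * Illustrative pairing with **bpf_link__pin()** (the pin path is an
 * example):
 *
 *	bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	...
 *	bpf_link__unpin(link);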
 *
 * The file pinning the BPF link can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param link BPF link to unpin
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

/**
 * @brief **bpf_program__attach()** is a generic function for attaching
 * a BPF program based on auto-detection of program type, attach type,
 * and extra parameters, where applicable.
 *
 * @param prog BPF program to attach
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 *
 * This is supported for:
 *   - kprobe/kretprobe (depends on SEC() definition)
 *   - uprobe/uretprobe (depends on SEC() definition)
 *   - tracepoint
 *   - raw tracepoint
 *   - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
 */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* don't use a BPF link when attaching the BPF program */
	bool force_ioctl_attach;
	size_t :0;
};
#define bpf_perf_event_opts__last_field force_ioctl_attach

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

/**
 * enum probe_attach_mode - the mode to attach kprobe/uprobe
 *
 * Forces libbpf to attach a kprobe/uprobe in a specific mode; -ENOTSUP will
 * be returned if the mode is not supported by the kernel.
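 *
 * For example, forcing the legacy debugfs/tracefs-based attach mechanism for
 * a kprobe might look like this (a minimal sketch; "do_sys_open" is an
 * illustrative target function, error handling elided):
 *
 *    LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *                .attach_mode = PROBE_ATTACH_MODE_LEGACY);
 *    struct bpf_link *link =
 *            bpf_program__attach_kprobe_opts(prog, "do_sys_open", &opts);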
 */
enum probe_attach_mode {
	/* attach probe in the latest mode supported by the kernel */
	PROBE_ATTACH_MODE_DEFAULT = 0,
	/* attach probe in legacy mode, using debugfs/tracefs */
	PROBE_ATTACH_MODE_LEGACY,
	/* create perf event with perf_event_open() syscall */
	PROBE_ATTACH_MODE_PERF,
	/* attach probe with BPF link */
	PROBE_ATTACH_MODE_LINK,
};

struct bpf_kprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	/* kprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_kprobe_opts__last_field attach_mode

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);

struct bpf_kprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach */
	const char **syms;
	/* array of function addresses to attach */
	const unsigned long *addrs;
	/* array of user-provided values fetchable through bpf_get_attach_cookie */
	const __u64 *cookies;
	/* number of elements in syms/addrs/cookies arrays */
	size_t cnt;
	/* create return kprobes */
	bool retprobe;
	/* create session kprobes */
	bool session;
	size_t :0;
};
#define bpf_kprobe_multi_opts__last_field session

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts);

struct bpf_uprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach to */
	const char **syms;
	/* array of function offsets to attach to */
	const unsigned long *offsets;
	/* optional, array of associated ref counter offsets */
	const unsigned long *ref_ctr_offsets;
	/* optional, array of associated BPF cookies */
	const __u64 *cookies;
	/* number of elements in syms/offsets/cookies arrays */
	size_t cnt;
	/* create return uprobes */
	bool retprobe;
	size_t :0;
};
#define bpf_uprobe_multi_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
 * to multiple uprobes with uprobe_multi link.
 *
 * Users can specify 2 mutually exclusive sets of inputs:
 *
 * 1) use only path/func_pattern/pid arguments
 *
 * 2) use path/pid with allowed combinations of
 *    syms/offsets/ref_ctr_offsets/cookies/cnt
 *
 * - syms and offsets are mutually exclusive
 * - ref_ctr_offsets and cookies are optional
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary
 * @param func_pattern Regular expression to specify functions to attach
 * BPF program to
 * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog, pid_t pid,
				 const char *binary_path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts);

struct bpf_ksyscall_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* attach as return probe?
	 */
	bool retprobe;
	size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
 * to the kernel syscall handler of a specified syscall. Optionally, it's
 * possible to request to install a retprobe that will be triggered at
 * syscall exit. It's also possible to associate a BPF cookie (through
 * options).
 *
 * Libbpf will automatically determine the correct full kernel function name,
 * which depending on system architecture and kernel version/configuration
 * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
 * attach the specified program using the kprobe/kretprobe mechanism.
 *
 * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
 * **SEC("ksyscall/<syscall_name>")** annotation of BPF programs.
 *
 * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
 * not handle all the calling convention quirks for mmap(), clone() and compat
 * syscalls. It also only attaches to "native" syscall interfaces. If the host
 * system supports compat syscalls or defines 32-bit syscalls in a 64-bit
 * kernel, such syscall interfaces won't be attached to by libbpf.
 *
 * These limitations may or may not change in the future. Therefore it is
 * recommended to use SEC("kprobe") for these syscalls or if working with
 * compat and 32-bit interfaces is required.
 *
 * @param prog BPF program to attach
 * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
 * @param opts Additional options (see **struct bpf_ksyscall_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
			     const char *syscall_name,
			     const struct bpf_ksyscall_opts *opts);

struct bpf_uprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* offset of kernel reference counted USDT semaphore, added in
	 * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	size_t ref_ctr_offset;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* uprobe is return probe, invoked at function return time */
	bool retprobe;
	/* Function name to attach to.  Could be an unqualified ("abc") or library-qualified
	 * "abc@LIBXYZ" name.  To specify function entry, func_name should be set while
	 * func_offset argument to bpf_program__attach_uprobe_opts() should be 0.  To trace an
	 * offset within a function, specify func_name and use func_offset argument to specify
	 * offset within the function.  Shared library functions must specify the shared library
	 * binary_path.
	 */
	const char *func_name;
	/* uprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_uprobe_opts__last_field attach_mode

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
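 *
 * For example, attaching to the entry of malloc() in libc for all processes
 * might look like this (a minimal sketch; the library path and the resolved
 * symbol offset are illustrative, error handling elided):
 *
 *    size_t malloc_off = ...; // offset of malloc within the ELF binary
 *    struct bpf_link *link =
 *            bpf_program__attach_uprobe(prog, false, -1,
 *                                       "/usr/lib64/libc.so.6", malloc_off);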
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);

/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);

struct bpf_usdt_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value accessible through bpf_usdt_cookie() */
	__u64 usdt_cookie;
	size_t :0;
};
#define bpf_usdt_opts__last_field usdt_cookie

/**
 * @brief **bpf_program__attach_usdt()** is just like
 * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
 * Statically Defined Tracepoint) attachment, instead of attaching to
 * user-space function entry or exit.
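 *
 * For example, attaching to a USDT probe in libc for all processes might
 * look like this (a minimal sketch; the library path and provider/probe
 * names are illustrative, error handling elided):
 *
 *    struct bpf_link *link =
 *            bpf_program__attach_usdt(prog, -1, "/usr/lib64/libc.so.6",
 *                                     "libc", "memory_malloc_retry", NULL);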
* * @param prog BPF program to attach * @param pid Process ID to attach the uprobe to, 0 for self (own process), * -1 for all processes * @param binary_path Path to binary that contains provided USDT probe * @param usdt_provider USDT provider name * @param usdt_name USDT probe name * @param opts Options for altering program attachment * @return Reference to the newly created BPF link; or NULL is returned on error, * error code is stored in errno */ LIBBPF_API struct bpf_link * bpf_program__attach_usdt(const struct bpf_program *prog, pid_t pid, const char *binary_path, const char *usdt_provider, const char *usdt_name, const struct bpf_usdt_opts *opts); struct bpf_tracepoint_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; }; #define bpf_tracepoint_opts__last_field bpf_cookie LIBBPF_API struct bpf_link * bpf_program__attach_tracepoint(const struct bpf_program *prog, const char *tp_category, const char *tp_name); LIBBPF_API struct bpf_link * bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, const char *tp_category, const char *tp_name, const struct bpf_tracepoint_opts *opts); struct bpf_raw_tracepoint_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u64 cookie; size_t :0; }; #define bpf_raw_tracepoint_opts__last_field cookie LIBBPF_API struct bpf_link * bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, const char *tp_name); LIBBPF_API struct bpf_link * bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, const char *tp_name, struct bpf_raw_tracepoint_opts *opts); struct bpf_trace_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 cookie; }; #define bpf_trace_opts__last_field cookie LIBBPF_API struct bpf_link * bpf_program__attach_trace(const struct bpf_program *prog); LIBBPF_API struct bpf_link * bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts); LIBBPF_API struct bpf_link * bpf_program__attach_lsm(const struct bpf_program *prog); LIBBPF_API struct bpf_link * bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd); LIBBPF_API struct bpf_link * bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd); LIBBPF_API struct bpf_link * bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd); LIBBPF_API struct bpf_link * bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex); LIBBPF_API struct bpf_link * bpf_program__attach_freplace(const struct bpf_program *prog, int target_fd, const char *attach_func_name); struct bpf_netfilter_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; __u32 pf; __u32 hooknum; __s32 priority; __u32 flags; }; #define bpf_netfilter_opts__last_field flags LIBBPF_API struct bpf_link * bpf_program__attach_netfilter(const struct bpf_program *prog, const struct bpf_netfilter_opts *opts); struct bpf_tcx_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; __u32 flags; __u32 relative_fd; __u32 relative_id; __u64 expected_revision; size_t :0; }; #define bpf_tcx_opts__last_field expected_revision LIBBPF_API struct bpf_link * bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, const struct bpf_tcx_opts *opts); struct bpf_netkit_opts { /* size of this struct, for forward/backward 
compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_netkit_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_type()** sets the program
 * type of the passed BPF program.
 * @param prog BPF program to set the program type for
 * @param type the program type to set
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
				     enum bpf_prog_type type);

LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_expected_attach_type()** sets the
 * attach type of the passed BPF program. This is used for
 * auto-detection of attachment when programs are loaded.
 * @param prog BPF program to set the attach type for
 * @param type the attach type to set
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);

/* Per-program log level and log buffer getters/setters.
 * See bpf_object_open_opts comments regarding log_level and log_buf
 * interactions.
 */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);

/**
 * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
 * for supported BPF program types:
 *   - BTF-aware raw tracepoints (tp_btf);
 *   - fentry/fexit/fmod_ret;
 *   - lsm;
 *   - freplace.
 * @param prog BPF program to set the attach target for
 * @param attach_prog_fd FD of the target BPF program (for freplace), or 0
 * when attaching to a kernel (BTF-based) target
 * @param attach_func_name name of the function to attach to
 * @return error code; or 0 if no error occurred.
 */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);

/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
 * @param obj BPF object
 * @param name name of the BPF map
 * @return BPF map instance, if such map exists within the BPF object;
 * or NULL otherwise.
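 *
 * Example usage (a minimal sketch; "my_map" is an illustrative map name):
 *
 *    struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *    if (!map) {
 *        // error handling: no map of that name in the object
 *    }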
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_object__next_map((obj), NULL);	\
	     (pos) != NULL;				\
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autocreate()** sets whether libbpf has to auto-create
 * BPF map during BPF object load phase.
 * @param map the BPF map instance
 * @param autocreate whether to create BPF map during BPF object load
 * @return 0 on success; -EBUSY if BPF object was already loaded
 *
 * **bpf_map__set_autocreate()** allows opting out of libbpf auto-creating
 * a BPF map. By default, libbpf will attempt to create every single BPF map
 * defined in the BPF object file using the BPF_MAP_CREATE command of the
 * bpf() syscall and fill in the map FD in BPF instructions.
 *
 * This API allows opting out of this process for a specific map instance.
 * This can be useful if the host kernel doesn't support such a BPF map type
 * or the used combination of flags, and the user application wants to avoid
 * creating such a map in the first place. The user is still responsible for
 * making sure that their BPF-side code that expects to use such a missing
 * BPF map is recognized by the BPF verifier as dead code, otherwise the BPF
 * verifier will reject such a BPF program.
 */
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
 * map during BPF skeleton attach phase.
 * @param map the BPF map instance
 * @param autoattach whether to attach map during BPF skeleton attach phase
 * @return 0 on success; negative error code, otherwise
 */
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);

/**
 * @brief **bpf_map__autoattach()** returns whether BPF map is configured to
 * auto-attach during BPF skeleton attach phase.
* @param map the BPF map instance * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise */ LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map); /** * @brief **bpf_map__fd()** gets the file descriptor of the passed * BPF map * @param map the BPF map instance * @return the file descriptor; or -EINVAL in case of an error */ LIBBPF_API int bpf_map__fd(const struct bpf_map *map); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); /* get map name */ LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); /* get/set map type */ LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); /* get/set map size (max_entries) */ LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); /* get/set map flags */ LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); /* get/set map NUMA node */ LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); /* get/set map key size */ LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); /* get map value size */ LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); /** * @brief **bpf_map__set_value_size()** sets map value size. * @param map the BPF map instance * @return 0, on success; negative error, otherwise * * There is a special case for maps with associated memory-mapped regions, like * the global data section maps (bss, data, rodata). When this function is used * on such a map, the mapped region is resized. Afterward, an attempt is made to * adjust the corresponding BTF info. This attempt is best-effort and can only * succeed if the last variable of the data section map is an array. The array * BTF type is replaced by a new BTF array type with a different length. * Any previously existing pointers returned from bpf_map__initial_value() or * corresponding data section skeleton pointer must be reinitialized. */ LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); /* get map key/value BTF type IDs */ LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); /* get/set map if_index */ LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); /* get/set map map_extra flags */ LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size); LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize); /** * @brief **bpf_map__is_internal()** tells the caller whether or not the * passed map is a special map created by libbpf automatically for things like * global variables, __ksym externs, Kconfig values, etc * @param map the bpf_map * @return true, if the map is an internal map; false, otherwise */ LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); /** * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the * BPF map should be pinned. 
This does not actually create the 'pin'.
 * @param map The bpf_map
 * @param path The path
 * @return 0, on success; negative error, otherwise
 */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
 * BPF map should be pinned.
 * @param map The bpf_map
 * @return The path string, which can be NULL
 */
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_pinned()** tells the caller whether or not the
 * passed map has been pinned via a 'pin' file.
 * @param map The bpf_map
 * @return true, if the map is pinned; false, otherwise
 */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);

/**
 * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
 * for the BPF map. This increments the reference count on the
 * BPF map which will keep the BPF map loaded even after the
 * userspace process which loaded it has exited.
 * @param map The bpf_map to pin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * If `path` is NULL the map's `pin_path` attribute will be used. If this is
 * also NULL, an error will be returned and the map will not be pinned.
 */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__unpin()** removes the file that serves as a
 * 'pin' for the BPF map.
 * @param map The bpf_map to unpin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * The `path` parameter can be NULL, in which case the `pin_path`
 * map attribute is unpinned. If both the `path` parameter and
 * `pin_path` map attribute are set, they must be equal.
 */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

/**
 * @brief **bpf_map__lookup_elem()** allows looking up the BPF map value
 * corresponding to the provided key.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8) *
 * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_elem()** is a high-level equivalent of
 * **bpf_map_lookup_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__update_elem()** allows inserting or updating the value in
 * a BPF map that corresponds to the provided key.
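 *
 * For example, updating a map with 4-byte keys and 8-byte values might look
 * like this (a minimal sketch, error handling elided):
 *
 *    __u32 key = 1;
 *    __u64 value = 42;
 *    int err = bpf_map__update_elem(map, &key, sizeof(key),
 *                                   &value, sizeof(value), BPF_ANY);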
 * @param map BPF map to insert to or update element in
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory containing bytes of the value
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8) *
 * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__update_elem()** is a high-level equivalent of
 * **bpf_map_update_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    const void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__delete_elem()** allows deleting the element in a BPF map
 * that corresponds to the provided key.
 * @param map BPF map to delete element from
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__delete_elem()** is a high-level equivalent of
 * **bpf_map_delete_elem()** API with added check for key size.
 */
LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz, __u64 flags);

/**
 * @brief **bpf_map__lookup_and_delete_elem()** allows looking up the BPF map
 * value corresponding to the provided key and atomically deleting it
 * afterwards.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8) *
 * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_and_delete_elem()** is a high-level equivalent of
 * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
					       const void *key, size_t key_sz,
					       void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__get_next_key()** allows iterating BPF map keys by
 * fetching the next key that follows the current key.
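 *
 * For example, iterating over all keys of a map with 4-byte keys might look
 * like this (a minimal sketch, error handling elided):
 *
 *    __u32 cur_key, next_key;
 *    int err = bpf_map__get_next_key(map, NULL, &next_key, sizeof(next_key));
 *    while (!err) {
 *        cur_key = next_key;
 *        // ... process cur_key ...
 *        err = bpf_map__get_next_key(map, &cur_key, &next_key,
 *                                    sizeof(next_key));
 *    }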
* @param map BPF map to fetch next key from * @param cur_key pointer to memory containing bytes of current key or NULL to * fetch the first key * @param next_key pointer to memory to write next key into * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map; * negative error, otherwise * * **bpf_map__get_next_key()** is high-level equivalent of * **bpf_map_get_next_key()** API with added check for key size. */ LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map, const void *cur_key, void *next_key, size_t key_sz); struct bpf_xdp_set_link_opts { size_t sz; int old_fd; size_t :0; }; #define bpf_xdp_set_link_opts__last_field old_fd struct bpf_xdp_attach_opts { size_t sz; int old_prog_fd; size_t :0; }; #define bpf_xdp_attach_opts__last_field old_prog_fd struct bpf_xdp_query_opts { size_t sz; __u32 prog_id; /* output */ __u32 drv_prog_id; /* output */ __u32 hw_prog_id; /* output */ __u32 skb_prog_id; /* output */ __u8 attach_mode; /* output */ __u64 feature_flags; /* output */ __u32 xdp_zc_max_segs; /* output */ size_t :0; }; #define bpf_xdp_query_opts__last_field xdp_zc_max_segs LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts); LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts); LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts); LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id); /* TC related API */ enum bpf_tc_attach_point { BPF_TC_INGRESS = 1 << 0, BPF_TC_EGRESS = 1 << 1, BPF_TC_CUSTOM = 1 << 2, }; #define BPF_TC_PARENT(a, b) \ ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU)) enum bpf_tc_flags { BPF_TC_F_REPLACE = 1 << 0, }; struct bpf_tc_hook { size_t sz; int ifindex; enum bpf_tc_attach_point attach_point; __u32 parent; size_t :0; }; #define bpf_tc_hook__last_field parent struct bpf_tc_opts { size_t sz; int prog_fd; __u32 flags; __u32 prog_id; __u32 handle; __u32 priority; size_t :0; }; #define bpf_tc_opts__last_field priority LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook); LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook); LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts); LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook, const struct bpf_tc_opts *opts); LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts); /* Ring buffer APIs */ struct ring_buffer; struct ring; struct user_ring_buffer; typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size); struct ring_buffer_opts { size_t sz; /* size of this struct, for forward/backward compatibility */ }; #define ring_buffer_opts__last_field sz LIBBPF_API struct ring_buffer * ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, const struct ring_buffer_opts *opts); LIBBPF_API void ring_buffer__free(struct ring_buffer *rb); LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd, ring_buffer_sample_fn sample_cb, void *ctx); LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms); LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb); LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n); LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb); /** * @brief **ring_buffer__ring()** returns the ringbuffer object inside a given * ringbuffer manager representing a single 
BPF_MAP_TYPE_RINGBUF map instance. * * @param rb A ringbuffer manager object. * @param idx An index into the ringbuffers contained within the ringbuffer * manager object. The index is 0-based and corresponds to the order in which * ring_buffer__add was called. * @return A ringbuffer object on success; NULL and errno set if the index is * invalid. */ LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx); /** * @brief **ring__consumer_pos()** returns the current consumer position in the * given ringbuffer. * * @param r A ringbuffer object. * @return The current consumer position. */ LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r); /** * @brief **ring__producer_pos()** returns the current producer position in the * given ringbuffer. * * @param r A ringbuffer object. * @return The current producer position. */ LIBBPF_API unsigned long ring__producer_pos(const struct ring *r); /** * @brief **ring__avail_data_size()** returns the number of bytes in the * ringbuffer not yet consumed. This has no locking associated with it, so it * can be inaccurate if operations are ongoing while this is called. However, it * should still show the correct trend over the long-term. * * @param r A ringbuffer object. * @return The number of bytes not yet consumed. */ LIBBPF_API size_t ring__avail_data_size(const struct ring *r); /** * @brief **ring__size()** returns the total size of the ringbuffer's map data * area (excluding special producer/consumer pages). Effectively this gives the * amount of usable bytes of data inside the ringbuffer. * * @param r A ringbuffer object. * @return The total size of the ringbuffer map data area. */ LIBBPF_API size_t ring__size(const struct ring *r); /** * @brief **ring__map_fd()** returns the file descriptor underlying the given * ringbuffer. * * @param r A ringbuffer object. * @return The underlying ringbuffer file descriptor */ LIBBPF_API int ring__map_fd(const struct ring *r); /** * @brief **ring__consume()** consumes available ringbuffer data without event * polling. * * @param r A ringbuffer object. * @return The number of records consumed (or INT_MAX, whichever is less), or * a negative number if any of the callbacks return an error. */ LIBBPF_API int ring__consume(struct ring *r); /** * @brief **ring__consume_n()** consumes up to a requested amount of items from * a ringbuffer without event polling. * * @param r A ringbuffer object. * @param n Maximum amount of items to consume. * @return The number of items consumed, or a negative number if any of the * callbacks return an error. */ LIBBPF_API int ring__consume_n(struct ring *r, size_t n); struct user_ring_buffer_opts { size_t sz; /* size of this struct, for forward/backward compatibility */ }; #define user_ring_buffer_opts__last_field sz /** * @brief **user_ring_buffer__new()** creates a new instance of a user ring * buffer. * * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map. * @param opts Options for how the ring buffer should be created. * @return A user ring buffer on success; NULL and errno being set on a * failure. */ LIBBPF_API struct user_ring_buffer * user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts); /** * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the * user ring buffer. * @param rb A pointer to a user ring buffer. * @param size The size of the sample, in bytes. 
* @return A pointer to an 8-byte aligned reserved region of the user ring * buffer; NULL, and errno being set if a sample could not be reserved. * * This function is *not* thread safe, and callers must synchronize accessing * this function if there are multiple producers. If a size is requested that * is larger than the size of the entire ring buffer, errno will be set to * E2BIG and NULL is returned. If the ring buffer could accommodate the size, * but currently does not have enough space, errno is set to ENOSPC and NULL is * returned. * * After initializing the sample, callers must invoke * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise, * the sample must be freed with **user_ring_buffer__discard()**. */ LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size); /** * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the * ring buffer, possibly blocking for up to @timeout_ms until a sample becomes * available. * @param rb The user ring buffer. * @param size The size of the sample, in bytes. * @param timeout_ms The amount of time, in milliseconds, for which the caller * should block when waiting for a sample. -1 causes the caller to block * indefinitely. * @return A pointer to an 8-byte aligned reserved region of the user ring * buffer; NULL, and errno being set if a sample could not be reserved. * * This function is *not* thread safe, and callers must synchronize * accessing this function if there are multiple producers * * If **timeout_ms** is -1, the function will block indefinitely until a sample * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno * is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking * will occur and the function will return immediately after attempting to * reserve a sample. * * If **size** is larger than the size of the entire ring buffer, errno is set * to E2BIG and NULL is returned. If the ring buffer could accommodate * **size**, but currently does not have enough space, the caller will block * until at most **timeout_ms** has elapsed. If insufficient space is available * at that time, errno is set to ENOSPC, and NULL is returned. * * The kernel guarantees that it will wake up this thread to check if * sufficient space is available in the ring buffer at least once per * invocation of the **bpf_ringbuf_drain()** helper function, provided that at * least one sample is consumed, and the BPF program did not invoke the * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the * kernel does not guarantee this. If the helper function is invoked with * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is * consumed. * * When a sample of size **size** is found within **timeout_ms**, a pointer to * the sample is returned. After initializing the sample, callers must invoke * **user_ring_buffer__submit()** to post the sample to the ring buffer. * Otherwise, the sample must be freed with **user_ring_buffer__discard()**. */ LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms); /** * @brief **user_ring_buffer__submit()** submits a previously reserved sample * into the ring buffer. * @param rb The user ring buffer. * @param sample A reserved sample. * * It is not necessary to synchronize amongst multiple producers when invoking * this function. 
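 *
 * A typical producer flow might look like this (a minimal sketch; map_fd is
 * assumed to refer to a BPF_MAP_TYPE_USER_RINGBUF map, error handling
 * elided):
 *
 *    struct user_ring_buffer *rb = user_ring_buffer__new(map_fd, NULL);
 *    __u32 *sample = user_ring_buffer__reserve(rb, sizeof(*sample));
 *    if (sample) {
 *        *sample = 42;
 *        user_ring_buffer__submit(rb, sample);
 *    }
 *    user_ring_buffer__free(rb);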
*/ LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample); /** * @brief **user_ring_buffer__discard()** discards a previously reserved sample. * @param rb The user ring buffer. * @param sample A reserved sample. * * It is not necessary to synchronize amongst multiple producers when invoking * this function. */ LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample); /** * @brief **user_ring_buffer__free()** frees a ring buffer that was previously * created with **user_ring_buffer__new()**. * @param rb The user ring buffer being freed. */ LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb); /* Perf buffer APIs */ struct perf_buffer; typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu, void *data, __u32 size); typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt); /* common use perf buffer options */ struct perf_buffer_opts { size_t sz; __u32 sample_period; size_t :0; }; #define perf_buffer_opts__last_field sample_period /** * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified * BPF_PERF_EVENT_ARRAY map * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF * code to send data over to user-space * @param page_cnt number of memory pages allocated for each per-CPU buffer * @param sample_cb function called on each received data record * @param lost_cb function called when record loss has occurred * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb* * @return a new instance of struct perf_buffer on success, NULL on error with * *errno* containing an error code */ LIBBPF_API struct perf_buffer * perf_buffer__new(int map_fd, size_t page_cnt, perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, const struct perf_buffer_opts *opts); enum bpf_perf_event_ret { LIBBPF_PERF_EVENT_DONE = 0, LIBBPF_PERF_EVENT_ERROR = -1, LIBBPF_PERF_EVENT_CONT = -2, }; struct perf_event_header; typedef enum bpf_perf_event_ret (*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event); /* raw perf buffer options, giving most power and control */ struct perf_buffer_raw_opts { size_t sz; long :0; long :0; /* if cpu_cnt == 0, open all on all possible CPUs (up to the number of * max_entries of given PERF_EVENT_ARRAY map) */ int cpu_cnt; /* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */ int *cpus; /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */ int *map_keys; }; #define perf_buffer_raw_opts__last_field map_keys struct perf_event_attr; LIBBPF_API struct perf_buffer * perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr, perf_buffer_event_fn event_cb, void *ctx, const struct perf_buffer_raw_opts *opts); LIBBPF_API void perf_buffer__free(struct perf_buffer *pb); LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb); LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms); LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb); LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx); LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb); LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx); /** * @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying * memory region of the ring buffer. * This ring buffer can be used to implement a custom events consumer. 
 * The ring buffer starts with the *struct perf_event_mmap_page*, which
 * holds the ring buffer management fields; when accessing the header
 * structure it's important to be SMP-aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retrieve
 * @param buf (out) gets the base pointer of the mmap()'ed memory
 * @param buf_size (out) gets the size of the mmap()'ed region
 * @return 0 on success, negative error code for failure
 */
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
				   size_t *buf_size);

struct bpf_prog_linfo;
struct bpf_prog_info;

LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
				__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
		      __u32 insn_off, __u32 nr_skip);

/*
 * Probe for supported system features
 *
 * Note that running many of these probes in a short amount of time can cause
 * the kernel to reach the maximum amount of lockable memory allowed for the
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if the host kernel supports
 * BPF programs of a given type.
 * @param prog_type BPF program type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given program type is supported; 0, if given program type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_map_type()** detects if the host kernel supports
 * BPF maps of a given type.
 * @param map_type BPF map type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given map type is supported; 0, if given map type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);
/**
 * @brief **libbpf_probe_bpf_helper()** detects if the host kernel supports the
 * use of a given BPF helper from specified BPF program type.
 * @param prog_type BPF program type used to check the support of BPF helper
 * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given combination of program type and helper is supported; 0,
 * if the combination is not supported; negative error code if feature
 * detection for provided input arguments failed or can't be performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
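 *
 * For example, checking whether **bpf_redirect_map()** can be used from an
 * XDP program might look like this (a minimal sketch):
 *
 *    int ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
 *                                      BPF_FUNC_redirect_map, NULL);
 *    if (ret == 1) {
 *        // helper is supported; 0 would mean unsupported, and a
 *        // negative value would mean detection itself failed
 *    }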
*/ LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id, const void *opts); /** * @brief **libbpf_num_possible_cpus()** is a helper function to get the * number of possible CPUs that the host kernel supports and expects. * @return number of possible CPUs; or error code on failure * * Example usage: * * int ncpus = libbpf_num_possible_cpus(); * if (ncpus < 0) { * // error handling * } * long values[ncpus]; * bpf_map_lookup_elem(per_cpu_map_fd, key, values); */ LIBBPF_API int libbpf_num_possible_cpus(void); struct bpf_map_skeleton { const char *name; struct bpf_map **map; void **mmaped; struct bpf_link **link; }; struct bpf_prog_skeleton { const char *name; struct bpf_program **prog; struct bpf_link **link; }; struct bpf_object_skeleton { size_t sz; /* size of this struct, for forward/backward compatibility */ const char *name; const void *data; size_t data_sz; struct bpf_object **obj; int map_cnt; int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ struct bpf_map_skeleton *maps; int prog_cnt; int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ struct bpf_prog_skeleton *progs; }; LIBBPF_API int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts); LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s); LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); struct bpf_var_skeleton { const char *name; struct bpf_map **map; void **addr; }; struct bpf_object_subskeleton { size_t sz; /* size of this struct, for forward/backward compatibility */ const struct bpf_object *obj; int map_cnt; int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ struct bpf_map_skeleton *maps; int prog_cnt; int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ struct bpf_prog_skeleton *progs; int var_cnt; int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */ struct bpf_var_skeleton *vars; }; LIBBPF_API int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s); LIBBPF_API void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); struct gen_loader_opts { size_t sz; /* size of this struct, for forward/backward compatibility */ const char *data; const char *insns; __u32 data_sz; __u32 insns_sz; }; #define gen_loader_opts__last_field insns_sz LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts); enum libbpf_tristate { TRI_NO = 0, TRI_YES = 1, TRI_MODULE = 2, }; struct bpf_linker_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; }; #define bpf_linker_opts__last_field sz struct bpf_linker_file_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; }; #define bpf_linker_file_opts__last_field sz struct bpf_linker; LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts); LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts); LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker); LIBBPF_API void bpf_linker__free(struct bpf_linker *linker); /* * Custom handling of BPF program's SEC() definitions */ struct bpf_prog_load_opts; /* defined in bpf.h */ /* Called during bpf_object__open() for each recognized BPF program. 
Callback
 * can use various bpf_program__set_*() setters to adjust whatever properties
 * are necessary.
 */
typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);

/* Called right before libbpf performs bpf_prog_load() to load BPF program
 * into the kernel. Callback can adjust opts as necessary.
 */
typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
					     struct bpf_prog_load_opts *opts, long cookie);

/* Called during skeleton attach or through bpf_program__attach(). If
 * auto-attach is not supported, callback should return 0 and set link to
 * NULL (it's not considered an error during skeleton attach, but it will be
 * an error for bpf_program__attach() calls). On error, error should be
 * returned directly and link set to NULL. On success, return 0 and set link
 * to a valid struct bpf_link.
 */
typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
				       struct bpf_link **link);

struct libbpf_prog_handler_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* User-provided value that is passed to prog_setup_fn,
	 * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows the user
	 * to register one set of callbacks for multiple SEC() definitions and
	 * still be able to distinguish them, if necessary. For example,
	 * libbpf itself uses this to pass necessary flags (e.g.,
	 * sleepable flag) to a common internal SEC() handler.
	 */
	long cookie;
	/* BPF program initialization callback (see libbpf_prog_setup_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_setup_fn_t prog_setup_fn;
	/* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	/* BPF program attach callback (see libbpf_prog_attach_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_attach_fn_t prog_attach_fn;
};
#define libbpf_prog_handler_opts__last_field prog_attach_fn

/**
 * @brief **libbpf_register_prog_handler()** registers a custom BPF program
 * SEC() handler.
 * @param sec section prefix for which custom handler is registered
 * @param prog_type BPF program type associated with specified section
 * @param exp_attach_type Expected BPF attach type associated with specified section
 * @param opts optional cookie, callbacks, and other extra options
 * @return Non-negative handler ID is returned on success. This handler ID has
 * to be passed to *libbpf_unregister_prog_handler()* to unregister such
 * custom handler. Negative error code is returned on error.
 *
 * *sec* defines which SEC() definitions are handled by this custom handler
 * registration. *sec* can have a few different forms:
 *   - if *sec* is just a plain string (e.g., "abc"), it will match only
 *   SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
 *   in an error;
 *   - if *sec* is of the form "abc/", proper SEC() form is
 *   SEC("abc/something"), where acceptable "something" should be checked by
 *   *prog_setup_fn* callback, if there are additional restrictions;
 *   - if *sec* is of the form "abc+", it will successfully match both
 *   SEC("abc") and SEC("abc/whatever") forms;
 *   - if *sec* is NULL, custom handler is registered for any BPF program that
 *   doesn't match any of the registered (custom or libbpf's own) SEC()
 *   handlers. There can be only one such generic custom handler registered
 *   at any given time.
 *
 * All custom handlers (except the one with *sec* == NULL) are processed
 * before libbpf's own SEC() handlers.
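 *
 * For example, registering a handler for a custom "myprog+" section prefix
 * might look like this (a minimal sketch; the prefix, program type, attach
 * type, and cookie value are all illustrative):
 *
 *    LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .cookie = 1);
 *    int handler_id = libbpf_register_prog_handler("myprog+",
 *                                                  BPF_PROG_TYPE_XDP,
 *                                                  BPF_XDP, &opts);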
It is allowed to "override" libbpf's
 * SEC() handlers by registering custom ones for the same section prefix
 * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses")
 * handler).
 *
 * Note, like many of the global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. Users need
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_register_prog_handler(const char *sec,
					    enum bpf_prog_type prog_type,
					    enum bpf_attach_type exp_attach_type,
					    const struct libbpf_prog_handler_opts *opts);

/**
 * @brief *libbpf_unregister_prog_handler()* unregisters previously registered
 * custom BPF program SEC() handler.
 * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
 * after successful registration
 * @return 0 on success, negative error code if handler isn't found
 *
 * Note, like many of the global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. Users need
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_H */
xdp-tools-1.5.4/lib/libbpf/src/gen_loader.c0000644000175100001660000012166614706536574020100 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
*/ struct loader_stack { __u32 btf_fd; __u32 inner_map_fd; __u32 prog_fd[MAX_USED_PROGS]; }; #define stack_off(field) \ (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field)) #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field)) static int blob_fd_array_off(struct bpf_gen *gen, int index) { return gen->fd_array + index * sizeof(int); } static int realloc_insn_buf(struct bpf_gen *gen, __u32 size) { size_t off = gen->insn_cur - gen->insn_start; void *insn_start; if (gen->error) return gen->error; if (size > INT32_MAX || off + size > INT32_MAX) { gen->error = -ERANGE; return -ERANGE; } insn_start = realloc(gen->insn_start, off + size); if (!insn_start) { gen->error = -ENOMEM; free(gen->insn_start); gen->insn_start = NULL; return -ENOMEM; } gen->insn_start = insn_start; gen->insn_cur = insn_start + off; return 0; } static int realloc_data_buf(struct bpf_gen *gen, __u32 size) { size_t off = gen->data_cur - gen->data_start; void *data_start; if (gen->error) return gen->error; if (size > INT32_MAX || off + size > INT32_MAX) { gen->error = -ERANGE; return -ERANGE; } data_start = realloc(gen->data_start, off + size); if (!data_start) { gen->error = -ENOMEM; free(gen->data_start); gen->data_start = NULL; return -ENOMEM; } gen->data_start = data_start; gen->data_cur = data_start + off; return 0; } static void emit(struct bpf_gen *gen, struct bpf_insn insn) { if (realloc_insn_buf(gen, sizeof(insn))) return; memcpy(gen->insn_cur, &insn, sizeof(insn)); gen->insn_cur += sizeof(insn); } static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2) { emit(gen, insn1); emit(gen, insn2); } static int add_data(struct bpf_gen *gen, const void *data, __u32 size); static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off); void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps) { size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz; int i; gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); gen->log_level = log_level; /* save ctx pointer into R6 */ emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1)); /* bzero stack */ emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10)); emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); /* amount of stack actually used, only used to calculate iterations, not stack offset */ nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]); /* jump over cleanup code */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, /* size of cleanup code below (including map fd cleanup) */ (nr_progs_sz / 4) * 3 + 2 + /* 6 insns for emit_sys_close_blob, * 6 insns for debug_regs in emit_sys_close_blob */ nr_maps * (6 + (gen->log_level ? 6 : 0)))); /* remember the label where all error branches will jump to */ gen->cleanup_label = gen->insn_cur - gen->insn_start; /* emit cleanup code: close all temp FDs */ for (i = 0; i < nr_progs_sz; i += 4) { emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i)); emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close)); } for (i = 0; i < nr_maps; i++) emit_sys_close_blob(gen, blob_fd_array_off(gen, i)); /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. 
*/ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); emit(gen, BPF_EXIT_INSN()); } static int add_data(struct bpf_gen *gen, const void *data, __u32 size) { __u32 size8 = roundup(size, 8); __u64 zero = 0; void *prev; if (realloc_data_buf(gen, size8)) return 0; prev = gen->data_cur; if (data) { memcpy(gen->data_cur, data, size); memcpy(gen->data_cur + size, &zero, size8 - size); } else { memset(gen->data_cur, 0, size8); } gen->data_cur += size8; return prev - gen->data_start; } /* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative * to start of fd_array. Caller can decide if it is usable or not. */ static int add_map_fd(struct bpf_gen *gen) { if (gen->nr_maps == MAX_USED_MAPS) { pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS); gen->error = -E2BIG; return 0; } return gen->nr_maps++; } static int add_kfunc_btf_fd(struct bpf_gen *gen) { int cur; if (gen->nr_fd_array == MAX_KFUNC_DESCS) { cur = add_data(gen, NULL, sizeof(int)); return (cur - gen->fd_array) / sizeof(int); } return MAX_USED_MAPS + gen->nr_fd_array++; } static int insn_bytes_to_bpf_size(__u32 sz) { switch (sz) { case 8: return BPF_DW; case 4: return BPF_W; case 2: return BPF_H; case 1: return BPF_B; default: return -1; } } /* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */ static void emit_rel_store(struct bpf_gen *gen, int off, int data) { emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, data)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, off)); emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0)); } static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off) { emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_off)); emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, off)); emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); } static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off) { emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_off)); emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0)); emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off)); } static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off, bool check_non_zero) { emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off)); if (check_non_zero) /* If value in ctx is zero don't update the blob. 
* For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c */ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, off)); emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); } static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off) { emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, off)); emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); } static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off) { emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off)); emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off)); } static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size) { emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, attr)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf)); /* remember the result in R7 */ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); } static bool is_simm16(__s64 value) { return value == (__s64)(__s16)value; } static void emit_check_err(struct bpf_gen *gen) { __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1; /* R7 contains result of last sys_bpf command. * if (R7 < 0) goto cleanup; */ if (is_simm16(off)) { emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off)); } else { gen->error = -ERANGE; emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1)); } } /* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */ static void emit_debug(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, va_list args) { char buf[1024]; int addr, len, ret; if (!gen->log_level) return; ret = vsnprintf(buf, sizeof(buf), fmt, args); if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0) /* The special case to accommodate common debug_ret(): * to avoid specifying BPF_REG_7 and adding " r=%%d" to * prints explicitly. */ strcat(buf, " r=%d"); len = strlen(buf) + 1; addr = add_data(gen, buf, len); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, addr)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); if (reg1 >= 0) emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1)); if (reg2 >= 0) emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk)); } static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...) { va_list args; va_start(args, fmt); emit_debug(gen, reg1, reg2, fmt, args); va_end(args); } static void debug_ret(struct bpf_gen *gen, const char *fmt, ...) { va_list args; va_start(args, fmt); emit_debug(gen, BPF_REG_7, -1, fmt, args); va_end(args); } static void __emit_sys_close(struct bpf_gen *gen) { emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, /* 2 is the number of the following insns * * 6 is additional insns in debug_regs */ 2 + (gen->log_level ? 
6 : 0))); emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close)); debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d"); } static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off) { emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off)); __emit_sys_close(gen); } static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off) { emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_off)); emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0)); __emit_sys_close(gen); } int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps) { int i; if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) { pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n", nr_progs, gen->nr_progs, nr_maps, gen->nr_maps); gen->error = -EFAULT; return gen->error; } emit_sys_close_stack(gen, stack_off(btf_fd)); for (i = 0; i < gen->nr_progs; i++) move_stack2ctx(gen, sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * gen->nr_maps + sizeof(struct bpf_prog_desc) * i + offsetof(struct bpf_prog_desc, prog_fd), 4, stack_off(prog_fd[i])); for (i = 0; i < gen->nr_maps; i++) move_blob2ctx(gen, sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * i + offsetof(struct bpf_map_desc, map_fd), 4, blob_fd_array_off(gen, i)); emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0)); emit(gen, BPF_EXIT_INSN()); pr_debug("gen: finish %d\n", gen->error); if (!gen->error) { struct gen_loader_opts *opts = gen->opts; opts->insns = gen->insn_start; opts->insns_sz = gen->insn_cur - gen->insn_start; opts->data = gen->data_start; opts->data_sz = gen->data_cur - gen->data_start; /* use target endianness for embedded loader */ if (gen->swapped_endian) { struct bpf_insn *insn = (struct bpf_insn *)opts->insns; int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); for (i = 0; i < insn_cnt; i++) bpf_insn_bswap(insn++); } } return gen->error; } void bpf_gen__free(struct bpf_gen *gen) { if (!gen) return; free(gen->data_start); free(gen->insn_start); free(gen); } /* * Fields of bpf_attr are set to values in native byte-order before being * written to the target-bound data blob, and may need endian conversion. * This macro allows providing the correct value in situ more simply than * writing a separate converter for *all fields* of *all records* included * in union bpf_attr. Note that sizeof(rval) should match the assignment * target to avoid runtime problems. 
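 *
 * For example, in bpf_gen__map_create() below, attr.key_size =
 * tgt_endian(key_size) stores bswap_32(key_size) into the blob when
 * gen->swapped_endian is set (key_size being a 4-byte __u32), and the
 * unmodified value otherwise.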
*/ #define tgt_endian(rval) ({ \ typeof(rval) _val = (rval); \ if (gen->swapped_endian) { \ switch (sizeof(_val)) { \ case 1: break; \ case 2: _val = bswap_16(_val); break; \ case 4: _val = bswap_32(_val); break; \ case 8: _val = bswap_64(_val); break; \ default: pr_warn("unsupported bswap size!\n"); \ } \ } \ _val; \ }) void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, __u32 btf_raw_size) { int attr_size = offsetofend(union bpf_attr, btf_log_level); int btf_data, btf_load_attr; union bpf_attr attr; memset(&attr, 0, attr_size); btf_data = add_data(gen, btf_raw_data, btf_raw_size); attr.btf_size = tgt_endian(btf_raw_size); btf_load_attr = add_data(gen, &attr, attr_size); pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n", btf_data, btf_raw_size, btf_load_attr, attr_size); /* populate union bpf_attr with user provided log details */ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4, offsetof(struct bpf_loader_ctx, log_level), false); move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4, offsetof(struct bpf_loader_ctx, log_size), false); move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8, offsetof(struct bpf_loader_ctx, log_buf), false); /* populate union bpf_attr with a pointer to the BTF data */ emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data); /* emit BTF_LOAD command */ emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size); debug_ret(gen, "btf_load size %d", btf_raw_size); emit_check_err(gen); /* remember btf_fd in the stack, if successful */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd))); } void bpf_gen__map_create(struct bpf_gen *gen, enum bpf_map_type map_type, const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries, struct bpf_map_create_opts *map_attr, int map_idx) { int attr_size = offsetofend(union bpf_attr, map_extra); bool close_inner_map_fd = false; int map_create_attr, idx; union bpf_attr attr; memset(&attr, 0, attr_size); attr.map_type = tgt_endian(map_type); attr.key_size = tgt_endian(key_size); attr.value_size = tgt_endian(value_size); attr.map_flags = tgt_endian(map_attr->map_flags); attr.map_extra = tgt_endian(map_attr->map_extra); if (map_name) libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); attr.numa_node = tgt_endian(map_attr->numa_node); attr.map_ifindex = tgt_endian(map_attr->map_ifindex); attr.max_entries = tgt_endian(max_entries); attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id); attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id); map_create_attr = add_data(gen, &attr, attr_size); pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n", map_name, map_idx, map_type, map_attr->btf_value_type_id, map_create_attr, attr_size); if (map_attr->btf_value_type_id) /* populate union bpf_attr with btf_fd saved in the stack earlier */ move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4, stack_off(btf_fd)); switch (map_type) { case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4, stack_off(inner_map_fd)); close_inner_map_fd = true; break; default: break; } /* conditionally update max_entries */ if (map_idx >= 0) move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4, sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * map_idx + offsetof(struct bpf_map_desc, max_entries), true /* check that max_entries != 0 */); /* emit MAP_CREATE command */ emit_sys_bpf(gen, BPF_MAP_CREATE, 
map_create_attr, attr_size); debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d", map_name, map_idx, map_type, value_size, map_attr->btf_value_type_id); emit_check_err(gen); /* remember map_fd in the stack, if successful */ if (map_idx < 0) { /* This bpf_gen__map_create() function is called with map_idx >= 0 * for all maps that libbpf loading logic tracks. * It's called with -1 to create an inner map. */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(inner_map_fd))); } else if (map_idx != gen->nr_maps) { gen->error = -EDOM; /* internal bug */ return; } else { /* add_map_fd does gen->nr_maps++ */ idx = add_map_fd(gen); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_fd_array_off(gen, idx))); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0)); } if (close_inner_map_fd) emit_sys_close_stack(gen, stack_off(inner_map_fd)); } void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name, enum bpf_attach_type type) { const char *prefix; int kind, ret; btf_get_kernel_prefix_kind(type, &prefix, &kind); gen->attach_kind = kind; ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s", prefix, attach_name); if (ret >= sizeof(gen->attach_target)) gen->error = -ENOSPC; } static void emit_find_attach_target(struct bpf_gen *gen) { int name, len = strlen(gen->attach_target) + 1; pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind); name = add_data(gen, gen->attach_target, len); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, name)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind)); emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind)); emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); debug_ret(gen, "find_by_name_kind(%s,%d)", gen->attach_target, gen->attach_kind); emit_check_err(gen); /* if successful, btf_id is in lower 32-bit of R7 and * btf_obj_fd is in upper 32-bit */ } void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, bool is_typeless, bool is_ld64, int kind, int insn_idx) { struct ksym_relo_desc *relo; relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo)); if (!relo) { gen->error = -ENOMEM; return; } gen->relos = relo; relo += gen->relo_cnt; relo->name = name; relo->is_weak = is_weak; relo->is_typeless = is_typeless; relo->is_ld64 = is_ld64; relo->kind = kind; relo->insn_idx = insn_idx; gen->relo_cnt++; } /* returns existing ksym_desc with ref incremented, or inserts a new one */ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo) { struct ksym_desc *kdesc; int i; for (i = 0; i < gen->nr_ksyms; i++) { kdesc = &gen->ksyms[i]; if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 && !strcmp(kdesc->name, relo->name)) { kdesc->ref++; return kdesc; } } kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc)); if (!kdesc) { gen->error = -ENOMEM; return NULL; } gen->ksyms = kdesc; kdesc = &gen->ksyms[gen->nr_ksyms++]; kdesc->name = relo->name; kdesc->kind = relo->kind; kdesc->ref = 1; kdesc->off = 0; kdesc->insn = 0; kdesc->is_ld64 = relo->is_ld64; return kdesc; } /* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} * Returns result in BPF_REG_7 */ static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo) { int name_off, len = strlen(relo->name) + 1; name_off = add_data(gen, relo->name, len); emit2(gen, 
BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, name_off)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind)); emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind)); emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind); } /* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} * Returns result in BPF_REG_7 * Returns u64 symbol addr in BPF_REG_9 */ static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo) { int name_off, len = strlen(relo->name) + 1, res_off; name_off = add_data(gen, relo->name, len); res_off = add_data(gen, NULL, 8); /* res is u64 */ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, name_off)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, res_off)); emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name)); emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0)); emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind); } /* Expects: * BPF_REG_8 - pointer to instruction * * We need to reuse BTF fd for same symbol otherwise each relocation takes a new * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols, * this would mean a new BTF fd index for each entry. By pairing symbol name * with index, we get the insn->imm, insn->off pairing that kernel uses for * kfunc_tab, which becomes the effective limit even though all of them may * share same index in fd_array (such that kfunc_btf_tab has 1 element). 
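 *
 * Concretely, if the same kfunc is called from two instructions, only the
 * first relocation emits the btf_find_by_name_kind() helper call and records
 * its insn offset in kdesc->insn; the second takes the kdesc->ref > 1 path
 * below and just copies imm/off from that instruction via move_blob2blob().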
*/ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) { struct ksym_desc *kdesc; int btf_fd_idx; kdesc = get_ksym_desc(gen, relo); if (!kdesc) return; /* try to copy from existing bpf_insn */ if (kdesc->ref > 1) { move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, kdesc->insn + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2, kdesc->insn + offsetof(struct bpf_insn, off)); goto log; } /* remember insn offset, so we can copy BTF ID and FD later */ kdesc->insn = insn; emit_bpf_find_by_name_kind(gen, relo); if (!relo->is_weak) emit_check_err(gen); /* get index in fd_array to store BTF FD at */ btf_fd_idx = add_kfunc_btf_fd(gen); if (btf_fd_idx > INT16_MAX) { pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n", btf_fd_idx, relo->name); gen->error = -E2BIG; return; } kdesc->off = btf_fd_idx; /* jump to success case */ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); /* set value for imm, off as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); /* skip success case for ret < 0 */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); /* obtain fd in BPF_REG_9 */ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); /* load fd_array slot pointer */ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx))); /* store BTF fd in slot, 0 for vmlinux */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0)); /* jump to insn[insn_idx].off store if fd denotes module BTF */ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2)); /* set the default value for off */ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); /* skip BTF fd store for vmlinux BTF */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1)); /* store index into insn[insn_idx].off */ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx)); log: if (!gen->log_level) return; emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, offsetof(struct bpf_insn, imm))); emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, off))); debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d", relo->name, kdesc->ref); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_fd_array_off(gen, kdesc->off))); emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0)); debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd", relo->name, kdesc->ref); } static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo, int ref) { if (!gen->log_level) return; emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, offsetof(struct bpf_insn, imm))); emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d", relo->is_typeless, relo->is_weak, relo->name, ref); emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg", relo->is_typeless, relo->is_weak, relo->name, ref); } /* Expects: * BPF_REG_8 - pointer to instruction */ static void emit_relo_ksym_typeless(struct 
bpf_gen *gen, struct ksym_relo_desc *relo, int insn) { struct ksym_desc *kdesc; kdesc = get_ksym_desc(gen, relo); if (!kdesc) return; /* try to copy from existing ldimm64 insn */ if (kdesc->ref > 1) { move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, kdesc->insn + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); goto log; } /* remember insn offset, so we can copy ksym addr later */ kdesc->insn = insn; /* skip typeless ksym_desc in fd closing loop in cleanup_relos */ kdesc->typeless = true; emit_bpf_kallsyms_lookup_name(gen, relo); emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1)); emit_check_err(gen); /* store lower half of addr into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm))); /* store upper half of addr into insn[insn_idx + 1].imm */ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); log: emit_ksym_relo_log(gen, relo, kdesc->ref); } static __u32 src_reg_mask(struct bpf_gen *gen) { #if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */ return gen->swapped_endian ? 0xf0 : 0x0f; #elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */ return gen->swapped_endian ? 0x0f : 0xf0; #else #error "Unsupported bit endianness, cannot proceed" #endif } /* Expects: * BPF_REG_8 - pointer to instruction */ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) { struct ksym_desc *kdesc; __u32 reg_mask; kdesc = get_ksym_desc(gen, relo); if (!kdesc) return; /* try to copy from existing ldimm64 insn */ if (kdesc->ref > 1) { move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, kdesc->insn + offsetof(struct bpf_insn, imm)); /* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn */ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3)); goto clear_src_reg; } /* remember insn offset, so we can copy BTF ID and FD later */ kdesc->insn = insn; emit_bpf_find_by_name_kind(gen, relo); if (!relo->is_weak) emit_check_err(gen); /* jump to success case */ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0)); /* skip success case for ret < 0 */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); /* store btf_obj_fd into insn[insn_idx + 1].imm */ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); /* skip src_reg adjustment */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3)); clear_src_reg: /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ reg_mask = src_reg_mask(gen); emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); emit(gen, 
BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask)); emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code))); emit_ksym_relo_log(gen, relo, kdesc->ref); } void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo) { struct bpf_core_relo *relos; relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos)); if (!relos) { gen->error = -ENOMEM; return; } gen->core_relos = relos; relos += gen->core_relo_cnt; memcpy(relos, core_relo, sizeof(*relos)); gen->core_relo_cnt++; } static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) { int insn; pr_debug("gen: emit_relo (%d): %s at %d %s\n", relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call"); insn = insns + sizeof(struct bpf_insn) * relo->insn_idx; emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn)); if (relo->is_ld64) { if (relo->is_typeless) emit_relo_ksym_typeless(gen, relo, insn); else emit_relo_ksym_btf(gen, relo, insn); } else { emit_relo_kfunc_btf(gen, relo, insn); } } static void emit_relos(struct bpf_gen *gen, int insns) { int i; for (i = 0; i < gen->relo_cnt; i++) emit_relo(gen, gen->relos + i, insns); } static void cleanup_core_relo(struct bpf_gen *gen) { if (!gen->core_relo_cnt) return; free(gen->core_relos); gen->core_relo_cnt = 0; gen->core_relos = NULL; } static void cleanup_relos(struct bpf_gen *gen, int insns) { struct ksym_desc *kdesc; int i, insn; for (i = 0; i < gen->nr_ksyms; i++) { kdesc = &gen->ksyms[i]; /* only close fds for typed ksyms and kfuncs */ if (kdesc->is_ld64 && !kdesc->typeless) { /* close fd recorded in insn[insn_idx + 1].imm */ insn = kdesc->insn; insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm); emit_sys_close_blob(gen, insn); } else if (!kdesc->is_ld64) { emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off)); if (kdesc->off < MAX_FD_ARRAY_SZ) gen->nr_fd_array--; } } if (gen->nr_ksyms) { free(gen->ksyms); gen->nr_ksyms = 0; gen->ksyms = NULL; } if (gen->relo_cnt) { free(gen->relos); gen->relo_cnt = 0; gen->relos = NULL; } cleanup_core_relo(gen); } /* Convert func, line, and core relo info blobs to target endianness */ static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info, int core_relos, struct bpf_prog_load_opts *load_attr) { struct bpf_func_info *fi = gen->data_start + func_info; struct bpf_line_info *li = gen->data_start + line_info; struct bpf_core_relo *cr = gen->data_start + core_relos; int i; for (i = 0; i < load_attr->func_info_cnt; i++) bpf_func_info_bswap(fi++); for (i = 0; i < load_attr->line_info_cnt; i++) bpf_line_info_bswap(li++); for (i = 0; i < gen->core_relo_cnt; i++) bpf_core_relo_bswap(cr++); } void bpf_gen__prog_load(struct bpf_gen *gen, enum bpf_prog_type prog_type, const char *prog_name, const char *license, struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *load_attr, int prog_idx) { int func_info_tot_sz = load_attr->func_info_cnt * load_attr->func_info_rec_size; int line_info_tot_sz = load_attr->line_info_cnt * load_attr->line_info_rec_size; int core_relo_tot_sz = gen->core_relo_cnt * sizeof(struct bpf_core_relo); int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos; int attr_size = offsetofend(union bpf_attr, core_relo_rec_size); union bpf_attr attr; memset(&attr, 0, attr_size); /* add license string to blob of bytes */ license_off = add_data(gen, license, strlen(license) + 1); /* add insns to blob of bytes */ insns_off = add_data(gen, 
insns, insn_cnt * sizeof(struct bpf_insn)); pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n", prog_idx, prog_type, insns_off, insn_cnt, license_off); /* convert blob insns to target endianness */ if (gen->swapped_endian) { struct bpf_insn *insn = gen->data_start + insns_off; int i; for (i = 0; i < insn_cnt; i++, insn++) bpf_insn_bswap(insn); } attr.prog_type = tgt_endian(prog_type); attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type); attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id); attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex); attr.kern_version = 0; attr.insn_cnt = tgt_endian((__u32)insn_cnt); attr.prog_flags = tgt_endian(load_attr->prog_flags); attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size); attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt); func_info = add_data(gen, load_attr->func_info, func_info_tot_sz); pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n", func_info, load_attr->func_info_cnt, load_attr->func_info_rec_size); attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size); attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt); line_info = add_data(gen, load_attr->line_info, line_info_tot_sz); pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n", line_info, load_attr->line_info_cnt, load_attr->line_info_rec_size); attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo)); attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt); core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz); pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n", core_relos, gen->core_relo_cnt, sizeof(struct bpf_core_relo)); /* convert all info blobs to target endianness */ if (gen->swapped_endian) info_blob_bswap(gen, func_info, line_info, core_relos, load_attr); libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); prog_load_attr = add_data(gen, &attr, attr_size); pr_debug("gen: prog_load: attr: off %d size %d\n", prog_load_attr, attr_size); /* populate union bpf_attr with a pointer to license */ emit_rel_store(gen, attr_field(prog_load_attr, license), license_off); /* populate union bpf_attr with a pointer to instructions */ emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off); /* populate union bpf_attr with a pointer to func_info */ emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info); /* populate union bpf_attr with a pointer to line_info */ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info); /* populate union bpf_attr with a pointer to core_relos */ emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos); /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */ emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array); /* populate union bpf_attr with user provided log details */ move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4, offsetof(struct bpf_loader_ctx, log_level), false); move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4, offsetof(struct bpf_loader_ctx, log_size), false); move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8, offsetof(struct bpf_loader_ctx, log_buf), false); /* populate union bpf_attr with btf_fd saved in the stack earlier */ move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4, stack_off(btf_fd)); if (gen->attach_kind) { emit_find_attach_target(gen); /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */ emit2(gen, 
BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, prog_load_attr)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(union bpf_attr, attach_btf_id))); emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(union bpf_attr, attach_btf_obj_fd))); } emit_relos(gen, insns_off); /* emit PROG_LOAD command */ emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size); debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt); /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */ cleanup_relos(gen, insns_off); if (gen->attach_kind) { emit_sys_close_blob(gen, attr_field(prog_load_attr, attach_btf_obj_fd)); gen->attach_kind = 0; } emit_check_err(gen); /* remember prog_fd in the stack, if successful */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(prog_fd[gen->nr_progs]))); gen->nr_progs++; } void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, __u32 value_size) { int attr_size = offsetofend(union bpf_attr, flags); int map_update_attr, value, key; union bpf_attr attr; int zero = 0; memset(&attr, 0, attr_size); value = add_data(gen, pvalue, value_size); key = add_data(gen, &zero, sizeof(zero)); /* if (map_desc[map_idx].initial_value) { * if (ctx->flags & BPF_SKEL_KERNEL) * bpf_probe_read_kernel(value, value_size, initial_value); * else * bpf_copy_from_user(value, value_size, initial_value); * } */ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * map_idx + offsetof(struct bpf_map_desc, initial_value))); emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, value)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size)); emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_loader_ctx, flags))); emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user)); emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); map_update_attr = add_data(gen, &attr, attr_size); pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n", map_idx, value, value_size, map_update_attr, attr_size); move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, blob_fd_array_off(gen, map_idx)); emit_rel_store(gen, attr_field(map_update_attr, key), key); emit_rel_store(gen, attr_field(map_update_attr, value), value); /* emit MAP_UPDATE_ELEM command */ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size); emit_check_err(gen); } void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot, int inner_map_idx) { int attr_size = offsetofend(union bpf_attr, flags); int map_update_attr, key; union bpf_attr attr; int tgt_slot; memset(&attr, 0, attr_size); tgt_slot = tgt_endian(slot); key = add_data(gen, &tgt_slot, sizeof(tgt_slot)); map_update_attr = add_data(gen, &attr, attr_size); pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n", outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size); move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, blob_fd_array_off(gen, outer_map_idx)); emit_rel_store(gen, attr_field(map_update_attr, key), key); emit_rel_store(gen, attr_field(map_update_attr, value), blob_fd_array_off(gen, inner_map_idx)); /* emit 
MAP_UPDATE_ELEM command */ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); debug_ret(gen, "populate_outer_map outer %d key %d inner %d", outer_map_idx, slot, inner_map_idx); emit_check_err(gen); } void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) { int attr_size = offsetofend(union bpf_attr, map_fd); int map_freeze_attr; union bpf_attr attr; memset(&attr, 0, attr_size); map_freeze_attr = add_data(gen, &attr, attr_size); pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n", map_idx, map_freeze_attr, attr_size); move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4, blob_fd_array_off(gen, map_idx)); /* emit MAP_FREEZE command */ emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size); debug_ret(gen, "map_freeze"); emit_check_err(gen); } xdp-tools-1.5.4/lib/libbpf/src/usdt.bpf.h0000644000175100001660000002050214706536574017526 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #ifndef __USDT_BPF_H__ #define __USDT_BPF_H__ #include <linux/errno.h> #include "bpf_helpers.h" #include "bpf_tracing.h" /* Below types and maps are internal implementation details of libbpf's USDT * support and are subject to change. Also, bpf_usdt_xxx() API helpers should * be considered an unstable API as well and might be adjusted based on user * feedback from using libbpf's USDT support in production. */ /* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal * map that keeps track of USDT argument specifications. This might be * necessary if there are a lot of USDT attachments. */ #ifndef BPF_USDT_MAX_SPEC_CNT #define BPF_USDT_MAX_SPEC_CNT 256 #endif /* User can override BPF_USDT_MAX_IP_CNT to change default size of internal * map that keeps track of IP (memory address) mapping to USDT argument * specification. * Note, if kernel supports BPF cookies, this map is not used and could be * resized all the way to 1 to save a bit of memory. */ #ifndef BPF_USDT_MAX_IP_CNT #define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT) #endif enum __bpf_usdt_arg_type { BPF_USDT_ARG_CONST, BPF_USDT_ARG_REG, BPF_USDT_ARG_REG_DEREF, }; struct __bpf_usdt_arg_spec { /* u64 scalar interpreted depending on arg_type, see below */ __u64 val_off; /* arg location case, see bpf_usdt_arg() for details */ enum __bpf_usdt_arg_type arg_type; /* offset of referenced register within struct pt_regs */ short reg_off; /* whether arg should be interpreted as signed value */ bool arg_signed; /* number of bits that need to be cleared and, optionally, * sign-extended to cast arguments that are 1, 2, or 4 bytes * long into final 8-byte u64/s64 value returned to user */ char arg_bitshift; }; /* should match USDT_MAX_ARG_CNT in usdt.c exactly */ #define BPF_USDT_MAX_ARG_CNT 12 struct __bpf_usdt_spec { struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT]; __u64 usdt_cookie; short arg_cnt; }; struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, BPF_USDT_MAX_SPEC_CNT); __type(key, int); __type(value, struct __bpf_usdt_spec); } __bpf_usdt_specs SEC(".maps") __weak; struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, BPF_USDT_MAX_IP_CNT); __type(key, long); __type(value, __u32); } __bpf_usdt_ip_to_spec_id SEC(".maps") __weak; extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig; static __always_inline int __bpf_usdt_spec_id(struct pt_regs *ctx) { if (!LINUX_HAS_BPF_COOKIE) { long ip = PT_REGS_IP(ctx); int *spec_id_ptr; spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip); return spec_id_ptr ?
*spec_id_ptr : -ESRCH; } return bpf_get_attach_cookie(ctx); } /* Return number of USDT arguments defined for currently traced USDT. */ __weak __hidden int bpf_usdt_arg_cnt(struct pt_regs *ctx) { struct __bpf_usdt_spec *spec; int spec_id; spec_id = __bpf_usdt_spec_id(ctx); if (spec_id < 0) return -ESRCH; spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); if (!spec) return -ESRCH; return spec->arg_cnt; } /* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res. * Returns 0 on success; negative error, otherwise. * On error *res is guaranteed to be set to zero. */ __weak __hidden int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res) { struct __bpf_usdt_spec *spec; struct __bpf_usdt_arg_spec *arg_spec; unsigned long val; int err, spec_id; *res = 0; spec_id = __bpf_usdt_spec_id(ctx); if (spec_id < 0) return -ESRCH; spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); if (!spec) return -ESRCH; if (arg_num >= BPF_USDT_MAX_ARG_CNT) return -ENOENT; barrier_var(arg_num); if (arg_num >= spec->arg_cnt) return -ENOENT; arg_spec = &spec->args[arg_num]; switch (arg_spec->arg_type) { case BPF_USDT_ARG_CONST: /* Arg is just a constant ("-4@$-9" in USDT arg spec). * value is recorded in arg_spec->val_off directly. */ val = arg_spec->val_off; break; case BPF_USDT_ARG_REG: /* Arg is in a register (e.g., "8@%rax" in USDT arg spec), * so we read the contents of that register directly from * struct pt_regs. To keep things simple user-space parts * record offsetof(struct pt_regs, <regname>) in arg_spec->reg_off. */ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off); if (err) return err; break; case BPF_USDT_ARG_REG_DEREF: /* Arg is in memory addressed by register, plus some offset * (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is * identified like with BPF_USDT_ARG_REG case, and the offset * is in arg_spec->val_off. We first fetch register contents * from pt_regs, then do another user-space probe read to * fetch argument value itself. */ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off); if (err) return err; err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off); if (err) return err; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ val >>= arg_spec->arg_bitshift; #endif break; default: return -EINVAL; } /* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing * necessary upper arg_bitshift bits, with sign extension if argument * is signed */ val <<= arg_spec->arg_bitshift; if (arg_spec->arg_signed) val = ((long)val) >> arg_spec->arg_bitshift; else val = val >> arg_spec->arg_bitshift; *res = val; return 0; } /* Retrieve user-specified cookie value provided during attach as * bpf_usdt_opts.usdt_cookie. This serves the same purpose as BPF cookie * returned by bpf_get_attach_cookie(). Libbpf's support for USDT is itself * utilizing BPF cookies internally, so user can't use BPF cookie directly * for USDT programs and has to use bpf_usdt_cookie() API instead. */ __weak __hidden long bpf_usdt_cookie(struct pt_regs *ctx) { struct __bpf_usdt_spec *spec; int spec_id; spec_id = __bpf_usdt_spec_id(ctx); if (spec_id < 0) return 0; spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); if (!spec) return 0; return spec->usdt_cookie; } /* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */ #define ___bpf_usdt_args0() ctx #define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); _x; }) #define ___bpf_usdt_args2(x, args...)
___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); _x; }) #define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); _x; }) #define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); _x; }) #define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); _x; }) #define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); _x; }) #define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); _x; }) #define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); _x; }) #define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); _x; }) #define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); _x; }) #define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); _x; }) #define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); _x; }) #define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args) /* * BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for * tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes. * Original struct pt_regs * context is preserved as 'ctx' argument. */ #define BPF_USDT(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args); \ typeof(name(0)) name(struct pt_regs *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_usdt_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args) #endif /* __USDT_BPF_H__ */ xdp-tools-1.5.4/lib/libbpf/src/elf.c0000644000175100001660000003275014706536574016542 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <libelf.h> #include <gelf.h> #include <fcntl.h> #include <linux/kernel.h> #include "libbpf_internal.h" #include "str_error.h" /* A SHT_GNU_versym section holds 16-bit words. This bit is set if * the symbol is hidden and can only be seen when referenced using an * explicit version number. This is a GNU extension. */ #define VERSYM_HIDDEN 0x8000 /* This is the mask for the rest of the data in a word read from a * SHT_GNU_versym section. */ #define VERSYM_VERSION 0x7fff int elf_open(const char *binary_path, struct elf_fd *elf_fd) { char errmsg[STRERR_BUFSIZE]; int fd, ret; Elf *elf; elf_fd->elf = NULL; elf_fd->fd = -1; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("elf: failed to init libelf for %s\n", binary_path); return -LIBBPF_ERRNO__LIBELF; } fd = open(binary_path, O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = -errno; pr_warn("elf: failed to open %s: %s\n", binary_path, libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); return ret; } elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (!elf) { pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); close(fd); return -LIBBPF_ERRNO__FORMAT; } elf_fd->fd = fd; elf_fd->elf = elf; return 0; } void elf_close(struct elf_fd *elf_fd) { if (!elf_fd) return; elf_end(elf_fd->elf); close(elf_fd->fd); } /* Return next ELF section of sh_type after scn, or first of that type if scn is NULL.
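 *
 * For example, elf_find_next_scn_by_type(elf, SHT_SYMTAB, NULL) returns the
 * first symbol table section, and passing the result back in as scn resumes
 * the scan right after it.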
*/ static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) { while ((scn = elf_nextscn(elf, scn)) != NULL) { GElf_Shdr sh; if (!gelf_getshdr(scn, &sh)) continue; if (sh.sh_type == sh_type) return scn; } return NULL; } struct elf_sym { const char *name; GElf_Sym sym; GElf_Shdr sh; int ver; bool hidden; }; struct elf_sym_iter { Elf *elf; Elf_Data *syms; Elf_Data *versyms; Elf_Data *verdefs; size_t nr_syms; size_t strtabidx; size_t verdef_strtabidx; size_t next_sym_idx; struct elf_sym sym; int st_type; }; static int elf_sym_iter_new(struct elf_sym_iter *iter, Elf *elf, const char *binary_path, int sh_type, int st_type) { Elf_Scn *scn = NULL; GElf_Ehdr ehdr; GElf_Shdr sh; memset(iter, 0, sizeof(*iter)); if (!gelf_getehdr(elf, &ehdr)) { pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); return -EINVAL; } scn = elf_find_next_scn_by_type(elf, sh_type, NULL); if (!scn) { pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", binary_path); return -ENOENT; } if (!gelf_getshdr(scn, &sh)) return -EINVAL; iter->strtabidx = sh.sh_link; iter->syms = elf_getdata(scn, 0); if (!iter->syms) { pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", binary_path, elf_errmsg(-1)); return -EINVAL; } iter->nr_syms = iter->syms->d_size / sh.sh_entsize; iter->elf = elf; iter->st_type = st_type; /* Version symbol table is meaningful to dynsym only */ if (sh_type != SHT_DYNSYM) return 0; scn = elf_find_next_scn_by_type(elf, SHT_GNU_versym, NULL); if (!scn) return 0; iter->versyms = elf_getdata(scn, 0); scn = elf_find_next_scn_by_type(elf, SHT_GNU_verdef, NULL); if (!scn) return 0; iter->verdefs = elf_getdata(scn, 0); if (!iter->verdefs || !gelf_getshdr(scn, &sh)) { pr_warn("elf: failed to get verdef ELF section in '%s'\n", binary_path); return -EINVAL; } iter->verdef_strtabidx = sh.sh_link; return 0; } static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter) { struct elf_sym *ret = &iter->sym; GElf_Sym *sym = &ret->sym; const char *name = NULL; GElf_Versym versym; Elf_Scn *sym_scn; size_t idx; for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) { if (!gelf_getsym(iter->syms, idx, sym)) continue; if (GELF_ST_TYPE(sym->st_info) != iter->st_type) continue; name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name); if (!name) continue; sym_scn = elf_getscn(iter->elf, sym->st_shndx); if (!sym_scn) continue; if (!gelf_getshdr(sym_scn, &ret->sh)) continue; iter->next_sym_idx = idx + 1; ret->name = name; ret->ver = 0; ret->hidden = false; if (iter->versyms) { if (!gelf_getversym(iter->versyms, idx, &versym)) continue; ret->ver = versym & VERSYM_VERSION; ret->hidden = versym & VERSYM_HIDDEN; } return ret; } return NULL; } static const char *elf_get_vername(struct elf_sym_iter *iter, int ver) { GElf_Verdaux verdaux; GElf_Verdef verdef; int offset; if (!iter->verdefs) return NULL; offset = 0; while (gelf_getverdef(iter->verdefs, offset, &verdef)) { if (verdef.vd_ndx != ver) { if (!verdef.vd_next) break; offset += verdef.vd_next; continue; } if (!gelf_getverdaux(iter->verdefs, offset + verdef.vd_aux, &verdaux)) break; return elf_strptr(iter->elf, iter->verdef_strtabidx, verdaux.vda_name); } return NULL; } static bool symbol_match(struct elf_sym_iter *iter, int sh_type, struct elf_sym *sym, const char *name, size_t name_len, const char *lib_ver) { const char *ver_name; /* Symbols are in forms of func, func@LIB_VER or func@@LIB_VER * make sure the func part matches the user specified name */ if (strncmp(sym->name, name, name_len) != 
0) return false; /* ...but we don't want a search for "foo" to match "foo2" also, so any * additional characters in sym->name should be of the form "@@LIB". */ if (sym->name[name_len] != '\0' && sym->name[name_len] != '@') return false; /* If user does not specify symbol version, then we got a match */ if (!lib_ver) return true; /* If user specifies symbol version, for dynamic symbols, * get version name from ELF verdef section for comparison. */ if (sh_type == SHT_DYNSYM) { ver_name = elf_get_vername(iter, sym->ver); if (!ver_name) return false; return strcmp(ver_name, lib_ver) == 0; } /* For normal symbols, it is already in form of func@LIB_VER */ return strcmp(sym->name, name) == 0; } /* Transform symbol's virtual address (absolute for binaries and relative * for shared libs) into file offset, which is what kernel is expecting * for uprobe/uretprobe attachment. * See Documentation/trace/uprobetracer.rst for more details. This is done * by looking up symbol's containing section's header and using its virtual * address (sh_addr) and corresponding file offset (sh_offset) to transform * sym.st_value (virtual address) into desired final file offset. */ static unsigned long elf_sym_offset(struct elf_sym *sym) { return sym->sym.st_value - sym->sh.sh_addr + sym->sh.sh_offset; } /* Find offset of function name in the provided ELF object. "binary_path" is * the path to the ELF binary represented by "elf", and is only used for error * reporting. "name" matches symbol name or name@@LIB for library * functions. */ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) { int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; const char *at_symbol, *lib_ver; bool is_shared_lib; long ret = -ENOENT; size_t name_len; GElf_Ehdr ehdr; if (!gelf_getehdr(elf, &ehdr)) { pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); ret = -LIBBPF_ERRNO__FORMAT; goto out; } /* for shared lib case, we do not need to calculate relative offset */ is_shared_lib = ehdr.e_type == ET_DYN; /* Does name specify "@@LIB_VER" or "@LIB_VER" ? */ at_symbol = strchr(name, '@'); if (at_symbol) { name_len = at_symbol - name; /* skip second @ if it's @@LIB_VER case */ if (at_symbol[1] == '@') at_symbol++; lib_ver = at_symbol + 1; } else { name_len = strlen(name); lib_ver = NULL; } /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically * linked binary may not have SHT_DYNSYM, so absence of a section should not be * reported as a warning/error. */ for (i = 0; i < ARRAY_SIZE(sh_types); i++) { struct elf_sym_iter iter; struct elf_sym *sym; int last_bind = -1; int cur_bind; ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC); if (ret == -ENOENT) continue; if (ret) goto out; while ((sym = elf_sym_iter_next(&iter))) { if (!symbol_match(&iter, sh_types[i], sym, name, name_len, lib_ver)) continue; cur_bind = GELF_ST_BIND(sym->sym.st_info); if (ret > 0) { /* handle multiple matches */ if (elf_sym_offset(sym) == ret) { /* same offset, no problem */ continue; } else if (last_bind != STB_WEAK && cur_bind != STB_WEAK) { /* Only accept one non-weak bind. */ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", sym->name, name, binary_path); ret = -LIBBPF_ERRNO__FORMAT; goto out; } else if (cur_bind == STB_WEAK) { /* already have a non-weak bind, and * this is a weak bind, so ignore.
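 *
 * E.g. if a global and a weak definition of the same name live at
 * different offsets, the global one wins silently, while two non-weak
 * definitions at different offsets trigger the ambiguity error above.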
*/ continue; } } ret = elf_sym_offset(sym); last_bind = cur_bind; } if (ret > 0) break; } if (ret > 0) { pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, ret); } else { if (ret == 0) { pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, is_shared_lib ? "should not be 0 in a shared library" : "try using shared library path instead"); ret = -ENOENT; } else { pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); } } out: return ret; } /* Find offset of function name in ELF object specified by path. "name" matches * symbol name or name@@LIB for library functions. */ long elf_find_func_offset_from_file(const char *binary_path, const char *name) { struct elf_fd elf_fd; long ret = -ENOENT; ret = elf_open(binary_path, &elf_fd); if (ret) return ret; ret = elf_find_func_offset(elf_fd.elf, binary_path, name); elf_close(&elf_fd); return ret; } struct symbol { const char *name; int bind; int idx; }; static int symbol_cmp(const void *a, const void *b) { const struct symbol *sym_a = a; const struct symbol *sym_b = b; return strcmp(sym_a->name, sym_b->name); } /* * Return offsets in @poffsets for symbols specified in @syms array argument. * On success returns 0 and offsets are returned in allocated array with @cnt * size, that needs to be released by the caller. */ int elf_resolve_syms_offsets(const char *binary_path, int cnt, const char **syms, unsigned long **poffsets, int st_type) { int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; int err = 0, i, cnt_done = 0; unsigned long *offsets; struct symbol *symbols; struct elf_fd elf_fd; err = elf_open(binary_path, &elf_fd); if (err) return err; offsets = calloc(cnt, sizeof(*offsets)); symbols = calloc(cnt, sizeof(*symbols)); if (!offsets || !symbols) { err = -ENOMEM; goto out; } for (i = 0; i < cnt; i++) { symbols[i].name = syms[i]; symbols[i].idx = i; } qsort(symbols, cnt, sizeof(*symbols), symbol_cmp); for (i = 0; i < ARRAY_SIZE(sh_types); i++) { struct elf_sym_iter iter; struct elf_sym *sym; err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type); if (err == -ENOENT) continue; if (err) goto out; while ((sym = elf_sym_iter_next(&iter))) { unsigned long sym_offset = elf_sym_offset(sym); int bind = GELF_ST_BIND(sym->sym.st_info); struct symbol *found, tmp = { .name = sym->name, }; unsigned long *offset; found = bsearch(&tmp, symbols, cnt, sizeof(*symbols), symbol_cmp); if (!found) continue; offset = &offsets[found->idx]; if (*offset > 0) { /* same offset, no problem */ if (*offset == sym_offset) continue; /* handle multiple matches */ if (found->bind != STB_WEAK && bind != STB_WEAK) { /* Only accept one non-weak bind. */ pr_warn("elf: ambiguous match found '%s@%lu' in '%s' previous offset %lu\n", sym->name, sym_offset, binary_path, *offset); err = -ESRCH; goto out; } else if (bind == STB_WEAK) { /* already have a non-weak bind, and * this is a weak bind, so ignore. */ continue; } } else { cnt_done++; } *offset = sym_offset; found->bind = bind; } } if (cnt != cnt_done) { err = -ENOENT; goto out; } *poffsets = offsets; out: free(symbols); if (err) free(offsets); elf_close(&elf_fd); return err; } /* * Return offsets in @poffsets for symbols specified by @pattern argument. * On success returns 0 and offsets are returned in allocated @poffsets * array with the @pcnt size, that needs to be released by the caller.
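 *
 * A minimal hypothetical usage (binary path and pattern are examples only):
 *
 *	unsigned long *offsets;
 *	size_t cnt;
 *
 *	if (!elf_resolve_pattern_offsets("/usr/lib64/libc.so.6", "malloc*",
 *					 &offsets, &cnt))
 *		free(offsets);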
*/ int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern, unsigned long **poffsets, size_t *pcnt) { int sh_types[2] = { SHT_SYMTAB, SHT_DYNSYM }; unsigned long *offsets = NULL; size_t cap = 0, cnt = 0; struct elf_fd elf_fd; int err = 0, i; err = elf_open(binary_path, &elf_fd); if (err) return err; for (i = 0; i < ARRAY_SIZE(sh_types); i++) { struct elf_sym_iter iter; struct elf_sym *sym; err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC); if (err == -ENOENT) continue; if (err) goto out; while ((sym = elf_sym_iter_next(&iter))) { if (!glob_match(sym->name, pattern)) continue; err = libbpf_ensure_mem((void **) &offsets, &cap, sizeof(*offsets), cnt + 1); if (err) goto out; offsets[cnt++] = elf_sym_offset(sym); } /* If we found anything in the first symbol section, * do not search others to avoid duplicates. */ if (cnt) break; } if (cnt) { *poffsets = offsets; *pcnt = cnt; } else { err = -ENOENT; } out: if (err) free(offsets); elf_close(&elf_fd); return err; } xdp-tools-1.5.4/lib/libbpf/src/Makefile0000644000175100001660000001237014706536574017274 0ustar runnerdocker# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) ifeq ($(V),1) Q = msg = else Q = @ msg = @printf ' %-8s %s%s\n' "$(1)" "$(2)" "$(if $(3), $(3))"; endif LIBBPF_MAJOR_VERSION := 1 LIBBPF_MINOR_VERSION := 5 LIBBPF_PATCH_VERSION := 0 LIBBPF_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).$(LIBBPF_PATCH_VERSION) LIBBPF_MAJMIN_VERSION := $(LIBBPF_MAJOR_VERSION).$(LIBBPF_MINOR_VERSION).0 LIBBPF_MAP_VERSION := $(shell grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | sort -rV | head -n1 | cut -d'_' -f2) ifneq ($(LIBBPF_MAJMIN_VERSION), $(LIBBPF_MAP_VERSION)) $(error Libbpf release ($(LIBBPF_VERSION)) and map ($(LIBBPF_MAP_VERSION)) versions are out of sync!) endif define allow-override $(if $(or $(findstring environment,$(origin $(1))),\ $(findstring command line,$(origin $(1)))),,\ $(eval $(1) = $(2))) endef $(call allow-override,CC,$(CROSS_COMPILE)cc) $(call allow-override,LD,$(CROSS_COMPILE)ld) TOPDIR = .. INCLUDES := -I. -I$(TOPDIR)/include -I$(TOPDIR)/include/uapi ALL_CFLAGS := $(INCLUDES) SHARED_CFLAGS += -fPIC -fvisibility=hidden -DSHARED CFLAGS ?= -g -O2 -Werror -Wall -std=gnu89 ALL_CFLAGS += $(CFLAGS) \ -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 \ -Wno-unknown-warning-option -Wno-format-overflow \ $(EXTRA_CFLAGS) ALL_LDFLAGS += $(LDFLAGS) $(EXTRA_LDFLAGS) ifdef NO_PKG_CONFIG ALL_LDFLAGS += -lelf -lz else PKG_CONFIG ?= pkg-config ALL_CFLAGS += $(shell $(PKG_CONFIG) --cflags libelf zlib) ALL_LDFLAGS += $(shell $(PKG_CONFIG) --libs libelf zlib) endif OBJDIR ?= . 
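# For illustration, typical overrides (the values here are examples only):
#   make BUILD_STATIC_ONLY=1        # build only libbpf.a, skip shared libs
#   make OBJDIR=/tmp/libbpf-build   # keep objects out of the source tree
#   make NO_PKG_CONFIG=1            # link -lelf -lz without querying pkg-config
#   make install DESTDIR=/tmp/stage # staged install (see install rules below)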
SHARED_OBJDIR := $(OBJDIR)/sharedobjs STATIC_OBJDIR := $(OBJDIR)/staticobjs OBJS := bpf.o btf.o libbpf.o libbpf_errno.o netlink.o \ nlattr.o str_error.o libbpf_probes.o bpf_prog_linfo.o \ btf_dump.o hashmap.o ringbuf.o strset.o linker.o gen_loader.o \ relo_core.o usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o SHARED_OBJS := $(addprefix $(SHARED_OBJDIR)/,$(OBJS)) STATIC_OBJS := $(addprefix $(STATIC_OBJDIR)/,$(OBJS)) STATIC_LIBS := $(OBJDIR)/libbpf.a ifndef BUILD_STATIC_ONLY SHARED_LIBS := $(OBJDIR)/libbpf.so \ $(OBJDIR)/libbpf.so.$(LIBBPF_MAJOR_VERSION) \ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION) VERSION_SCRIPT := libbpf.map endif HEADERS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h \ bpf_helpers.h bpf_helper_defs.h bpf_tracing.h \ bpf_endian.h bpf_core_read.h skel_internal.h libbpf_version.h \ usdt.bpf.h UAPI_HEADERS := $(addprefix $(TOPDIR)/include/uapi/linux/,\ bpf.h bpf_common.h btf.h) PC_FILE := $(OBJDIR)/libbpf.pc INSTALL = install DESTDIR ?= HOSTARCH = $(firstword $(subst -, ,$(shell $(CC) -dumpmachine))) ifeq ($(filter-out %64 %64be %64eb %64le %64el s390x, $(HOSTARCH)),) LIBSUBDIR := lib64 else LIBSUBDIR := lib endif # By default let the pc file itself use ${prefix} in includedir/libdir so that # the prefix can be overridden at runtime (eg: --define-prefix) ifndef LIBDIR LIBDIR_PC := $$\{prefix\}/$(LIBSUBDIR) else LIBDIR_PC := $(LIBDIR) endif PREFIX ?= /usr LIBDIR ?= $(PREFIX)/$(LIBSUBDIR) INCLUDEDIR ?= $(PREFIX)/include UAPIDIR ?= $(PREFIX)/include TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags) all: $(STATIC_LIBS) $(SHARED_LIBS) $(PC_FILE) $(OBJDIR)/libbpf.a: $(STATIC_OBJS) $(call msg,AR,$@) $(Q)$(AR) rcs $@ $^ $(OBJDIR)/libbpf.so: $(OBJDIR)/libbpf.so.$(LIBBPF_MAJOR_VERSION) $(Q)ln -sf $(^F) $@ $(OBJDIR)/libbpf.so.$(LIBBPF_MAJOR_VERSION): $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION) $(Q)ln -sf $(^F) $@ $(OBJDIR)/libbpf.so.$(LIBBPF_VERSION): $(SHARED_OBJS) $(call msg,CC,$@) $(Q)$(CC) -shared -Wl,--version-script=$(VERSION_SCRIPT) \ -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \ $^ $(ALL_LDFLAGS) -o $@ $(OBJDIR)/libbpf.pc: force | $(OBJDIR) $(Q)sed -e "s|@PREFIX@|$(PREFIX)|" \ -e "s|@LIBDIR@|$(LIBDIR_PC)|" \ -e "s|@VERSION@|$(LIBBPF_VERSION)|" \ < libbpf.pc.template > $@ $(OBJDIR) $(STATIC_OBJDIR) $(SHARED_OBJDIR): $(call msg,MKDIR,$@) $(Q)mkdir -p $@ $(STATIC_OBJDIR)/%.o: %.c | $(STATIC_OBJDIR) $(call msg,CC,$@) $(Q)$(CC) $(ALL_CFLAGS) $(CPPFLAGS) -c $< -o $@ $(SHARED_OBJDIR)/%.o: %.c | $(SHARED_OBJDIR) $(call msg,CC,$@) $(Q)$(CC) $(ALL_CFLAGS) $(SHARED_CFLAGS) $(CPPFLAGS) -c $< -o $@ define do_install $(call msg,INSTALL,$1) $(Q)if [ ! -d '$(DESTDIR)$2' ]; then \ $(INSTALL) -d -m 755 '$(DESTDIR)$2'; \ fi; $(Q)$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR)$2' endef # Preserve symlinks at installation. define do_s_install $(call msg,INSTALL,$1) $(Q)if [ ! -d '$(DESTDIR)$2' ]; then \ $(INSTALL) -d -m 755 '$(DESTDIR)$2'; \ fi; $(Q)cp -fR $1 '$(DESTDIR)$2' endef install: all install_headers install_pkgconfig $(call do_s_install,$(STATIC_LIBS) $(SHARED_LIBS),$(LIBDIR)) install_headers: $(call do_install,$(HEADERS),$(INCLUDEDIR)/bpf,644) # UAPI headers can be installed by a different package so they're not installed # in by install rule. 
install_uapi_headers: $(call do_install,$(UAPI_HEADERS),$(UAPIDIR)/linux,644) install_pkgconfig: $(PC_FILE) $(call do_install,$(PC_FILE),$(LIBDIR)/pkgconfig,644) clean: $(call msg,CLEAN) $(Q)rm -rf *.o *.a *.so *.so.* *.pc $(SHARED_OBJDIR) $(STATIC_OBJDIR) .PHONY: cscope tags force cscope: $(call msg,CSCOPE) $(Q)ls *.c *.h > cscope.files $(Q)cscope -b -q -f cscope.out tags: $(call msg,CTAGS) $(Q)rm -f TAGS tags $(Q)ls *.c *.h | xargs $(TAGS_PROG) -a force: xdp-tools-1.5.4/lib/libbpf/src/btf_dump.c0000644000175100001660000021210014706536574017571 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * BTF-to-C type converter. * * Copyright (c) 2019 Facebook */
#include <stdbool.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <endian.h> #include <errno.h> #include <limits.h> #include <linux/err.h> #include <linux/btf.h> #include <linux/kernel.h> #include "btf.h" #include "hashmap.h" #include "libbpf.h" #include "libbpf_internal.h"
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t"; static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1; static const char *pfx(int lvl) { return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl]; } enum btf_dump_type_order_state { NOT_ORDERED, ORDERING, ORDERED, }; enum btf_dump_type_emit_state { NOT_EMITTED, EMITTING, EMITTED, }; /* per-type auxiliary state */ struct btf_dump_type_aux_state { /* topological sorting state */ enum btf_dump_type_order_state order_state: 2; /* emitting state used to determine the need for forward declaration */ enum btf_dump_type_emit_state emit_state: 2; /* whether forward declaration was already emitted */ __u8 fwd_emitted: 1; /* whether unique non-duplicate name was already assigned */ __u8 name_resolved: 1; /* whether type is referenced from any other type */ __u8 referenced: 1; }; /* indent string length; one indent string is added for each indent level */ #define BTF_DATA_INDENT_STR_LEN 32 /* * Common internal data for BTF type data dump operations. */ struct btf_dump_data { const void *data_end; /* end of valid data to show */ bool compact; bool skip_names; bool emit_zeroes; __u8 indent_lvl; /* base indent level */ char indent_str[BTF_DATA_INDENT_STR_LEN]; /* below are used during iteration */ int depth; bool is_array_member; bool is_array_terminated; bool is_array_char; }; struct btf_dump { const struct btf *btf; btf_dump_printf_fn_t printf_fn; void *cb_ctx; int ptr_sz; bool strip_mods; bool skip_anon_defs; int last_id; /* per-type auxiliary state */ struct btf_dump_type_aux_state *type_states; size_t type_states_cap; /* per-type optional cached unique name, must be freed, if present */ const char **cached_names; size_t cached_names_cap; /* topo-sorted list of dependent type definitions */ __u32 *emit_queue; int emit_queue_cap; int emit_queue_cnt; /* * stack of type declarations (e.g., chain of modifiers, arrays, * funcs, etc) */ __u32 *decl_stack; int decl_stack_cap; int decl_stack_cnt; /* maps struct/union/enum name to a number of name occurrences */ struct hashmap *type_names; /* * maps typedef identifiers and enum value names to a number of such * name occurrences */ struct hashmap *ident_names; /* * data for typed display; allocated if needed. */ struct btf_dump_data *typed_dump; }; static size_t str_hash_fn(long key, void *ctx) { return str_hash((void *)key); } static bool str_equal_fn(long a, long b, void *ctx) { return strcmp((void *)a, (void *)b) == 0; } static const char *btf_name_of(const struct btf_dump *d, __u32 name_off) { return btf__name_by_offset(d->btf, name_off); } static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
{ va_list args; va_start(args, fmt); d->printf_fn(d->cb_ctx, fmt, args); va_end(args); } static int btf_dump_mark_referenced(struct btf_dump *d); static int btf_dump_resize(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf, btf_dump_printf_fn_t printf_fn, void *ctx, const struct btf_dump_opts *opts) { struct btf_dump *d; int err; if (!OPTS_VALID(opts, btf_dump_opts)) return libbpf_err_ptr(-EINVAL); if (!printf_fn) return libbpf_err_ptr(-EINVAL); d = calloc(1, sizeof(struct btf_dump)); if (!d) return libbpf_err_ptr(-ENOMEM); d->btf = btf; d->printf_fn = printf_fn; d->cb_ctx = ctx; d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *); d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->type_names)) { err = PTR_ERR(d->type_names); d->type_names = NULL; goto err; } d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->ident_names)) { err = PTR_ERR(d->ident_names); d->ident_names = NULL; goto err; } err = btf_dump_resize(d); if (err) goto err; return d; err: btf_dump__free(d); return libbpf_err_ptr(err); }
static int btf_dump_resize(struct btf_dump *d) { int err, last_id = btf__type_cnt(d->btf) - 1; if (last_id <= d->last_id) return 0; if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap, sizeof(*d->type_states), last_id + 1)) return -ENOMEM; if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap, sizeof(*d->cached_names), last_id + 1)) return -ENOMEM; if (d->last_id == 0) { /* VOID is special */ d->type_states[0].order_state = ORDERED; d->type_states[0].emit_state = EMITTED; } /* eagerly determine referenced types for anon enums */ err = btf_dump_mark_referenced(d); if (err) return err; d->last_id = last_id; return 0; }
static void btf_dump_free_names(struct hashmap *map) { size_t bkt; struct hashmap_entry *cur; hashmap__for_each_entry(map, cur, bkt) free((void *)cur->pkey); hashmap__free(map); } void btf_dump__free(struct btf_dump *d) { int i; if (IS_ERR_OR_NULL(d)) return; free(d->type_states); if (d->cached_names) { /* any set cached name is owned by us and should be freed */ for (i = 0; i <= d->last_id; i++) { if (d->cached_names[i]) free((void *)d->cached_names[i]); } } free(d->cached_names); free(d->emit_queue); free(d->decl_stack); btf_dump_free_names(d->type_names); btf_dump_free_names(d->ident_names); free(d); }
static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr); static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
/* * Dump BTF type in a compilable C syntax, including all the dependent types * necessary for compilation. If some of the dependent types were already * emitted as part of a previous btf_dump__dump_type() invocation for another * type, they won't be emitted again. This API allows callers to filter out * BTF types according to user-defined criteria and emit only a minimal * subset of types necessary to compile everything. Full struct/union * definitions will still be emitted, even if the only usage is through * pointer and could be satisfied with just a forward declaration. * * Dumping is done in two high-level passes: * 1. Topologically sort type definitions to satisfy C rules of compilation. * 2. Emit type definitions in C syntax. * * Returns 0 on success; <0, otherwise.
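 *
 * A minimal sketch of dumping all types from a BTF object (illustrative
 * only; print_cb is a hypothetical callback matching btf_dump_printf_fn_t,
 * error handling elided):
 *
 *	static void print_cb(void *ctx, const char *fmt, va_list args)
 *	{
 *		vfprintf(stdout, fmt, args);
 *	}
 *
 *	struct btf_dump *d = btf_dump__new(btf, print_cb, NULL, NULL);
 *	__u32 id;
 *
 *	for (id = 1; id < btf__type_cnt(btf); id++)
 *		btf_dump__dump_type(d, id);
 *	btf_dump__free(d);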
*/ int btf_dump__dump_type(struct btf_dump *d, __u32 id) { int err, i; if (id >= btf__type_cnt(d->btf)) return libbpf_err(-EINVAL); err = btf_dump_resize(d); if (err) return libbpf_err(err); d->emit_queue_cnt = 0; err = btf_dump_order_type(d, id, false); if (err < 0) return libbpf_err(err); for (i = 0; i < d->emit_queue_cnt; i++) btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/); return 0; } /* * Mark all types that are referenced from any other type. This is used to * determine top-level anonymous enums that need to be emitted as an * independent type declarations. * Anonymous enums come in two flavors: either embedded in a struct's field * definition, in which case they have to be declared inline as part of field * type declaration; or as a top-level anonymous enum, typically used for * declaring global constants. It's impossible to distinguish between two * without knowing whether given enum type was referenced from other type: * top-level anonymous enum won't be referenced by anything, while embedded * one will. */ static int btf_dump_mark_referenced(struct btf_dump *d) { int i, j, n = btf__type_cnt(d->btf); const struct btf_type *t; __u16 vlen; for (i = d->last_id + 1; i < n; i++) { t = btf__type_by_id(d->btf, i); vlen = btf_vlen(t); switch (btf_kind(t)) { case BTF_KIND_INT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FWD: case BTF_KIND_FLOAT: break; case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG: d->type_states[t->type].referenced = 1; break; case BTF_KIND_ARRAY: { const struct btf_array *a = btf_array(t); d->type_states[a->index_type].referenced = 1; d->type_states[a->type].referenced = 1; break; } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m = btf_members(t); for (j = 0; j < vlen; j++, m++) d->type_states[m->type].referenced = 1; break; } case BTF_KIND_FUNC_PROTO: { const struct btf_param *p = btf_params(t); for (j = 0; j < vlen; j++, p++) d->type_states[p->type].referenced = 1; break; } case BTF_KIND_DATASEC: { const struct btf_var_secinfo *v = btf_var_secinfos(t); for (j = 0; j < vlen; j++, v++) d->type_states[v->type].referenced = 1; break; } default: return -EINVAL; } } return 0; } static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) { __u32 *new_queue; size_t new_cap; if (d->emit_queue_cnt >= d->emit_queue_cap) { new_cap = max(16, d->emit_queue_cap * 3 / 2); new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0])); if (!new_queue) return -ENOMEM; d->emit_queue = new_queue; d->emit_queue_cap = new_cap; } d->emit_queue[d->emit_queue_cnt++] = id; return 0; } /* * Determine order of emitting dependent types and specified type to satisfy * C compilation rules. This is done through topological sorting with an * additional complication which comes from C rules. The main idea for C is * that if some type is "embedded" into a struct/union, it's size needs to be * known at the time of definition of containing type. E.g., for: * * struct A {}; * struct B { struct A x; } * * struct A *HAS* to be defined before struct B, because it's "embedded", * i.e., it is part of struct B layout. 
But in the following case: * * struct A; * struct B { struct A *x; } * struct A {}; * * it's enough to just have a forward declaration of struct A at the time of * struct B definition, as struct B has a pointer to struct A, so the size of * field x is known without knowing struct A size: it's sizeof(void *). * * Unfortunately, there are some trickier cases we need to handle, e.g.: * * struct A {}; // if this was forward-declaration: compilation error * struct B { * struct { // anonymous struct * struct A y; * } *x; * }; * * In this case, struct B's field x is a pointer, so it's size is known * regardless of the size of (anonymous) struct it points to. But because this * struct is anonymous and thus defined inline inside struct B, *and* it * embeds struct A, compiler requires full definition of struct A to be known * before struct B can be defined. This creates a transitive dependency * between struct A and struct B. If struct A was forward-declared before * struct B definition and fully defined after struct B definition, that would * trigger compilation error. * * All this means that while we are doing topological sorting on BTF type * graph, we need to determine relationships between different types (graph * nodes): * - weak link (relationship) between X and Y, if Y *CAN* be * forward-declared at the point of X definition; * - strong link, if Y *HAS* to be fully-defined before X can be defined. * * The rule is as follows. Given a chain of BTF types from X to Y, if there is * BTF_KIND_PTR type in the chain and at least one non-anonymous type * Z (excluding X, including Y), then link is weak. Otherwise, it's strong. * Weak/strong relationship is determined recursively during DFS traversal and * is returned as a result from btf_dump_order_type(). * * btf_dump_order_type() is trying to avoid unnecessary forward declarations, * but it is not guaranteeing that no extraneous forward declarations will be * emitted. * * To avoid extra work, algorithm marks some of BTF types as ORDERED, when * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT, * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the * entire graph path, so depending where from one came to that BTF type, it * might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM, * once they are processed, there is no need to do it again, so they are * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But * in any case, once those are processed, no need to do it again, as the * result won't change. * * Returns: * - 1, if type is part of strong link (so there is strong topological * ordering requirements); * - 0, if type is part of weak link (so can be satisfied through forward * declaration); * - <0, on error (e.g., unsatisfiable type loop detected). */ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) { /* * Order state is used to detect strong link cycles, but only for BTF * kinds that are or could be an independent definition (i.e., * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays, * func_protos, modifiers are just means to get to these definitions. * Int/void don't need definitions, they are assumed to be always * properly defined. We also ignore datasec, var, and funcs for now. * So for all non-defining kinds, we never even set ordering state, * for defining kinds we set ORDERING and subsequently ORDERED if it * forms a strong link. 
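 *
 * A small illustration of weak vs. strong links (the types are
 * hypothetical):
 *
 *	struct A;                     - fwd decl alone is fine here
 *	struct B { struct A *ptr; };  - B reaches A only through a pointer: weak
 *	struct C { struct A emb; };   - A is embedded: strong, so A must be
 *	                                fully defined (and thus ORDERED) before C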
*/ struct btf_dump_type_aux_state *tstate = &d->type_states[id]; const struct btf_type *t; __u16 vlen; int err, i; /* return true, letting typedefs know that it's ok to be emitted */ if (tstate->order_state == ORDERED) return 1; t = btf__type_by_id(d->btf, id); if (tstate->order_state == ORDERING) { /* type loop, but resolvable through fwd declaration */ if (btf_is_composite(t) && through_ptr && t->name_off != 0) return 0; pr_warn("unsatisfiable type cycle, id:[%u]\n", id); return -ELOOP; } switch (btf_kind(t)) { case BTF_KIND_INT: case BTF_KIND_FLOAT: tstate->order_state = ORDERED; return 0; case BTF_KIND_PTR: err = btf_dump_order_type(d, t->type, true); tstate->order_state = ORDERED; return err; case BTF_KIND_ARRAY: return btf_dump_order_type(d, btf_array(t)->type, false); case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m = btf_members(t); /* * struct/union is part of strong link, only if it's embedded * (so no ptr in a path) or it's anonymous (so has to be * defined inline, even if declared through ptr) */ if (through_ptr && t->name_off != 0) return 0; tstate->order_state = ORDERING; vlen = btf_vlen(t); for (i = 0; i < vlen; i++, m++) { err = btf_dump_order_type(d, m->type, false); if (err < 0) return err; } if (t->name_off != 0) { err = btf_dump_add_emit_queue_id(d, id); if (err < 0) return err; } tstate->order_state = ORDERED; return 1; } case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FWD: /* * non-anonymous or non-referenced enums are top-level * declarations and should be emitted. Same logic can be * applied to FWDs, it won't hurt anyways. */ if (t->name_off != 0 || !tstate->referenced) { err = btf_dump_add_emit_queue_id(d, id); if (err) return err; } tstate->order_state = ORDERED; return 1; case BTF_KIND_TYPEDEF: { int is_strong; is_strong = btf_dump_order_type(d, t->type, through_ptr); if (is_strong < 0) return is_strong; /* typedef is similar to struct/union w.r.t. 
fwd-decls */ if (through_ptr && !is_strong) return 0; /* typedef is always a named definition */ err = btf_dump_add_emit_queue_id(d, id); if (err) return err; d->type_states[id].order_state = ORDERED; return 1; } case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_TYPE_TAG: return btf_dump_order_type(d, t->type, through_ptr); case BTF_KIND_FUNC_PROTO: { const struct btf_param *p = btf_params(t); bool is_strong; err = btf_dump_order_type(d, t->type, through_ptr); if (err < 0) return err; is_strong = err > 0; vlen = btf_vlen(t); for (i = 0; i < vlen; i++, p++) { err = btf_dump_order_type(d, p->type, through_ptr); if (err < 0) return err; if (err > 0) is_strong = true; } return is_strong; } case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DATASEC: case BTF_KIND_DECL_TAG: d->type_states[id].order_state = ORDERED; return 0; default: return -EINVAL; } }
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl); static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl); static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl); /* a local view into a shared stack */ struct id_stack { const __u32 *ids; int cnt; }; static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, const char *fname, int lvl); static void btf_dump_emit_type_chain(struct btf_dump *d, struct id_stack *decl_stack, const char *fname, int lvl); static const char *btf_dump_type_name(struct btf_dump *d, __u32 id); static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id); static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, const char *orig_name);
static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id) { const struct btf_type *t = btf__type_by_id(d->btf, id); /* __builtin_va_list is a compiler built-in, which causes compilation * errors when compiling with a different compiler than the one used to * compile the original code (e.g., GCC to compile kernel, Clang to use * generated C header from BTF). As it is built-in, it should already be * defined properly internally in the compiler. */ if (t->name_off == 0) return false; return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0; }
/* * Emit C-syntax definitions of types from chains of BTF types. * * Determining which forward declarations are necessary is handled by * btf_dump_emit_type() itself, but all nitty-gritty details of emitting type * declarations/definitions in C syntax are handled by a combo of * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to * corresponding btf_dump_emit_*_{def,fwd}() functions. * * We also keep track of "containing struct/union type ID" to determine when * we reference it from inside and thus can avoid emitting an unnecessary * forward declaration. * * This algorithm is designed so that even if some error occurs (either * technical, e.g., out of memory, or logical, i.e., malformed BTF that * doesn't completely comply with C rules), the algorithm will try to proceed * and produce as much meaningful output as possible.
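 *
 * E.g. (illustrative types), a cycle of structs referencing each other
 * through pointers is resolved by emitting a forward declaration first:
 *
 *	struct node;
 *	struct head { struct node *first; };
 *	struct node { struct head *owner; };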
*/ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) { struct btf_dump_type_aux_state *tstate = &d->type_states[id]; bool top_level_def = cont_id == 0; const struct btf_type *t; __u16 kind; if (tstate->emit_state == EMITTED) return; t = btf__type_by_id(d->btf, id); kind = btf_kind(t); if (tstate->emit_state == EMITTING) { if (tstate->fwd_emitted) return; switch (kind) { case BTF_KIND_STRUCT: case BTF_KIND_UNION: /* * if we are referencing a struct/union that we are * part of - then no need for fwd declaration */ if (id == cont_id) return; if (t->name_off == 0) { pr_warn("anonymous struct/union loop, id:[%u]\n", id); return; } btf_dump_emit_struct_fwd(d, id, t); btf_dump_printf(d, ";\n\n"); tstate->fwd_emitted = 1; break; case BTF_KIND_TYPEDEF: /* * for typedef fwd_emitted means typedef definition * was emitted, but it can be used only for "weak" * references through pointer only, not for embedding */ if (!btf_dump_is_blacklisted(d, id)) { btf_dump_emit_typedef_def(d, id, t, 0); btf_dump_printf(d, ";\n\n"); } tstate->fwd_emitted = 1; break; default: break; } return; } switch (kind) { case BTF_KIND_INT: /* Emit type alias definitions if necessary */ btf_dump_emit_missing_aliases(d, id, t); tstate->emit_state = EMITTED; break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: if (top_level_def) { btf_dump_emit_enum_def(d, id, t, 0); btf_dump_printf(d, ";\n\n"); } tstate->emit_state = EMITTED; break; case BTF_KIND_PTR: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_TYPE_TAG: btf_dump_emit_type(d, t->type, cont_id); break; case BTF_KIND_ARRAY: btf_dump_emit_type(d, btf_array(t)->type, cont_id); break; case BTF_KIND_FWD: btf_dump_emit_fwd_def(d, id, t); btf_dump_printf(d, ";\n\n"); tstate->emit_state = EMITTED; break; case BTF_KIND_TYPEDEF: tstate->emit_state = EMITTING; btf_dump_emit_type(d, t->type, id); /* * typedef can serve as both definition and forward * declaration; at this stage someone depends on * typedef as a forward declaration (refers to it * through pointer), so unless we already did it, * emit typedef as a forward declaration */ if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) { btf_dump_emit_typedef_def(d, id, t, 0); btf_dump_printf(d, ";\n\n"); } tstate->emit_state = EMITTED; break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: tstate->emit_state = EMITTING; /* if it's a top-level struct/union definition or struct/union * is anonymous, then in C we'll be emitting all fields and * their types (as opposed to just `struct X`), so we need to * make sure that all types referenced from struct/union * members have the necessary forward declarations, where * applicable */ if (top_level_def || t->name_off == 0) { const struct btf_member *m = btf_members(t); __u16 vlen = btf_vlen(t); int i, new_cont_id; new_cont_id = t->name_off == 0 ?
cont_id : id; for (i = 0; i < vlen; i++, m++) btf_dump_emit_type(d, m->type, new_cont_id); } else if (!tstate->fwd_emitted && id != cont_id) { btf_dump_emit_struct_fwd(d, id, t); btf_dump_printf(d, ";\n\n"); tstate->fwd_emitted = 1; } if (top_level_def) { btf_dump_emit_struct_def(d, id, t, 0); btf_dump_printf(d, ";\n\n"); tstate->emit_state = EMITTED; } else { tstate->emit_state = NOT_EMITTED; } break; case BTF_KIND_FUNC_PROTO: { const struct btf_param *p = btf_params(t); __u16 n = btf_vlen(t); int i; btf_dump_emit_type(d, t->type, cont_id); for (i = 0; i < n; i++, p++) btf_dump_emit_type(d, p->type, cont_id); break; } default: break; } }
static bool btf_is_struct_packed(const struct btf *btf, __u32 id, const struct btf_type *t) { const struct btf_member *m; int max_align = 1, align, i, bit_sz; __u16 vlen; m = btf_members(t); vlen = btf_vlen(t); /* all non-bitfield fields have to be naturally aligned */ for (i = 0; i < vlen; i++, m++) { align = btf__align_of(btf, m->type); bit_sz = btf_member_bitfield_size(t, i); if (align && bit_sz == 0 && m->offset % (8 * align) != 0) return true; max_align = max(align, max_align); } /* size of a non-packed struct has to be a multiple of its alignment */ if (t->size % max_align != 0) return true; /* * if original struct was marked as packed, but its layout is * naturally aligned, we'll detect that it's not packed */ return false; }
static void btf_dump_emit_bit_padding(const struct btf_dump *d, int cur_off, int next_off, int next_align, bool in_bitfield, int lvl) { const struct { const char *name; int bits; } pads[] = { {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8} }; int new_off = 0, pad_bits = 0, bits, i; const char *pad_type = NULL; if (cur_off >= next_off) return; /* no gap */ /* For filling out padding we want to take advantage of * natural alignment rules to minimize unnecessary explicit * padding. First, we find the largest type (among long, int, * short, or char) that can be used to force naturally aligned * boundary. Once determined, we'll use such type to fill in * the remaining padding gap. In some cases we can rely on * compiler filling some gaps, but sometimes we need to force * alignment to close natural alignment with markers like * `long: 0` (this is always the case for bitfields). Note * that even if struct itself has, let's say 4-byte alignment * (i.e., it only uses up to int-aligned types), using `long: * X;` explicit padding doesn't actually change struct's * overall alignment requirements, but compiler does take into * account that type's (long, in this example) natural * alignment requirements when adding implicit padding. We use * this fact heavily and don't worry about ruining correct * struct alignment requirement. */ for (i = 0; i < ARRAY_SIZE(pads); i++) { pad_bits = pads[i].bits; pad_type = pads[i].name; new_off = roundup(cur_off, pad_bits); if (new_off <= next_off) break; } if (new_off > cur_off && new_off <= next_off) { /* We need explicit `<type>: 0` aligning mark if next * field is right on alignment offset and its * alignment requirement is less strict than <type>'s * alignment (so compiler won't naturally align to the * offset we expect), or if subsequent `<type>: X`, * will actually completely fit in the remaining hole, * making compiler basically ignore `<type>: X` * completely.
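 *
 * Illustration (hypothetical struct): the emitted padding takes the form
 * of unnamed bitfield members, so a dumped definition may look like:
 *
 *	struct bits_example {
 *		int a: 3;
 *		long: 0;
 *		int b: 5;
 *	};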
*/ if (in_bitfield || (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) || (new_off != next_off && next_off - new_off <= new_off - cur_off)) /* but for bitfields we'll emit explicit bit count */ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, in_bitfield ? new_off - cur_off : 0); cur_off = new_off; } /* Now we know we start at naturally aligned offset for a chosen * padding type (long, int, short, or char), and so the rest is just * a straightforward filling of remaining padding gap with full * `<type>: sizeof(<type>);` markers, except for the last one, which * might need smaller than sizeof(<type>) padding. */ while (cur_off != next_off) { bits = min(next_off - cur_off, pad_bits); if (bits == pad_bits) { btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits); cur_off += bits; continue; } /* For the remainder padding that doesn't cover entire * pad_type bit length, we pick the smallest necessary type. * This is pure aesthetics, we could have just used `long`, * but having smallest necessary one communicates better the * scale of the padding gap. */ for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) { pad_type = pads[i].name; pad_bits = pads[i].bits; if (pad_bits < bits) continue; btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits); cur_off += bits; break; } } }
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t) { btf_dump_printf(d, "%s%s%s", btf_is_struct(t) ? "struct" : "union", t->name_off ? " " : "", btf_dump_type_name(d, id)); }
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl) { const struct btf_member *m = btf_members(t); bool is_struct = btf_is_struct(t); bool packed, prev_bitfield = false; int align, i, off = 0; __u16 vlen = btf_vlen(t); align = btf__align_of(d->btf, id); packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0; btf_dump_printf(d, "%s%s%s {", is_struct ? "struct" : "union", t->name_off ? " " : "", btf_dump_type_name(d, id)); for (i = 0; i < vlen; i++, m++) { const char *fname; int m_off, m_sz, m_align; bool in_bitfield; fname = btf_name_of(d, m->name_off); m_sz = btf_member_bitfield_size(t, i); m_off = btf_member_bit_offset(t, i); m_align = packed ? 1 : btf__align_of(d->btf, m->type); in_bitfield = prev_bitfield && m_sz != 0; btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1); btf_dump_printf(d, "\n%s", pfx(lvl + 1)); btf_dump_emit_type_decl(d, m->type, fname, lvl + 1); if (m_sz) { btf_dump_printf(d, ": %d", m_sz); off = m_off + m_sz; prev_bitfield = true; } else { m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type)); off = m_off + m_sz * 8; prev_bitfield = false; } btf_dump_printf(d, ";"); } /* pad at the end, if necessary */ if (is_struct) btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1); /* * Keep `struct empty {}` on a single line, * only print newline when there are regular or padding fields. */ if (vlen || t->size) { btf_dump_printf(d, "\n"); btf_dump_printf(d, "%s}", pfx(lvl)); } else { btf_dump_printf(d, "}"); } if (packed) btf_dump_printf(d, " __attribute__((packed))"); }
static const char *missing_base_types[][2] = { /* * GCC emits typedefs to its internal __PolyX_t types when compiling Arm * SIMD intrinsics. Alias them to standard base types.
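 *
 * E.g., when '__Poly8_t' is encountered, the table below makes the dumper
 * emit: typedef unsigned char __Poly8_t;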
*/ { "__Poly8_t", "unsigned char" }, { "__Poly16_t", "unsigned short" }, { "__Poly64_t", "unsigned long long" }, { "__Poly128_t", "unsigned __int128" }, }; static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, const struct btf_type *t) { const char *name = btf_dump_type_name(d, id); int i; for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) { if (strcmp(name, missing_base_types[i][0]) == 0) { btf_dump_printf(d, "typedef %s %s;\n\n", missing_base_types[i][1], name); break; } } } static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t) { btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id)); } static void btf_dump_emit_enum32_val(struct btf_dump *d, const struct btf_type *t, int lvl, __u16 vlen) { const struct btf_enum *v = btf_enum(t); bool is_signed = btf_kflag(t); const char *fmt_str; const char *name; size_t dup_cnt; int i; for (i = 0; i < vlen; i++, v++) { name = btf_name_of(d, v->name_off); /* enumerators share namespace with typedef idents */ dup_cnt = btf_dump_name_dups(d, d->ident_names, name); if (dup_cnt > 1) { fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,"; btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val); } else { fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,"; btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val); } } } static void btf_dump_emit_enum64_val(struct btf_dump *d, const struct btf_type *t, int lvl, __u16 vlen) { const struct btf_enum64 *v = btf_enum64(t); bool is_signed = btf_kflag(t); const char *fmt_str; const char *name; size_t dup_cnt; __u64 val; int i; for (i = 0; i < vlen; i++, v++) { name = btf_name_of(d, v->name_off); dup_cnt = btf_dump_name_dups(d, d->ident_names, name); val = btf_enum64_value(v); if (dup_cnt > 1) { fmt_str = is_signed ? "\n%s%s___%zd = %lldLL," : "\n%s%s___%zd = %lluULL,"; btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, (unsigned long long)val); } else { fmt_str = is_signed ? "\n%s%s = %lldLL," : "\n%s%s = %lluULL,"; btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, (unsigned long long)val); } } } static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl) { __u16 vlen = btf_vlen(t); btf_dump_printf(d, "enum%s%s", t->name_off ? 
" " : "", btf_dump_type_name(d, id)); if (!vlen) return; btf_dump_printf(d, " {"); if (btf_is_enum(t)) btf_dump_emit_enum32_val(d, t, lvl, vlen); else btf_dump_emit_enum64_val(d, t, lvl, vlen); btf_dump_printf(d, "\n%s}", pfx(lvl)); /* special case enums with special sizes */ if (t->size == 1) { /* one-byte enums can be forced with mode(byte) attribute */ btf_dump_printf(d, " __attribute__((mode(byte)))"); } else if (t->size == 8 && d->ptr_sz == 8) { /* enum can be 8-byte sized if one of the enumerator values * doesn't fit in 32-bit integer, or by adding mode(word) * attribute (but probably only on 64-bit architectures); do * our best here to try to satisfy the contract without adding * unnecessary attributes */ bool needs_word_mode; if (btf_is_enum(t)) { /* enum can't represent 64-bit values, so we need word mode */ needs_word_mode = true; } else { /* enum64 needs mode(word) if none of its values has * non-zero upper 32-bits (which means that all values * fit in 32-bit integers and won't cause compiler to * bump enum to be 64-bit naturally */ int i; needs_word_mode = true; for (i = 0; i < vlen; i++) { if (btf_enum64(t)[i].val_hi32 != 0) { needs_word_mode = false; break; } } } if (needs_word_mode) btf_dump_printf(d, " __attribute__((mode(word)))"); } } static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, const struct btf_type *t) { const char *name = btf_dump_type_name(d, id); if (btf_kflag(t)) btf_dump_printf(d, "union %s", name); else btf_dump_printf(d, "struct %s", name); } static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, const struct btf_type *t, int lvl) { const char *name = btf_dump_ident_name(d, id); /* * Old GCC versions are emitting invalid typedef for __gnuc_va_list * pointing to VOID. This generates warnings from btf_dump() and * results in uncompilable header file, so we are fixing it up here * with valid typedef into __builtin_va_list. */ if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) { btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list"); return; } btf_dump_printf(d, "typedef "); btf_dump_emit_type_decl(d, t->type, name, lvl); } static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id) { __u32 *new_stack; size_t new_cap; if (d->decl_stack_cnt >= d->decl_stack_cap) { new_cap = max(16, d->decl_stack_cap * 3 / 2); new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0])); if (!new_stack) return -ENOMEM; d->decl_stack = new_stack; d->decl_stack_cap = new_cap; } d->decl_stack[d->decl_stack_cnt++] = id; return 0; } /* * Emit type declaration (e.g., field type declaration in a struct or argument * declaration in function prototype) in correct C syntax. * * For most types it's trivial, but there are few quirky type declaration * cases worth mentioning: * - function prototypes (especially nesting of function prototypes); * - arrays; * - const/volatile/restrict for pointers vs other types. * * For a good discussion of *PARSING* C syntax (as a human), see * Peter van der Linden's "Expert C Programming: Deep C Secrets", * Ch.3 "Unscrambling Declarations in C". * * It won't help with BTF to C conversion much, though, as it's an opposite * problem. So we came up with this algorithm in reverse to van der Linden's * parsing algorithm. It goes from structured BTF representation of type * declaration to a valid compilable C syntax. 
* * For instance, consider this C typedef: * typedef const int * const * arr[10] arr_t; * It will be represented in BTF with this chain of BTF types: * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int] * * Notice how [const] modifier always goes before type it modifies in BTF type * graph, but in C syntax, const/volatile/restrict modifiers are written to * the right of pointers, but to the left of other types. There are also other * quirks, like function pointers, arrays of them, functions returning other * functions, etc. * * We handle that by pushing all the types to a stack, until we hit "terminal" * type (int/enum/struct/union/fwd). Then depending on the kind of a type on * top of a stack, modifiers are handled differently. Array/function pointers * have also wildly different syntax and how nesting of them are done. See * code for authoritative definition. * * To avoid allocating new stack for each independent chain of BTF types, we * share one bigger stack, with each chain working only on its own local view * of a stack frame. Some care is required to "pop" stack frames after * processing type declaration chain. */ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, const struct btf_dump_emit_type_decl_opts *opts) { const char *fname; int lvl, err; if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) return libbpf_err(-EINVAL); err = btf_dump_resize(d); if (err) return libbpf_err(err); fname = OPTS_GET(opts, field_name, ""); lvl = OPTS_GET(opts, indent_level, 0); d->strip_mods = OPTS_GET(opts, strip_mods, false); btf_dump_emit_type_decl(d, id, fname, lvl); d->strip_mods = false; return 0; } static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, const char *fname, int lvl) { struct id_stack decl_stack; const struct btf_type *t; int err, stack_start; stack_start = d->decl_stack_cnt; for (;;) { t = btf__type_by_id(d->btf, id); if (d->strip_mods && btf_is_mod(t)) goto skip_mod; err = btf_dump_push_decl_stack_id(d, id); if (err < 0) { /* * if we don't have enough memory for entire type decl * chain, restore stack, emit warning, and try to * proceed nevertheless */ pr_warn("not enough memory for decl stack: %d\n", err); d->decl_stack_cnt = stack_start; return; } skip_mod: /* VOID */ if (id == 0) break; switch (btf_kind(t)) { case BTF_KIND_PTR: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_FUNC_PROTO: case BTF_KIND_TYPE_TAG: id = t->type; break; case BTF_KIND_ARRAY: id = btf_array(t)->type; break; case BTF_KIND_INT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FWD: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_TYPEDEF: case BTF_KIND_FLOAT: goto done; default: pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", btf_kind(t), id); goto done; } } done: /* * We might be inside a chain of declarations (e.g., array of function * pointers returning anonymous (so inlined) structs, having another * array field). Each of those needs its own "stack frame" to handle * emitting of declarations. Those stack frames are non-overlapping * portions of shared btf_dump->decl_stack. To make it a bit nicer to * handle this set of nested stacks, we create a view corresponding to * our own "stack frame" and work with it as an independent stack. * We'll need to clean up after emit_type_chain() returns, though. 
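 *
 * E.g. (illustrative member), for something like:
 *	struct { int x[4]; } (*handlers[8])(void);
 * the declaration chain for 'handlers' and the chain for the inlined
 * struct's field 'x' each occupy their own non-overlapping slice of the
 * shared decl_stack.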
*/ decl_stack.ids = d->decl_stack + stack_start; decl_stack.cnt = d->decl_stack_cnt - stack_start; btf_dump_emit_type_chain(d, &decl_stack, fname, lvl); /* * emit_type_chain() guarantees that it will pop its entire decl_stack * frame before returning. But it works with a read-only view into * decl_stack, so it doesn't actually pop anything from the * perspective of shared btf_dump->decl_stack, per se. We need to * reset decl_stack state to how it was before us to avoid it growing * all the time. */ d->decl_stack_cnt = stack_start; }
static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) { const struct btf_type *t; __u32 id; while (decl_stack->cnt) { id = decl_stack->ids[decl_stack->cnt - 1]; t = btf__type_by_id(d->btf, id); switch (btf_kind(t)) { case BTF_KIND_VOLATILE: btf_dump_printf(d, "volatile "); break; case BTF_KIND_CONST: btf_dump_printf(d, "const "); break; case BTF_KIND_RESTRICT: btf_dump_printf(d, "restrict "); break; default: return; } decl_stack->cnt--; } }
static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack) { const struct btf_type *t; __u32 id; while (decl_stack->cnt) { id = decl_stack->ids[decl_stack->cnt - 1]; t = btf__type_by_id(d->btf, id); if (!btf_is_mod(t)) return; decl_stack->cnt--; } }
static void btf_dump_emit_name(const struct btf_dump *d, const char *name, bool last_was_ptr) { bool separate = name[0] && !last_was_ptr; btf_dump_printf(d, "%s%s", separate ? " " : "", name); }
static void btf_dump_emit_type_chain(struct btf_dump *d, struct id_stack *decls, const char *fname, int lvl) { /* * last_was_ptr is used to determine if we need to separate pointer * asterisk (*) from previous part of type signature with space, so * that we get `int ***`, instead of `int * * *`. We default to true * for cases where we have single pointer in a chain. E.g., in ptr -> * func_proto case. func_proto will start a new emit_type_chain call * with just ptr, which should be emitted as (*) or (*<fname>), so we * don't want to prepend space for that last pointer. */ bool last_was_ptr = true; const struct btf_type *t; const char *name; __u16 kind; __u32 id; while (decls->cnt) { id = decls->ids[--decls->cnt]; if (id == 0) { /* VOID is a special snowflake */ btf_dump_emit_mods(d, decls); btf_dump_printf(d, "void"); last_was_ptr = false; continue; } t = btf__type_by_id(d->btf, id); kind = btf_kind(t); switch (kind) { case BTF_KIND_INT: case BTF_KIND_FLOAT: btf_dump_emit_mods(d, decls); name = btf_name_of(d, t->name_off); btf_dump_printf(d, "%s", name); break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: btf_dump_emit_mods(d, decls); /* inline anonymous struct/union */ if (t->name_off == 0 && !d->skip_anon_defs) btf_dump_emit_struct_def(d, id, t, lvl); else btf_dump_emit_struct_fwd(d, id, t); break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: btf_dump_emit_mods(d, decls); /* inline anonymous enum */ if (t->name_off == 0 && !d->skip_anon_defs) btf_dump_emit_enum_def(d, id, t, lvl); else btf_dump_emit_enum_fwd(d, id, t); break; case BTF_KIND_FWD: btf_dump_emit_mods(d, decls); btf_dump_emit_fwd_def(d, id, t); break; case BTF_KIND_TYPEDEF: btf_dump_emit_mods(d, decls); btf_dump_printf(d, "%s", btf_dump_ident_name(d, id)); break; case BTF_KIND_PTR: btf_dump_printf(d, "%s", last_was_ptr ?
"*" : " *"); break; case BTF_KIND_VOLATILE: btf_dump_printf(d, " volatile"); break; case BTF_KIND_CONST: btf_dump_printf(d, " const"); break; case BTF_KIND_RESTRICT: btf_dump_printf(d, " restrict"); break; case BTF_KIND_TYPE_TAG: btf_dump_emit_mods(d, decls); name = btf_name_of(d, t->name_off); btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name); break; case BTF_KIND_ARRAY: { const struct btf_array *a = btf_array(t); const struct btf_type *next_t; __u32 next_id; bool multidim; /* * GCC has a bug * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354) * which causes it to emit extra const/volatile * modifiers for an array, if array's element type has * const/volatile modifiers. Clang doesn't do that. * In general, it doesn't seem very meaningful to have * a const/volatile modifier for array, so we are * going to silently skip them here. */ btf_dump_drop_mods(d, decls); if (decls->cnt == 0) { btf_dump_emit_name(d, fname, last_was_ptr); btf_dump_printf(d, "[%u]", a->nelems); return; } next_id = decls->ids[decls->cnt - 1]; next_t = btf__type_by_id(d->btf, next_id); multidim = btf_is_array(next_t); /* we need space if we have named non-pointer */ if (fname[0] && !last_was_ptr) btf_dump_printf(d, " "); /* no parentheses for multi-dimensional array */ if (!multidim) btf_dump_printf(d, "("); btf_dump_emit_type_chain(d, decls, fname, lvl); if (!multidim) btf_dump_printf(d, ")"); btf_dump_printf(d, "[%u]", a->nelems); return; } case BTF_KIND_FUNC_PROTO: { const struct btf_param *p = btf_params(t); __u16 vlen = btf_vlen(t); int i; /* * GCC emits extra volatile qualifier for * __attribute__((noreturn)) function pointers. Clang * doesn't do it. It's a GCC quirk for backwards * compatibility with code written for GCC <2.5. So, * similarly to extra qualifiers for array, just drop * them, instead of handling them. */ btf_dump_drop_mods(d, decls); if (decls->cnt) { btf_dump_printf(d, " ("); btf_dump_emit_type_chain(d, decls, fname, lvl); btf_dump_printf(d, ")"); } else { btf_dump_emit_name(d, fname, last_was_ptr); } btf_dump_printf(d, "("); /* * Clang for BPF target generates func_proto with no * args as a func_proto with a single void arg (e.g., * `int (*f)(void)` vs just `int (*f)()`). We are * going to emit valid empty args (void) syntax for * such case. Similarly and conveniently, valid * no args case can be special-cased here as well. */ if (vlen == 0 || (vlen == 1 && p->type == 0)) { btf_dump_printf(d, "void)"); return; } for (i = 0; i < vlen; i++, p++) { if (i > 0) btf_dump_printf(d, ", "); /* last arg of type void is vararg */ if (i == vlen - 1 && p->type == 0) { btf_dump_printf(d, "..."); break; } name = btf_name_of(d, p->name_off); btf_dump_emit_type_decl(d, p->type, name, lvl); } btf_dump_printf(d, ")"); return; } default: pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", kind, id); return; } last_was_ptr = kind == BTF_KIND_PTR; } btf_dump_emit_name(d, fname, last_was_ptr); } /* show type name as (type_name) */ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id, bool top_level) { const struct btf_type *t; /* for array members, we don't bother emitting type name for each * member to avoid the redundancy of * .name = (char[4])[(char)'f',(char)'o',(char)'o',] */ if (d->typed_dump->is_array_member) return; /* avoid type name specification for variable/section; it will be done * for the associated variable value(s). 
*/ t = btf__type_by_id(d->btf, id); if (btf_is_var(t) || btf_is_datasec(t)) return; if (top_level) btf_dump_printf(d, "("); d->skip_anon_defs = true; d->strip_mods = true; btf_dump_emit_type_decl(d, id, "", 0); d->strip_mods = false; d->skip_anon_defs = false; if (top_level) btf_dump_printf(d, ")"); } /* return number of duplicates (occurrences) of a given name */ static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, const char *orig_name) { char *old_name, *new_name; size_t dup_cnt = 0; int err; new_name = strdup(orig_name); if (!new_name) return 1; (void)hashmap__find(name_map, orig_name, &dup_cnt); dup_cnt++; err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL); if (err) free(new_name); free(old_name); return dup_cnt; } static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, struct hashmap *name_map) { struct btf_dump_type_aux_state *s = &d->type_states[id]; const struct btf_type *t = btf__type_by_id(d->btf, id); const char *orig_name = btf_name_of(d, t->name_off); const char **cached_name = &d->cached_names[id]; size_t dup_cnt; if (t->name_off == 0) return ""; if (s->name_resolved) return *cached_name ? *cached_name : orig_name; if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) { s->name_resolved = 1; return orig_name; } dup_cnt = btf_dump_name_dups(d, name_map, orig_name); if (dup_cnt > 1) { const size_t max_len = 256; char new_name[max_len]; snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt); *cached_name = strdup(new_name); } s->name_resolved = 1; return *cached_name ? *cached_name : orig_name; } static const char *btf_dump_type_name(struct btf_dump *d, __u32 id) { return btf_dump_resolve_name(d, id, d->type_names); } static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id) { return btf_dump_resolve_name(d, id, d->ident_names); } static int btf_dump_dump_type_data(struct btf_dump *d, const char *fname, const struct btf_type *t, __u32 id, const void *data, __u8 bits_offset, __u8 bit_sz); static const char *btf_dump_data_newline(struct btf_dump *d) { return d->typed_dump->compact || d->typed_dump->depth == 0 ? "" : "\n"; } static const char *btf_dump_data_delim(struct btf_dump *d) { return d->typed_dump->depth == 0 ? "" : ","; } static void btf_dump_data_pfx(struct btf_dump *d) { int i, lvl = d->typed_dump->indent_lvl + d->typed_dump->depth; if (d->typed_dump->compact) return; for (i = 0; i < lvl; i++) btf_dump_printf(d, "%s", d->typed_dump->indent_str); } /* A macro is used here as btf_type_value[s]() appends format specifiers * to the format specifier passed in; these do the work of appending * delimiters etc while the caller simply has to specify the type values * in the format specifier + value(s). */ #define btf_dump_type_values(d, fmt, ...) 
\ btf_dump_printf(d, fmt "%s%s", \ ##__VA_ARGS__, \ btf_dump_data_delim(d), \ btf_dump_data_newline(d))
static int btf_dump_unsupported_data(struct btf_dump *d, const struct btf_type *t, __u32 id) { btf_dump_printf(d, "<unsupported kind:%u>", btf_kind(t)); return -ENOTSUP; }
static int btf_dump_get_bitfield_value(struct btf_dump *d, const struct btf_type *t, const void *data, __u8 bits_offset, __u8 bit_sz, __u64 *value) { __u16 left_shift_bits, right_shift_bits; const __u8 *bytes = data; __u8 nr_copy_bits; __u64 num = 0; int i; /* Maximum supported bitfield size is 64 bits */ if (t->size > 8) { pr_warn("unexpected bitfield size %d\n", t->size); return -EINVAL; } /* Bitfield value retrieval is done in two steps; first relevant bytes are * stored in num, then we left/right shift num to eliminate irrelevant bits. */ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ for (i = t->size - 1; i >= 0; i--) num = num * 256 + bytes[i]; nr_copy_bits = bit_sz + bits_offset; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ for (i = 0; i < t->size; i++) num = num * 256 + bytes[i]; nr_copy_bits = t->size * 8 - bits_offset; #else # error "Unrecognized __BYTE_ORDER__" #endif left_shift_bits = 64 - nr_copy_bits; right_shift_bits = 64 - bit_sz; *value = (num << left_shift_bits) >> right_shift_bits; return 0; }
static int btf_dump_bitfield_check_zero(struct btf_dump *d, const struct btf_type *t, const void *data, __u8 bits_offset, __u8 bit_sz) { __u64 check_num; int err; err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &check_num); if (err) return err; if (check_num == 0) return -ENODATA; return 0; }
static int btf_dump_bitfield_data(struct btf_dump *d, const struct btf_type *t, const void *data, __u8 bits_offset, __u8 bit_sz) { __u64 print_num; int err; err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num); if (err) return err; btf_dump_type_values(d, "0x%llx", (unsigned long long)print_num); return 0; }
/* ints, floats and ptrs */ static int btf_dump_base_type_check_zero(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { static __u8 bytecmp[16] = {}; int nr_bytes; /* For pointer types, pointer size is not defined on a per-type basis. * On dump creation however, we store the pointer size. */ if (btf_kind(t) == BTF_KIND_PTR) nr_bytes = d->ptr_sz; else nr_bytes = t->size; if (nr_bytes < 1 || nr_bytes > 16) { pr_warn("unexpected size %d for id [%u]\n", nr_bytes, id); return -EINVAL; } if (memcmp(data, bytecmp, nr_bytes) == 0) return -ENODATA; return 0; }
static bool ptr_is_aligned(const struct btf *btf, __u32 type_id, const void *data) { int alignment = btf__align_of(btf, type_id); if (alignment == 0) return false; return ((uintptr_t)data) % alignment == 0; }
static int btf_dump_int_data(struct btf_dump *d, const struct btf_type *t, __u32 type_id, const void *data, __u8 bits_offset) { __u8 encoding = btf_int_encoding(t); bool sign = encoding & BTF_INT_SIGNED; char buf[16] __attribute__((aligned(16))); int sz = t->size; if (sz == 0 || sz > sizeof(buf)) { pr_warn("unexpected size %d for id [%u]\n", sz, type_id); return -EINVAL; } /* handle packed int data - accesses of integers not aligned on * int boundaries can cause problems on some platforms. */ if (!ptr_is_aligned(d->btf, type_id, data)) { memcpy(buf, data, sz); data = buf; } switch (sz) { case 16: { const __u64 *ints = data; __u64 lsi, msi; /* avoid use of __int128 as some 32-bit platforms do not * support it.
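 *
 * The 16-byte value is printed from two 64-bit halves instead: the high
 * half is omitted when zero, otherwise e.g. msi = 0x1, lsi = 0xf is
 * printed as 0x1000000000000000f.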
*/ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ lsi = ints[0]; msi = ints[1]; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ lsi = ints[1]; msi = ints[0]; #else # error "Unrecognized __BYTE_ORDER__" #endif if (msi == 0) btf_dump_type_values(d, "0x%llx", (unsigned long long)lsi); else btf_dump_type_values(d, "0x%llx%016llx", (unsigned long long)msi, (unsigned long long)lsi); break; } case 8: if (sign) btf_dump_type_values(d, "%lld", *(long long *)data); else btf_dump_type_values(d, "%llu", *(unsigned long long *)data); break; case 4: if (sign) btf_dump_type_values(d, "%d", *(__s32 *)data); else btf_dump_type_values(d, "%u", *(__u32 *)data); break; case 2: if (sign) btf_dump_type_values(d, "%d", *(__s16 *)data); else btf_dump_type_values(d, "%u", *(__u16 *)data); break; case 1: if (d->typed_dump->is_array_char) { /* check for null terminator */ if (d->typed_dump->is_array_terminated) break; if (*(char *)data == '\0') { btf_dump_type_values(d, "'\\0'"); d->typed_dump->is_array_terminated = true; break; } if (isprint(*(char *)data)) { btf_dump_type_values(d, "'%c'", *(char *)data); break; } } if (sign) btf_dump_type_values(d, "%d", *(__s8 *)data); else btf_dump_type_values(d, "%u", *(__u8 *)data); break; default: pr_warn("unexpected sz %d for id [%u]\n", sz, type_id); return -EINVAL; } return 0; } union float_data { long double ld; double d; float f; }; static int btf_dump_float_data(struct btf_dump *d, const struct btf_type *t, __u32 type_id, const void *data) { const union float_data *flp = data; union float_data fl; int sz = t->size; /* handle unaligned data; copy to local union */ if (!ptr_is_aligned(d->btf, type_id, data)) { memcpy(&fl, data, sz); flp = &fl; } switch (sz) { case 16: btf_dump_type_values(d, "%Lf", flp->ld); break; case 8: btf_dump_type_values(d, "%lf", flp->d); break; case 4: btf_dump_type_values(d, "%f", flp->f); break; default: pr_warn("unexpected size %d for id [%u]\n", sz, type_id); return -EINVAL; } return 0; } static int btf_dump_var_data(struct btf_dump *d, const struct btf_type *v, __u32 id, const void *data) { enum btf_func_linkage linkage = btf_var(v)->linkage; const struct btf_type *t; const char *l; __u32 type_id; switch (linkage) { case BTF_FUNC_STATIC: l = "static "; break; case BTF_FUNC_EXTERN: l = "extern "; break; case BTF_FUNC_GLOBAL: default: l = ""; break; } /* format of output here is [linkage] [type] [varname] = (type)value, * for example "static int cpu_profile_flip = (int)1" */ btf_dump_printf(d, "%s", l); type_id = v->type; t = btf__type_by_id(d->btf, type_id); btf_dump_emit_type_cast(d, type_id, false); btf_dump_printf(d, " %s = ", btf_name_of(d, v->name_off)); return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0); } static int btf_dump_array_data(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { const struct btf_array *array = btf_array(t); const struct btf_type *elem_type; __u32 i, elem_type_id; __s64 elem_size; bool is_array_member; bool is_array_terminated; elem_type_id = array->type; elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); elem_size = btf__resolve_size(d->btf, elem_type_id); if (elem_size <= 0) { pr_warn("unexpected elem size %zd for array type [%u]\n", (ssize_t)elem_size, id); return -EINVAL; } if (btf_is_int(elem_type)) { /* * BTF_INT_CHAR encoding never seems to be set for * char arrays, so if size is 1 and element is * printable as a char, we'll do that. 
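 *
 * E.g. (illustration), char name[4] holding "foo" is then rendered
 * roughly as ['f','o','o','\0'] rather than as raw integer values.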
*/ if (elem_size == 1) d->typed_dump->is_array_char = true; } /* note that we increment depth before calling btf_dump_print() below; * this is intentional. btf_dump_data_newline() will not print a * newline for depth 0 (since this leaves us with trailing newlines * at the end of typed display), so depth is incremented first. * For similar reasons, we decrement depth before showing the closing * parenthesis. */ d->typed_dump->depth++; btf_dump_printf(d, "[%s", btf_dump_data_newline(d)); /* may be a multidimensional array, so store current "is array member" * status so we can restore it correctly later. */ is_array_member = d->typed_dump->is_array_member; d->typed_dump->is_array_member = true; is_array_terminated = d->typed_dump->is_array_terminated; d->typed_dump->is_array_terminated = false; for (i = 0; i < array->nelems; i++, data += elem_size) { if (d->typed_dump->is_array_terminated) break; btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0); } d->typed_dump->is_array_member = is_array_member; d->typed_dump->is_array_terminated = is_array_terminated; d->typed_dump->depth--; btf_dump_data_pfx(d); btf_dump_type_values(d, "]"); return 0; } static int btf_dump_struct_data(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { const struct btf_member *m = btf_members(t); __u16 n = btf_vlen(t); int i, err = 0; /* note that we increment depth before calling btf_dump_print() below; * this is intentional. btf_dump_data_newline() will not print a * newline for depth 0 (since this leaves us with trailing newlines * at the end of typed display), so depth is incremented first. * For similar reasons, we decrement depth before showing the closing * parenthesis. */ d->typed_dump->depth++; btf_dump_printf(d, "{%s", btf_dump_data_newline(d)); for (i = 0; i < n; i++, m++) { const struct btf_type *mtype; const char *mname; __u32 moffset; __u8 bit_sz; mtype = btf__type_by_id(d->btf, m->type); mname = btf_name_of(d, m->name_off); moffset = btf_member_bit_offset(t, i); bit_sz = btf_member_bitfield_size(t, i); err = btf_dump_dump_type_data(d, mname, mtype, m->type, data + moffset / 8, moffset % 8, bit_sz); if (err < 0) return err; } d->typed_dump->depth--; btf_dump_data_pfx(d); btf_dump_type_values(d, "}"); return err; } union ptr_data { unsigned int p; unsigned long long lp; }; static int btf_dump_ptr_data(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) { btf_dump_type_values(d, "%p", *(void **)data); } else { union ptr_data pt; memcpy(&pt, data, d->ptr_sz); if (d->ptr_sz == 4) btf_dump_type_values(d, "0x%x", pt.p); else btf_dump_type_values(d, "0x%llx", pt.lp); } return 0; } static int btf_dump_get_enum_value(struct btf_dump *d, const struct btf_type *t, const void *data, __u32 id, __s64 *value) { bool is_signed = btf_kflag(t); if (!ptr_is_aligned(d->btf, id, data)) { __u64 val; int err; err = btf_dump_get_bitfield_value(d, t, data, 0, 0, &val); if (err) return err; *value = (__s64)val; return 0; } switch (t->size) { case 8: *value = *(__s64 *)data; return 0; case 4: *value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data; return 0; case 2: *value = is_signed ? *(__s16 *)data : *(__u16 *)data; return 0; case 1: *value = is_signed ? 
*(__s8 *)data : *(__u8 *)data; return 0; default: pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id); return -EINVAL; } } static int btf_dump_enum_data(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { bool is_signed; __s64 value; int i, err; err = btf_dump_get_enum_value(d, t, data, id, &value); if (err) return err; is_signed = btf_kflag(t); if (btf_is_enum(t)) { const struct btf_enum *e; for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) { if (value != e->val) continue; btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off)); return 0; } btf_dump_type_values(d, is_signed ? "%d" : "%u", value); } else { const struct btf_enum64 *e; for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) { if (value != btf_enum64_value(e)) continue; btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off)); return 0; } btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL", (unsigned long long)value); } return 0; } static int btf_dump_datasec_data(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data) { const struct btf_var_secinfo *vsi; const struct btf_type *var; __u32 i; int err; btf_dump_type_values(d, "SEC(\"%s\") ", btf_name_of(d, t->name_off)); for (i = 0, vsi = btf_var_secinfos(t); i < btf_vlen(t); i++, vsi++) { var = btf__type_by_id(d->btf, vsi->type); err = btf_dump_dump_type_data(d, NULL, var, vsi->type, data + vsi->offset, 0, 0); if (err < 0) return err; btf_dump_printf(d, ";"); } return 0; } /* return size of type, or if base type overflows, return -E2BIG. */ static int btf_dump_type_data_check_overflow(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data, __u8 bits_offset, __u8 bit_sz) { __s64 size; if (bit_sz) { /* bits_offset is at most 7. bit_sz is at most 128. */ __u8 nr_bytes = (bits_offset + bit_sz + 7) / 8; /* When bit_sz is non zero, it is called from * btf_dump_struct_data() where it only cares about * negative error value. * Return nr_bytes in success case to make it * consistent as the regular integer case below. */ return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes; } size = btf__resolve_size(d->btf, id); if (size < 0 || size >= INT_MAX) { pr_warn("unexpected size [%zu] for id [%u]\n", (size_t)size, id); return -EINVAL; } /* Only do overflow checking for base types; we do not want to * avoid showing part of a struct, union or array, even if we * do not have enough data to show the full object. By * restricting overflow checking to base types we can ensure * that partial display succeeds, while avoiding overflowing * and using bogus data for display. */ t = skip_mods_and_typedefs(d->btf, id, NULL); if (!t) { pr_warn("unexpected error skipping mods/typedefs for id [%u]\n", id); return -EINVAL; } switch (btf_kind(t)) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_PTR: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: if (data + bits_offset / 8 + size > d->typed_dump->data_end) return -E2BIG; break; default: break; } return (int)size; } static int btf_dump_type_data_check_zero(struct btf_dump *d, const struct btf_type *t, __u32 id, const void *data, __u8 bits_offset, __u8 bit_sz) { __s64 value; int i, err; /* toplevel exceptions; we show zero values if * - we ask for them (emit_zeros) * - if we are at top-level so we see "struct empty { }" * - or if we are an array member and the array is non-empty and * not a char array; we don't want to be in a situation where we * have an integer array 0, 1, 0, 1 and only show non-zero values. 
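* Such an array is therefore dumped in full, e.g. as [ 0, 1, 0, 1, ].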
* If the array contains zeroes only, or is a char array starting * with a '\0', the array-level check_zero() will prevent showing it; * we are concerned with determining zero value at the array member * level here. */ if (d->typed_dump->emit_zeroes || d->typed_dump->depth == 0 || (d->typed_dump->is_array_member && !d->typed_dump->is_array_char)) return 0; t = skip_mods_and_typedefs(d->btf, id, NULL); switch (btf_kind(t)) { case BTF_KIND_INT: if (bit_sz) return btf_dump_bitfield_check_zero(d, t, data, bits_offset, bit_sz); return btf_dump_base_type_check_zero(d, t, id, data); case BTF_KIND_FLOAT: case BTF_KIND_PTR: return btf_dump_base_type_check_zero(d, t, id, data); case BTF_KIND_ARRAY: { const struct btf_array *array = btf_array(t); const struct btf_type *elem_type; __u32 elem_type_id, elem_size; bool ischar; elem_type_id = array->type; elem_size = btf__resolve_size(d->btf, elem_type_id); elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); ischar = btf_is_int(elem_type) && elem_size == 1; /* check all elements; if _any_ element is nonzero, all * of array is displayed. We make an exception however * for char arrays where the first element is 0; these * are considered zeroed also, even if later elements are * non-zero because the string is terminated. */ for (i = 0; i < array->nelems; i++) { if (i == 0 && ischar && *(char *)data == 0) return -ENODATA; err = btf_dump_type_data_check_zero(d, elem_type, elem_type_id, data + (i * elem_size), bits_offset, 0); if (err != -ENODATA) return err; } return -ENODATA; } case BTF_KIND_STRUCT: case BTF_KIND_UNION: { const struct btf_member *m = btf_members(t); __u16 n = btf_vlen(t); /* if any struct/union member is non-zero, the struct/union * is considered non-zero and dumped. */ for (i = 0; i < n; i++, m++) { const struct btf_type *mtype; __u32 moffset; mtype = btf__type_by_id(d->btf, m->type); moffset = btf_member_bit_offset(t, i); /* btf_int_bits() does not store member bitfield size; * bitfield size needs to be stored here so int display * of member can retrieve it. */ bit_sz = btf_member_bitfield_size(t, i); err = btf_dump_type_data_check_zero(d, mtype, m->type, data + moffset / 8, moffset % 8, bit_sz); if (err != -ENODATA) return err; } return -ENODATA; } case BTF_KIND_ENUM: case BTF_KIND_ENUM64: err = btf_dump_get_enum_value(d, t, data, id, &value); if (err) return err; if (value == 0) return -ENODATA; return 0; default: return 0; } } /* returns size of data dumped, or error. */ static int btf_dump_dump_type_data(struct btf_dump *d, const char *fname, const struct btf_type *t, __u32 id, const void *data, __u8 bits_offset, __u8 bit_sz) { int size, err = 0; size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz); if (size < 0) return size; err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz); if (err) { /* zeroed data is expected and not an error, so simply skip * dumping such data. Record other errors however.
*/ if (err == -ENODATA) return size; return err; } btf_dump_data_pfx(d); if (!d->typed_dump->skip_names) { if (fname && strlen(fname) > 0) btf_dump_printf(d, ".%s = ", fname); btf_dump_emit_type_cast(d, id, true); } t = skip_mods_and_typedefs(d->btf, id, NULL); switch (btf_kind(t)) { case BTF_KIND_UNKN: case BTF_KIND_FWD: case BTF_KIND_FUNC: case BTF_KIND_FUNC_PROTO: case BTF_KIND_DECL_TAG: err = btf_dump_unsupported_data(d, t, id); break; case BTF_KIND_INT: if (bit_sz) err = btf_dump_bitfield_data(d, t, data, bits_offset, bit_sz); else err = btf_dump_int_data(d, t, id, data, bits_offset); break; case BTF_KIND_FLOAT: err = btf_dump_float_data(d, t, id, data); break; case BTF_KIND_PTR: err = btf_dump_ptr_data(d, t, id, data); break; case BTF_KIND_ARRAY: err = btf_dump_array_data(d, t, id, data); break; case BTF_KIND_STRUCT: case BTF_KIND_UNION: err = btf_dump_struct_data(d, t, id, data); break; case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* handle bitfield and int enum values */ if (bit_sz) { __u64 print_num; __s64 enum_val; err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num); if (err) break; enum_val = (__s64)print_num; err = btf_dump_enum_data(d, t, id, &enum_val); } else err = btf_dump_enum_data(d, t, id, data); break; case BTF_KIND_VAR: err = btf_dump_var_data(d, t, id, data); break; case BTF_KIND_DATASEC: err = btf_dump_datasec_data(d, t, id, data); break; default: pr_warn("unexpected kind [%u] for id [%u]\n", BTF_INFO_KIND(t->info), id); return -EINVAL; } if (err < 0) return err; return size; } int btf_dump__dump_type_data(struct btf_dump *d, __u32 id, const void *data, size_t data_sz, const struct btf_dump_type_data_opts *opts) { struct btf_dump_data typed_dump = {}; const struct btf_type *t; int ret; if (!OPTS_VALID(opts, btf_dump_type_data_opts)) return libbpf_err(-EINVAL); t = btf__type_by_id(d->btf, id); if (!t) return libbpf_err(-ENOENT); d->typed_dump = &typed_dump; d->typed_dump->data_end = data + data_sz; d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0); /* default indent string is a tab */ if (!OPTS_GET(opts, indent_str, NULL)) d->typed_dump->indent_str[0] = '\t'; else libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str, sizeof(d->typed_dump->indent_str)); d->typed_dump->compact = OPTS_GET(opts, compact, false); d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false); d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false); ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0); d->typed_dump = NULL; return libbpf_err(ret); } xdp-tools-1.5.4/lib/libbpf/src/hashmap.h0000644000175100001660000001542614706536574017433 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * Generic non-thread safe hash map implementation. 
* * Copyright (c) 2019 Facebook */ #ifndef __LIBBPF_HASHMAP_H #define __LIBBPF_HASHMAP_H #include <stdbool.h> #include <stddef.h> #include <limits.h> static inline size_t hash_bits(size_t h, int bits) { /* shuffle bits and return requested number of upper bits */ if (bits == 0) return 0; #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) /* LP64 case */ return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); #elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); #else # error "Unsupported size_t size" #endif } /* generic C-string hashing function */ static inline size_t str_hash(const char *s) { size_t h = 0; while (*s) { h = h * 31 + *s; s++; } return h; } typedef size_t (*hashmap_hash_fn)(long key, void *ctx); typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx); /* * Hashmap interface is polymorphic: keys and values can be either * long-sized integers or pointers. This is achieved as follows: * - interface functions that operate on keys and values are hidden * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert; * - these auxiliary macros cast the key and value parameters as * long or long *, so the user does not have to specify the casts explicitly; * - for pointer parameters (e.g. old_key) the size of the pointed * type is verified by hashmap_cast_ptr using _Static_assert; * - when iterating using hashmap__for_each_* forms * hashmap_entry->key should be used for integer keys and * hashmap_entry->pkey should be used for pointer keys, * same goes for values. */ struct hashmap_entry { union { long key; const void *pkey; }; union { long value; void *pvalue; }; struct hashmap_entry *next; }; struct hashmap { hashmap_hash_fn hash_fn; hashmap_equal_fn equal_fn; void *ctx; struct hashmap_entry **buckets; size_t cap; size_t cap_bits; size_t sz; }; void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, hashmap_equal_fn equal_fn, void *ctx); struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, hashmap_equal_fn equal_fn, void *ctx); void hashmap__clear(struct hashmap *map); void hashmap__free(struct hashmap *map); size_t hashmap__size(const struct hashmap *map); size_t hashmap__capacity(const struct hashmap *map); /* * Hashmap insertion strategy: * - HASHMAP_ADD - only add key/value if key doesn't exist yet; * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise, * update value; * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do * nothing and return -ENOENT; * - HASHMAP_APPEND - always add key/value pair, even if key already exists. * This turns hashmap into a multimap by allowing multiple values to be * associated with the same key. Most useful read API for such hashmap is * hashmap__for_each_key_entry() iteration. If hashmap__find() is still * used, it will return last inserted key/value entry (first in a bucket * chain). */ enum hashmap_insert_strategy { HASHMAP_ADD, HASHMAP_SET, HASHMAP_UPDATE, HASHMAP_APPEND, }; #define hashmap_cast_ptr(p) ({ \ _Static_assert((__builtin_constant_p((p)) ? (p) == NULL : 0) || \ sizeof(*(p)) == sizeof(long), \ #p " pointee should be a long-sized integer or a pointer"); \ (long *)(p); \ }) /* * hashmap__insert() adds key/value entry w/ various semantics, depending on * provided strategy value. If a given key/value pair replaces an already * existing key/value pair, both the old key and the old value will be returned * through old_key and old_value to allow the calling code to do proper memory * management.
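*
* For example (an illustrative sketch), replacing a malloc'ed value and
* freeing the previous one:
*
*	void *old = NULL;
*	err = hashmap__set(map, key, new_ptr, NULL, &old);
*	if (!err)
*		free(old);
*
* old remains NULL if the key was not present before, so free() is a
* harmless no-op in that case.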
*/ int hashmap_insert(struct hashmap *map, long key, long value, enum hashmap_insert_strategy strategy, long *old_key, long *old_value); #define hashmap__insert(map, key, value, strategy, old_key, old_value) \ hashmap_insert((map), (long)(key), (long)(value), (strategy), \ hashmap_cast_ptr(old_key), \ hashmap_cast_ptr(old_value)) #define hashmap__add(map, key, value) \ hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL) #define hashmap__set(map, key, value, old_key, old_value) \ hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value)) #define hashmap__update(map, key, value, old_key, old_value) \ hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value)) #define hashmap__append(map, key, value) \ hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL) bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value); #define hashmap__delete(map, key, old_key, old_value) \ hashmap_delete((map), (long)(key), \ hashmap_cast_ptr(old_key), \ hashmap_cast_ptr(old_value)) bool hashmap_find(const struct hashmap *map, long key, long *value); #define hashmap__find(map, key, value) \ hashmap_find((map), (long)(key), hashmap_cast_ptr(value)) /* * hashmap__for_each_entry - iterate over all entries in hashmap * @map: hashmap to iterate * @cur: struct hashmap_entry * used as a loop cursor * @bkt: integer used as a bucket loop cursor */ #define hashmap__for_each_entry(map, cur, bkt) \ for (bkt = 0; bkt < (map)->cap; bkt++) \ for (cur = (map)->buckets[bkt]; cur; cur = cur->next) /* * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe * against removals * @map: hashmap to iterate * @cur: struct hashmap_entry * used as a loop cursor * @tmp: struct hashmap_entry * used as a temporary next cursor storage * @bkt: integer used as a bucket loop cursor */ #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ for (bkt = 0; bkt < (map)->cap; bkt++) \ for (cur = (map)->buckets[bkt]; \ cur && ({tmp = cur->next; true; }); \ cur = tmp) /* * hashmap__for_each_key_entry - iterate over entries associated with given key * @map: hashmap to iterate * @cur: struct hashmap_entry * used as a loop cursor * @key: key to iterate entries for */ #define hashmap__for_each_key_entry(map, cur, _key) \ for (cur = (map)->buckets \ ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \ : NULL; \ cur; \ cur = cur->next) \ if ((map)->equal_fn(cur->key, (_key), (map)->ctx)) #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ for (cur = (map)->buckets \ ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \ : NULL; \ cur && ({ tmp = cur->next; true; }); \ cur = tmp) \ if ((map)->equal_fn(cur->key, (_key), (map)->ctx)) #endif /* __LIBBPF_HASHMAP_H */ xdp-tools-1.5.4/lib/libbpf/src/skel_internal.h0000644000175100001660000002226614706536574020635 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (c) 2021 Facebook */ #ifndef __SKEL_INTERNAL_H #define __SKEL_INTERNAL_H #ifdef __KERNEL__ #include <linux/fdtable.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/bpf.h> #else #include <unistd.h> #include <sys/syscall.h> #include <sys/mman.h> #include <stdlib.h> #include "bpf.h" #endif #ifndef __NR_bpf # if defined(__mips__) && defined(_ABIO32) # define __NR_bpf 4355 # elif defined(__mips__) && defined(_ABIN32) # define __NR_bpf 6319 # elif defined(__mips__) && defined(_ABI64) # define __NR_bpf 5315 # endif #endif /* This file is a base header for auto-generated *.lskel.h files.
* Its contents will change and may become part of auto-generation in the future. * * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent * and will change from one version of libbpf to another and features * requested during loader program generation. */ struct bpf_map_desc { /* output of the loader prog */ int map_fd; /* input for the loader prog */ __u32 max_entries; __aligned_u64 initial_value; }; struct bpf_prog_desc { int prog_fd; }; enum { BPF_SKEL_KERNEL = (1ULL << 0), }; struct bpf_loader_ctx { __u32 sz; __u32 flags; __u32 log_level; __u32 log_size; __u64 log_buf; }; struct bpf_load_and_run_opts { struct bpf_loader_ctx *ctx; const void *data; const void *insns; __u32 data_sz; __u32 insns_sz; const char *errstr; }; long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size); static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { #ifdef __KERNEL__ return kern_sys_bpf(cmd, attr, size); #else return syscall(__NR_bpf, cmd, attr, size); #endif } #ifdef __KERNEL__ static inline int close(int fd) { return close_fd(fd); } static inline void *skel_alloc(size_t size) { struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL); if (!ctx) return NULL; ctx->flags |= BPF_SKEL_KERNEL; return ctx; } static inline void skel_free(const void *p) { kfree(p); } /* skel->bss/rodata maps are populated the following way: * * For kernel use: * skel_prep_map_data() allocates kernel memory that kernel module can directly access. * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. * The loader program will perform probe_read_kernel() from maps.rodata.initial_value. * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree * is not necessary. * * For user space: * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly. * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. * The loader program will perform copy_from_user() from maps.rodata.initial_value. * skel_finalize_map_data() remaps bpf array map value from the kernel memory into * skel->rodata address. * * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for * both kernel and user space. The generated loader program does * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value * depending on bpf_loader_ctx->flags. */ static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) { if (addr != ~0ULL) kvfree(p); /* When addr == ~0ULL the 'p' points to * ((struct bpf_array *)map)->value. See skel_finalize_map_data. */ } static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) { void *addr; addr = kvmalloc(val_sz, GFP_KERNEL); if (!addr) return NULL; memcpy(addr, val, val_sz); return addr; } static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) { struct bpf_map *map; void *addr = NULL; kvfree((void *) (long) *init_val); *init_val = ~0ULL; /* At this point bpf_load_and_run() finished without error and * 'fd' is a valid bpf map FD. All sanity checks below should succeed. 
*/ map = bpf_map_get(fd); if (IS_ERR(map)) return NULL; if (map->map_type != BPF_MAP_TYPE_ARRAY) goto out; addr = ((struct bpf_array *)map)->value; /* the addr stays valid, since FD is not closed */ out: bpf_map_put(map); return addr; } #else static inline void *skel_alloc(size_t size) { return calloc(1, size); } static inline void skel_free(void *p) { free(p); } static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) { munmap(p, sz); } static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) { void *addr; addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (addr == (void *) -1) return NULL; memcpy(addr, val, val_sz); return addr; } static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) { void *addr; addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0); if (addr == (void *) -1) return NULL; return addr; } #endif static inline int skel_closenz(int fd) { if (fd > 0) return close(fd); return -EINVAL; } #ifndef offsetofend #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) #endif static inline int skel_map_create(enum bpf_map_type map_type, const char *map_name, __u32 key_size, __u32 value_size, __u32 max_entries) { const size_t attr_sz = offsetofend(union bpf_attr, map_extra); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.map_type = map_type; strncpy(attr.map_name, map_name, sizeof(attr.map_name)); attr.key_size = key_size; attr.value_size = value_size; attr.max_entries = max_entries; return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz); } static inline int skel_map_update_elem(int fd, const void *key, const void *value, __u64 flags) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = (long) key; attr.value = (long) value; attr.flags = flags; return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); } static inline int skel_map_delete_elem(int fd, const void *key) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.map_fd = fd; attr.key = (long)key; return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz); } static inline int skel_map_get_fd_by_id(__u32 id) { const size_t attr_sz = offsetofend(union bpf_attr, flags); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.map_id = id; return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz); } static inline int skel_raw_tracepoint_open(const char *name, int prog_fd) { const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.raw_tracepoint.name = (long) name; attr.raw_tracepoint.prog_fd = prog_fd; return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); } static inline int skel_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type) { const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len); union bpf_attr attr; memset(&attr, 0, attr_sz); attr.link_create.prog_fd = prog_fd; attr.link_create.target_fd = target_fd; attr.link_create.attach_type = attach_type; return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz); } #ifdef __KERNEL__ #define set_err #else #define set_err err = -errno #endif static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) { const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array); const size_t test_run_attr_sz = offsetofend(union bpf_attr, 
test); int map_fd = -1, prog_fd = -1, key = 0, err; union bpf_attr attr; err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); if (map_fd < 0) { opts->errstr = "failed to create loader map"; set_err; goto out; } err = skel_map_update_elem(map_fd, &key, opts->data, 0); if (err < 0) { opts->errstr = "failed to update loader map"; set_err; goto out; } memset(&attr, 0, prog_load_attr_sz); attr.prog_type = BPF_PROG_TYPE_SYSCALL; attr.insns = (long) opts->insns; attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); attr.license = (long) "Dual BSD/GPL"; memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog")); attr.fd_array = (long) &map_fd; attr.log_level = opts->ctx->log_level; attr.log_size = opts->ctx->log_size; attr.log_buf = opts->ctx->log_buf; attr.prog_flags = BPF_F_SLEEPABLE; err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz); if (prog_fd < 0) { opts->errstr = "failed to load loader prog"; set_err; goto out; } memset(&attr, 0, test_run_attr_sz); attr.test.prog_fd = prog_fd; attr.test.ctx_in = (long) opts->ctx; attr.test.ctx_size_in = opts->ctx->sz; err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz); if (err < 0 || (int)attr.test.retval < 0) { if (err < 0) { opts->errstr = "failed to execute loader prog"; set_err; } else { opts->errstr = "error returned by loader prog"; err = (int)attr.test.retval; #ifndef __KERNEL__ errno = -err; #endif } goto out; } err = 0; out: if (map_fd >= 0) close(map_fd); if (prog_fd >= 0) close(prog_fd); return err; } #endif xdp-tools-1.5.4/lib/libbpf/src/libbpf.map0000644000175100001660000002164614706536574017577 0ustar runnerdockerLIBBPF_0.0.1 { global: bpf_btf_get_fd_by_id; bpf_map__btf_key_type_id; bpf_map__btf_value_type_id; bpf_map__fd; bpf_map__name; bpf_map__pin; bpf_map__reuse_fd; bpf_map__set_ifindex; bpf_map__set_inner_map_fd; bpf_map__unpin; bpf_map_delete_elem; bpf_map_get_fd_by_id; bpf_map_get_next_id; bpf_map_get_next_key; bpf_map_lookup_and_delete_elem; bpf_map_lookup_elem; bpf_map_update_elem; bpf_obj_get; bpf_obj_get_info_by_fd; bpf_obj_pin; bpf_object__btf_fd; bpf_object__close; bpf_object__find_map_by_name; bpf_object__kversion; bpf_object__load; bpf_object__name; bpf_object__open; bpf_object__pin; bpf_object__pin_maps; bpf_object__pin_programs; bpf_object__unpin_maps; bpf_object__unpin_programs; bpf_prog_attach; bpf_prog_detach; bpf_prog_detach2; bpf_prog_get_fd_by_id; bpf_prog_get_next_id; bpf_prog_query; bpf_program__fd; bpf_program__pin; bpf_program__set_expected_attach_type; bpf_program__set_ifindex; bpf_program__set_type; bpf_program__unload; bpf_program__unpin; bpf_prog_linfo__free; bpf_prog_linfo__new; bpf_prog_linfo__lfind_addr_func; bpf_prog_linfo__lfind; bpf_raw_tracepoint_open; bpf_task_fd_query; btf__fd; btf__find_by_name; btf__free; btf__name_by_offset; btf__new; btf__resolve_size; btf__resolve_type; btf__type_by_id; libbpf_attach_type_by_name; libbpf_get_error; libbpf_prog_type_by_name; libbpf_set_print; libbpf_strerror; local: *; }; LIBBPF_0.0.2 { global: bpf_map_lookup_elem_flags; bpf_object__btf; bpf_object__find_map_fd_by_name; btf__get_raw_data; btf_ext__free; btf_ext__get_raw_data; btf_ext__new; } LIBBPF_0.0.1; LIBBPF_0.0.3 { global: bpf_map__is_internal; bpf_map_freeze; } LIBBPF_0.0.2; LIBBPF_0.0.4 { global: bpf_link__destroy; bpf_program__attach_kprobe; bpf_program__attach_perf_event; bpf_program__attach_raw_tracepoint; bpf_program__attach_tracepoint; bpf_program__attach_uprobe; btf_dump__dump_type; btf_dump__free; btf__parse_elf; 
libbpf_num_possible_cpus; perf_buffer__free; perf_buffer__poll; } LIBBPF_0.0.3; LIBBPF_0.0.5 { global: bpf_btf_get_next_id; } LIBBPF_0.0.4; LIBBPF_0.0.6 { global: bpf_map__get_pin_path; bpf_map__is_pinned; bpf_map__set_pin_path; bpf_object__open_file; bpf_object__open_mem; bpf_program__attach_trace; bpf_program__get_expected_attach_type; bpf_program__get_type; btf__find_by_name_kind; libbpf_find_vmlinux_btf_id; } LIBBPF_0.0.5; LIBBPF_0.0.7 { global: btf_dump__emit_type_decl; bpf_link__disconnect; bpf_map__attach_struct_ops; bpf_map_delete_batch; bpf_map_lookup_and_delete_batch; bpf_map_lookup_batch; bpf_map_update_batch; bpf_object__find_program_by_name; bpf_object__attach_skeleton; bpf_object__destroy_skeleton; bpf_object__detach_skeleton; bpf_object__load_skeleton; bpf_object__open_skeleton; bpf_program__attach; bpf_program__name; btf__align_of; libbpf_find_kernel_btf; } LIBBPF_0.0.6; LIBBPF_0.0.8 { global: bpf_link__fd; bpf_link__open; bpf_link__pin; bpf_link__pin_path; bpf_link__unpin; bpf_link__update_program; bpf_link_create; bpf_link_update; bpf_map__set_initial_value; bpf_prog_attach_opts; bpf_program__attach_cgroup; bpf_program__attach_lsm; bpf_program__set_attach_target; } LIBBPF_0.0.7; LIBBPF_0.0.9 { global: bpf_enable_stats; bpf_iter_create; bpf_link_get_fd_by_id; bpf_link_get_next_id; bpf_program__attach_iter; bpf_program__attach_netns; perf_buffer__consume; ring_buffer__add; ring_buffer__consume; ring_buffer__free; ring_buffer__new; ring_buffer__poll; } LIBBPF_0.0.8; LIBBPF_0.1.0 { global: bpf_link__detach; bpf_link_detach; bpf_map__ifindex; bpf_map__key_size; bpf_map__map_flags; bpf_map__max_entries; bpf_map__numa_node; bpf_map__set_key_size; bpf_map__set_map_flags; bpf_map__set_max_entries; bpf_map__set_numa_node; bpf_map__set_type; bpf_map__set_value_size; bpf_map__type; bpf_map__value_size; bpf_program__attach_xdp; bpf_program__autoload; bpf_program__set_autoload; btf__parse; btf__parse_raw; btf__pointer_size; btf__set_fd; btf__set_pointer_size; } LIBBPF_0.0.9; LIBBPF_0.2.0 { global: bpf_prog_bind_map; bpf_prog_test_run_opts; bpf_program__attach_freplace; bpf_program__section_name; btf__add_array; btf__add_const; btf__add_enum; btf__add_enum_value; btf__add_datasec; btf__add_datasec_var_info; btf__add_field; btf__add_func; btf__add_func_param; btf__add_func_proto; btf__add_fwd; btf__add_int; btf__add_ptr; btf__add_restrict; btf__add_str; btf__add_struct; btf__add_typedef; btf__add_union; btf__add_var; btf__add_volatile; btf__endianness; btf__find_str; btf__new_empty; btf__set_endianness; btf__str_by_offset; perf_buffer__buffer_cnt; perf_buffer__buffer_fd; perf_buffer__epoll_fd; perf_buffer__consume_buffer; } LIBBPF_0.1.0; LIBBPF_0.3.0 { global: btf__base_btf; btf__parse_elf_split; btf__parse_raw_split; btf__parse_split; btf__new_empty_split; ring_buffer__epoll_fd; } LIBBPF_0.2.0; LIBBPF_0.4.0 { global: btf__add_float; btf__add_type; bpf_linker__add_file; bpf_linker__finalize; bpf_linker__free; bpf_linker__new; bpf_map__inner_map; bpf_object__set_kversion; bpf_tc_attach; bpf_tc_detach; bpf_tc_hook_create; bpf_tc_hook_destroy; bpf_tc_query; } LIBBPF_0.3.0; LIBBPF_0.5.0 { global: bpf_map__initial_value; bpf_map__pin_path; bpf_map_lookup_and_delete_elem_flags; bpf_program__attach_kprobe_opts; bpf_program__attach_perf_event_opts; bpf_program__attach_tracepoint_opts; bpf_program__attach_uprobe_opts; bpf_object__gen_loader; btf__load_from_kernel_by_id; btf__load_from_kernel_by_id_split; btf__load_into_kernel; btf__load_module_btf; btf__load_vmlinux_btf; btf_dump__dump_type_data; 
libbpf_set_strict_mode; } LIBBPF_0.4.0; LIBBPF_0.6.0 { global: bpf_map__map_extra; bpf_map__set_map_extra; bpf_map_create; bpf_object__next_map; bpf_object__next_program; bpf_object__prev_map; bpf_object__prev_program; bpf_prog_load; bpf_program__flags; bpf_program__insn_cnt; bpf_program__insns; bpf_program__set_flags; btf__add_btf; btf__add_decl_tag; btf__add_type_tag; btf__dedup; btf__raw_data; btf__type_cnt; btf_dump__new; libbpf_major_version; libbpf_minor_version; libbpf_version_string; perf_buffer__new; perf_buffer__new_raw; } LIBBPF_0.5.0; LIBBPF_0.7.0 { global: bpf_btf_load; bpf_program__expected_attach_type; bpf_program__log_buf; bpf_program__log_level; bpf_program__set_log_buf; bpf_program__set_log_level; bpf_program__type; bpf_xdp_attach; bpf_xdp_detach; bpf_xdp_query; bpf_xdp_query_id; libbpf_probe_bpf_helper; libbpf_probe_bpf_map_type; libbpf_probe_bpf_prog_type; libbpf_set_memlock_rlim; } LIBBPF_0.6.0; LIBBPF_0.8.0 { global: bpf_map__autocreate; bpf_map__get_next_key; bpf_map__delete_elem; bpf_map__lookup_and_delete_elem; bpf_map__lookup_elem; bpf_map__set_autocreate; bpf_map__update_elem; bpf_map_delete_elem_flags; bpf_object__destroy_subskeleton; bpf_object__open_subskeleton; bpf_program__attach_kprobe_multi_opts; bpf_program__attach_trace_opts; bpf_program__attach_usdt; bpf_program__set_insns; libbpf_register_prog_handler; libbpf_unregister_prog_handler; } LIBBPF_0.7.0; LIBBPF_1.0.0 { global: bpf_obj_get_opts; bpf_prog_query_opts; bpf_program__attach_ksyscall; bpf_program__autoattach; bpf_program__set_autoattach; btf__add_enum64; btf__add_enum64_value; libbpf_bpf_attach_type_str; libbpf_bpf_link_type_str; libbpf_bpf_map_type_str; libbpf_bpf_prog_type_str; perf_buffer__buffer; } LIBBPF_0.8.0; LIBBPF_1.1.0 { global: bpf_btf_get_fd_by_id_opts; bpf_link_get_fd_by_id_opts; bpf_map_get_fd_by_id_opts; bpf_prog_get_fd_by_id_opts; user_ring_buffer__discard; user_ring_buffer__free; user_ring_buffer__new; user_ring_buffer__reserve; user_ring_buffer__reserve_blocking; user_ring_buffer__submit; } LIBBPF_1.0.0; LIBBPF_1.2.0 { global: bpf_btf_get_info_by_fd; bpf_link__update_map; bpf_link_get_info_by_fd; bpf_map_get_info_by_fd; bpf_prog_get_info_by_fd; } LIBBPF_1.1.0; LIBBPF_1.3.0 { global: bpf_obj_pin_opts; bpf_object__unpin; bpf_prog_detach_opts; bpf_program__attach_netfilter; bpf_program__attach_netkit; bpf_program__attach_tcx; bpf_program__attach_uprobe_multi; ring__avail_data_size; ring__consume; ring__consumer_pos; ring__map_fd; ring__producer_pos; ring__size; ring_buffer__ring; } LIBBPF_1.2.0; LIBBPF_1.4.0 { global: bpf_program__attach_raw_tracepoint_opts; bpf_raw_tracepoint_open_opts; bpf_token_create; btf__new_split; btf_ext__raw_data; } LIBBPF_1.3.0; LIBBPF_1.5.0 { global: btf__distill_base; btf__relocate; btf_ext__endianness; btf_ext__set_endianness; bpf_map__autoattach; bpf_map__set_autoattach; bpf_object__token_fd; bpf_program__attach_sockmap; ring__consume_n; ring_buffer__consume_n; } LIBBPF_1.4.0; xdp-tools-1.5.4/lib/libbpf/docs/0000755000175100001660000000000014706536574015772 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/docs/libbpf_naming_convention.rst0000644000175100001660000001503114706536574023555 0ustar runnerdocker.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) API naming convention ===================== libbpf API provides access to a few logically separated groups of functions and types. Every group has its own naming convention described here. 
It's recommended to follow these conventions whenever a new function or type is added to keep the libbpf API clean and consistent. All types and functions provided by the libbpf API should have one of the following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``btf_dump_``, ``ring_buffer_``, ``perf_buffer_``. System call wrappers -------------------- System call wrappers are simple wrappers for commands supported by the sys_bpf system call. These wrappers should go in the ``bpf.h`` header file and map one-to-one to the corresponding commands. For example, ``bpf_map_lookup_elem`` wraps the ``BPF_MAP_LOOKUP_ELEM`` command of sys_bpf, ``bpf_prog_attach`` wraps ``BPF_PROG_ATTACH``, etc. Objects ------- Another class of types and functions provided by the libbpf API is "objects", along with the functions to work with them. Objects are high-level abstractions such as a BPF program or a BPF map. They're represented by corresponding structures such as ``struct bpf_object``, ``struct bpf_program``, ``struct bpf_map``, etc. Structures are forward declared, and access to their fields should be provided via corresponding getters and setters rather than directly. These objects are associated with corresponding parts of the ELF object that contains compiled BPF programs. For example, ``struct bpf_object`` represents the ELF object itself, created from an ELF file or from a buffer, ``struct bpf_program`` represents a program in the ELF object, and ``struct bpf_map`` is a map. Functions that work with an object have names built from the object name, a double underscore, and a part that describes the function's purpose. For example, ``bpf_object__open`` consists of the name of the corresponding object, ``bpf_object``, a double underscore, and ``open``, which indicates that the purpose of the function is to open an ELF file and create a ``bpf_object`` from it. All objects and corresponding functions other than BTF-related ones should go in ``libbpf.h``. BTF types and functions should go in ``btf.h``. Auxiliary functions ------------------- Auxiliary functions and types that don't fit well in any of the categories described above should have the ``libbpf_`` prefix, e.g. ``libbpf_get_error`` or ``libbpf_prog_type_by_name``. ABI --- libbpf can be either linked statically or used as a DSO. To avoid possible conflicts with other libraries an application is linked with, all non-static libbpf symbols should have one of the prefixes mentioned in the API documentation above. See the API naming convention to choose the right name for a new symbol. Symbol visibility ----------------- libbpf follows the model in which all global symbols have "hidden" visibility by default, and to make a symbol visible it has to be explicitly annotated with the ``LIBBPF_API`` macro. For example: .. code-block:: c LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); This prevents accidentally exporting a symbol that is not supposed to be part of the ABI, which, in turn, improves the experience of both libbpf developers and users. ABI versioning -------------- To make future ABI extensions possible, the libbpf ABI is versioned. Versioning is implemented by the ``libbpf.map`` version script that is passed to the linker. A version name is the ``LIBBPF_`` prefix plus a three-component numeric version, starting from ``0.0.1``. Every time the ABI is changed, e.g. because a new symbol is added or the semantics of an existing symbol are changed, the ABI version should be bumped. This bump in ABI version is at most once per kernel development cycle. For example, if the current state of ``libbpf.map`` is: ..
code-block:: none LIBBPF_0.0.1 { global: bpf_func_a; bpf_func_b; local: \*; }; , and a new symbol ``bpf_func_c`` is being introduced, then ``libbpf.map`` should be changed like this: .. code-block:: none LIBBPF_0.0.1 { global: bpf_func_a; bpf_func_b; local: \*; }; LIBBPF_0.0.2 { global: bpf_func_c; } LIBBPF_0.0.1; , where the new version ``LIBBPF_0.0.2`` depends on the previous ``LIBBPF_0.0.1``. The format of the version script and the ways to handle ABI changes, including incompatible ones, are described in detail in [1]. Stand-alone build ------------------- Under https://github.com/libbpf/libbpf there is a (semi-)automated mirror of the mainline version of libbpf for a stand-alone build. However, all changes to libbpf's code base must be upstreamed through the mainline kernel tree. API documentation convention ============================ The libbpf API is documented via comments above definitions in header files. These comments can be rendered by doxygen and sphinx for well-organized HTML output. This section describes the convention in which these comments should be formatted. Here is an example from btf.h: .. code-block:: c /** * @brief **btf__new()** creates a new instance of a BTF object from the raw * bytes of an ELF's BTF section * @param data raw bytes * @param size number of bytes passed in `data` * @return new BTF object instance which has to be eventually freed with * **btf__free()** * * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract * error code from such a pointer `libbpf_get_error()` should be used. If * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is * returned on error instead. In both cases thread-local `errno` variable is * always set to error code as well. */ The comment must start with a block comment of the form '/\*\*'. The documentation always starts with a @brief directive. This line is a short description of this API. It starts with the name of the API, denoted in bold like so: **api_name**. Please include an opening and closing parenthesis if this is a function. Follow with the short description of the API. A longer form description can be added below the last directive, at the bottom of the comment. Parameters are denoted with the @param directive; there should be one for each parameter. If this is a function with a non-void return, use the @return directive to document it. License ------------------- libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause. Links ------------------- [1] https://www.akkadia.org/drepper/dsohowto.pdf (Chapter 3. Maintaining APIs and ABIs). xdp-tools-1.5.4/lib/libbpf/docs/libbpf_overview.rst0000644000175100001660000002607114706536574021710 0ustar runnerdocker.. SPDX-License-Identifier: GPL-2.0 =============== libbpf Overview =============== libbpf is a C-based library containing a BPF loader that takes compiled BPF object files and prepares and loads them into the Linux kernel. libbpf takes on the heavy lifting of loading, verifying, and attaching BPF programs to various kernel hooks, allowing BPF application developers to focus only on BPF program correctness and performance. The following are the high-level features supported by libbpf: * Provides high-level and low-level APIs for user space programs to interact with BPF programs. The low-level APIs wrap all the bpf system call functionality, which is useful when users need more fine-grained control over the interactions between user space and BPF programs. * Provides overall support for the BPF object skeleton generated by bpftool.
The skeleton file simplifies the process for user space programs to access global variables and work with BPF programs. * Provides BPF-side APIs, including BPF helper definitions, BPF maps support, and tracing helpers, allowing developers to simplify BPF code writing. * Supports the BPF CO-RE mechanism, enabling BPF developers to write portable BPF programs that can be compiled once and run across different kernel versions. This document will delve into the above concepts in detail, providing a deeper understanding of the capabilities and advantages of libbpf and how it can help you develop BPF applications efficiently. BPF App Lifecycle and libbpf APIs ================================== A BPF application consists of one or more BPF programs (either cooperating or completely independent), BPF maps, and global variables. The global variables are shared between all BPF programs, which allows them to cooperate on a common set of data. libbpf provides APIs that user space programs can use to manipulate the BPF programs by triggering different phases of a BPF application lifecycle. The following section provides a brief overview of each phase in the BPF life cycle: * **Open phase**: In this phase, libbpf parses the BPF object file and discovers BPF maps, BPF programs, and global variables. After a BPF app is opened, user space apps can make additional adjustments (setting BPF program types, if necessary; pre-setting initial values for global variables, etc.) before all the entities are created and loaded. * **Load phase**: In the load phase, libbpf creates BPF maps, resolves various relocations, and verifies and loads BPF programs into the kernel. At this point, libbpf validates all the parts of a BPF application and loads the BPF program into the kernel, but no BPF program has yet been executed. After the load phase, it’s possible to set up the initial BPF map state without racing with the BPF program code execution. * **Attachment phase**: In this phase, libbpf attaches BPF programs to various BPF hook points (e.g., tracepoints, kprobes, cgroup hooks, network packet processing pipeline, etc.). During this phase, BPF programs perform useful work such as processing packets, or updating BPF maps and global variables that can be read from user space. * **Tear down phase**: In the tear down phase, libbpf detaches BPF programs and unloads them from the kernel. BPF maps are destroyed, and all the resources used by the BPF app are freed. BPF Object Skeleton File ======================== BPF skeleton is an alternative interface to libbpf APIs for working with BPF objects. Skeleton code abstracts away generic libbpf APIs to significantly simplify code for manipulating BPF programs from user space. Skeleton code includes a bytecode representation of the BPF object file, simplifying the process of distributing your BPF code. With BPF bytecode embedded, there are no extra files to deploy along with your application binary. You can generate the skeleton header file (``.skel.h``) for a specific object file by passing the BPF object to bpftool, as shown in the example below.
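For example (an illustrative invocation; ``app.bpf.o`` and ``app.skel.h`` are placeholder file names): ::

    $ bpftool gen skeleton app.bpf.o > app.skel.h

The resulting header is then included directly by the user space program.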
The generated BPF skeleton provides the following custom functions that correspond to the BPF lifecycle, each of them prefixed with the specific object name: * ``<name>__open()`` – creates and opens BPF application (``<name>`` stands for the specific bpf object name) * ``<name>__load()`` – instantiates, loads, and verifies BPF application parts * ``<name>__attach()`` – attaches all auto-attachable BPF programs (it’s optional, you can have more control by using libbpf APIs directly) * ``<name>__destroy()`` – detaches all BPF programs and frees up all used resources Using the skeleton code is the recommended way to work with bpf programs. Keep in mind, BPF skeleton provides access to the underlying BPF object, so whatever was possible to do with generic libbpf APIs is still possible even when the BPF skeleton is used. It's an additive convenience feature, with no syscalls, and no cumbersome code. Other Advantages of Using Skeleton File --------------------------------------- * BPF skeleton provides an interface for user space programs to work with BPF global variables. The skeleton code memory maps global variables as a struct into user space. The struct interface allows user space programs to initialize BPF programs before the BPF load phase and fetch and update data from user space afterward. * The ``skel.h`` file reflects the object file structure by listing out the available maps, programs, etc. BPF skeleton provides direct access to all the BPF maps and BPF programs as struct fields. This eliminates the need for string-based lookups with ``bpf_object__find_map_by_name()`` and ``bpf_object__find_program_by_name()`` APIs, reducing errors due to BPF source code and user-space code getting out of sync. * The embedded bytecode representation of the object file ensures that the skeleton and the BPF object file are always in sync. BPF Helpers =========== libbpf provides BPF-side APIs that BPF programs can use to interact with the system. The BPF helpers definition allows developers to use them in BPF code as any other plain C function. For example, there are helper functions to print debugging messages, get the time since the system was booted, interact with BPF maps, manipulate network packets, etc. For a complete description of what the helpers do, the arguments they take, and the return value, see the `bpf-helpers `_ man page. BPF CO-RE (Compile Once – Run Everywhere) ========================================= BPF programs work in the kernel space and have access to kernel memory and data structures. One limitation that BPF applications come across is the lack of portability across different kernel versions and configurations. `BCC `_ is one of the solutions for BPF portability. However, it comes with runtime overhead and a large binary size from embedding the compiler with the application. libbpf steps up the BPF program portability by supporting the BPF CO-RE concept. BPF CO-RE brings together BTF type information, libbpf, and the compiler to produce a single executable binary that you can run on multiple kernel versions and configurations. To make BPF programs portable, libbpf relies on the BTF type information of the running kernel. The kernel also exposes this self-describing authoritative BTF information through ``sysfs`` at ``/sys/kernel/btf/vmlinux``.
You can generate the BTF information for the running kernel with the following command: :: $ bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h The command generates a ``vmlinux.h`` header file with all kernel types (:doc:`BTF types <../btf>`) that the running kernel uses. Including ``vmlinux.h`` in your BPF program eliminates dependency on system-wide kernel headers. libbpf enables portability of BPF programs by looking at the BPF program’s recorded BTF type and relocation information and matching them to BTF information (vmlinux) provided by the running kernel. libbpf then resolves and matches all the types and fields, and updates necessary offsets and other relocatable data to ensure that the BPF program’s logic functions correctly for a specific kernel on the host. The BPF CO-RE concept thus eliminates the overhead associated with BPF development and allows developers to write portable BPF applications without modifications and runtime source code compilation on the target machine. The following code snippet shows how to read the parent field of a kernel ``task_struct`` using BPF CO-RE and libbpf. The basic helper to read a field in a CO-RE relocatable manner is ``bpf_core_read(dst, sz, src)``, which will read ``sz`` bytes from the field referenced by ``src`` into the memory pointed to by ``dst``. .. code-block:: C :emphasize-lines: 6 //... struct task_struct *task = (void *)bpf_get_current_task(); struct task_struct *parent_task; int err; err = bpf_core_read(&parent_task, sizeof(void *), &task->parent); if (err) { /* handle error */ } /* parent_task contains the value of task->parent pointer */ In the code snippet, we first get a pointer to the current ``task_struct`` using ``bpf_get_current_task()``. We then use ``bpf_core_read()`` to read the parent field of the task struct into the ``parent_task`` variable. ``bpf_core_read()`` is just like the ``bpf_probe_read_kernel()`` BPF helper, except that it records information about the field that should be relocated on the target kernel. I.e., if the ``parent`` field gets shifted to a different offset within ``struct task_struct`` due to some new field added in front of it, libbpf will automatically adjust the actual offset to the proper value. Getting Started with libbpf =========================== Check out the `libbpf-bootstrap `_ repository with simple examples of using libbpf to build various BPF applications. See also `libbpf API documentation `_. libbpf and Rust =============== If you are building BPF applications in Rust, it is recommended to use the `Libbpf-rs `_ library instead of bindgen bindings directly to libbpf. Libbpf-rs wraps libbpf functionality in Rust-idiomatic interfaces and provides the libbpf-cargo plugin to handle BPF code compilation and skeleton generation. Using Libbpf-rs will make building the user space part of the BPF application easier. Note that the BPF programs themselves must still be written in plain C. libbpf logging ============== By default, libbpf logs informational and warning messages to stderr. The verbosity of these messages can be controlled by setting the environment variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log callback can be set using ``libbpf_set_print()``.
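For example, a custom callback that forwards everything except debug messages to stderr could look like this (an illustrative sketch, not part of the library):

.. code-block:: c

    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* matches the libbpf_print_fn_t signature expected by libbpf_set_print() */
    static int my_print_fn(enum libbpf_print_level level,
                           const char *format, va_list args)
    {
            if (level == LIBBPF_DEBUG)
                    return 0; /* suppress debug output */
            return vfprintf(stderr, format, args);
    }

    int main(void)
    {
            /* install the callback before making other libbpf calls */
            libbpf_set_print(my_print_fn);
            /* ... rest of the application ... */
            return 0;
    }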
Additional Documentation ======================== * `Program types and ELF Sections `_ * `API naming convention `_ * `Building libbpf `_ * `API documentation Convention `_ xdp-tools-1.5.4/lib/libbpf/docs/index.rst0000644000175100001660000000216614706536574017634 0ustar runnerdocker.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) .. _libbpf: ====== libbpf ====== If you are looking to develop BPF applications using the libbpf library, this directory contains important documentation that you should read. To get started, it is recommended to begin with the :doc:`libbpf Overview ` document, which provides a high-level understanding of the libbpf APIs and their usage. This will give you a solid foundation to start exploring and utilizing the various features of libbpf to develop your BPF applications. .. toctree:: :maxdepth: 1 libbpf_overview API Documentation program_types libbpf_naming_convention libbpf_build All general BPF questions, including kernel functionality, libbpf APIs and their application, should be sent to the bpf@vger.kernel.org mailing list. You can `subscribe `_ to the mailing list and search its `archive `_. Please search the archive before asking new questions. It may be that this was already addressed or answered before. xdp-tools-1.5.4/lib/libbpf/docs/api.rst0000644000175100001660000000374714706536574017304 0ustar runnerdocker.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) .. _api: .. toctree:: Table of Contents LIBBPF API ========== Error Handling -------------- When libbpf is used in "libbpf 1.0 mode", API functions can return errors in one of two ways. You can set "libbpf 1.0" mode with the following line: .. code-block:: libbpf_set_strict_mode(LIBBPF_STRICT_DIRECT_ERRS | LIBBPF_STRICT_CLEAN_PTRS); If the function returns an error code directly, it uses 0 to indicate success and a negative error code to indicate what caused the error. In this case the error code should be checked directly from the return value; you do not need to check errno. For example: ..
code-block:: ptr = some_libbpf_api_returning_ptr(); if (!ptr) { /* note no minus sign for EINVAL and E2BIG below */ if (errno == EINVAL) { /* handle EINVAL error */ } else if (errno == E2BIG) { /* handle E2BIG error */ } } libbpf.h -------- .. doxygenfile:: libbpf.h :project: libbpf :sections: func define public-type enum bpf.h ----- .. doxygenfile:: bpf.h :project: libbpf :sections: func define public-type enum btf.h ----- .. doxygenfile:: btf.h :project: libbpf :sections: func define public-type enum xsk.h ----- .. doxygenfile:: xsk.h :project: libbpf :sections: func define public-type enum bpf_tracing.h ------------- .. doxygenfile:: bpf_tracing.h :project: libbpf :sections: func define public-type enum bpf_core_read.h --------------- .. doxygenfile:: bpf_core_read.h :project: libbpf :sections: func define public-type enum bpf_endian.h ------------ .. doxygenfile:: bpf_endian.h :project: libbpf :sections: func define public-type enum xdp-tools-1.5.4/lib/libbpf/docs/sphinx/0000755000175100001660000000000014706536574017303 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/docs/sphinx/doxygen/0000755000175100001660000000000014706536574020760 5ustar runnerdockerxdp-tools-1.5.4/lib/libbpf/docs/sphinx/doxygen/Doxyfile0000644000175100001660000001727214706536574022477 0ustar runnerdockerDOXYFILE_ENCODING = UTF-8 PROJECT_NAME = "libbpf" PROJECT_NUMBER = PROJECT_BRIEF = PROJECT_LOGO = OUTPUT_DIRECTORY = ./build CREATE_SUBDIRS = NO ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English OUTPUT_TEXT_DIRECTION = None BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = YES STRIP_FROM_PATH = STRIP_FROM_INC_PATH = SHORT_NAMES = NO JAVADOC_AUTOBRIEF = NO JAVADOC_BANNER = NO QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO PYTHON_DOCSTRING = NO INHERIT_DOCS = YES SEPARATE_MEMBER_PAGES = NO TAB_SIZE = 4 ALIASES = OPTIMIZE_OUTPUT_FOR_C = YES OPTIMIZE_OUTPUT_JAVA = NO OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO OPTIMIZE_OUTPUT_SLICE = NO EXTENSION_MAPPING = MARKDOWN_SUPPORT = YES TOC_INCLUDE_HEADINGS = 5 AUTOLINK_SUPPORT = YES BUILTIN_STL_SUPPORT = NO CPP_CLI_SUPPORT = NO SIP_SUPPORT = NO IDL_PROPERTY_SUPPORT = YES DISTRIBUTE_GROUP_DOC = NO GROUP_NESTED_COMPOUNDS = NO SUBGROUPING = YES INLINE_GROUPED_CLASSES = NO INLINE_SIMPLE_STRUCTS = NO TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 0 NUM_PROC_THREADS = 1 EXTRACT_ALL = NO EXTRACT_PRIVATE = NO EXTRACT_PRIV_VIRTUAL = NO EXTRACT_PACKAGE = NO EXTRACT_STATIC = NO EXTRACT_LOCAL_CLASSES = YES EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO RESOLVE_UNNAMED_PARAMS = YES HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO HIDE_FRIEND_COMPOUNDS = NO HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO CASE_SENSE_NAMES = YES HIDE_SCOPE_NAMES = NO HIDE_COMPOUND_REFERENCE= NO SHOW_INCLUDE_FILES = YES SHOW_GROUPED_MEMB_INC = NO FORCE_LOCAL_INCLUDES = NO INLINE_INFO = YES SORT_MEMBER_DOCS = YES SORT_BRIEF_DOCS = NO SORT_MEMBERS_CTORS_1ST = NO SORT_GROUP_NAMES = NO SORT_BY_SCOPE_NAME = NO STRICT_PROTO_MATCHING = NO GENERATE_TODOLIST = YES GENERATE_TESTLIST = YES GENERATE_BUGLIST = YES GENERATE_DEPRECATEDLIST= YES ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = YES SHOW_FILES = YES SHOW_NAMESPACES = YES FILE_VERSION_FILTER = LAYOUT_FILE = CITE_BIB_FILES = QUIET = NO WARNINGS = YES WARN_IF_UNDOCUMENTED = YES WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = NO WARN_AS_ERROR = NO WARN_FORMAT = "$file:$line: $text" WARN_LOGFILE = INPUT = ../../../src INPUT_ENCODING = UTF-8 FILE_PATTERNS = *.c \ *.h RECURSIVE = NO EXCLUDE 
= EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = EXCLUDE_SYMBOLS = ___* EXAMPLE_PATH = EXAMPLE_PATTERNS = * EXAMPLE_RECURSIVE = NO IMAGE_PATH = INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO FILTER_SOURCE_PATTERNS = USE_MDFILE_AS_MAINPAGE = YES SOURCE_BROWSER = NO INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES REFERENCED_BY_RELATION = NO REFERENCES_RELATION = NO REFERENCES_LINK_SOURCE = YES SOURCE_TOOLTIPS = YES USE_HTAGS = NO VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES IGNORE_PREFIX = GENERATE_HTML = NO HTML_OUTPUT = html HTML_FILE_EXTENSION = .html HTML_HEADER = HTML_FOOTER = HTML_STYLESHEET = HTML_EXTRA_STYLESHEET = HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 HTML_COLORSTYLE_SAT = 100 HTML_COLORSTYLE_GAMMA = 80 HTML_TIMESTAMP = NO HTML_DYNAMIC_MENUS = YES HTML_DYNAMIC_SECTIONS = NO HTML_INDEX_NUM_ENTRIES = 100 GENERATE_DOCSET = NO DOCSET_FEEDNAME = "Doxygen generated docs" DOCSET_BUNDLE_ID = org.doxygen.Project DOCSET_PUBLISHER_ID = org.doxygen.Publisher DOCSET_PUBLISHER_NAME = Publisher GENERATE_HTMLHELP = NO CHM_FILE = HHC_LOCATION = GENERATE_CHI = NO CHM_INDEX_ENCODING = BINARY_TOC = NO TOC_EXPAND = NO GENERATE_QHP = NO QCH_FILE = QHP_NAMESPACE = org.doxygen.Project QHP_VIRTUAL_FOLDER = doc QHP_CUST_FILTER_NAME = QHP_CUST_FILTER_ATTRS = QHP_SECT_FILTER_ATTRS = QHG_LOCATION = GENERATE_ECLIPSEHELP = NO ECLIPSE_DOC_ID = org.doxygen.Project DISABLE_INDEX = NO GENERATE_TREEVIEW = NO ENUM_VALUES_PER_LINE = 4 TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES FORMULA_MACROFILE = USE_MATHJAX = NO MATHJAX_FORMAT = HTML-CSS MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 MATHJAX_EXTENSIONS = MATHJAX_CODEFILE = SEARCHENGINE = YES SERVER_BASED_SEARCH = NO EXTERNAL_SEARCH = NO SEARCHENGINE_URL = SEARCHDATA_FILE = searchdata.xml EXTERNAL_SEARCH_ID = EXTRA_SEARCH_MAPPINGS = GENERATE_LATEX = NO LATEX_OUTPUT = latex LATEX_CMD_NAME = MAKEINDEX_CMD_NAME = makeindex LATEX_MAKEINDEX_CMD = makeindex COMPACT_LATEX = NO PAPER_TYPE = a4 EXTRA_PACKAGES = LATEX_HEADER = LATEX_FOOTER = LATEX_EXTRA_STYLESHEET = LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain LATEX_TIMESTAMP = NO LATEX_EMOJI_DIRECTORY = GENERATE_RTF = NO RTF_OUTPUT = rtf COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = RTF_SOURCE_CODE = NO GENERATE_MAN = NO MAN_OUTPUT = man MAN_EXTENSION = .3 MAN_SUBDIR = MAN_LINKS = NO GENERATE_XML = YES XML_OUTPUT = xml XML_PROGRAMLISTING = YES XML_NS_MEMB_FILE_SCOPE = NO GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook DOCBOOK_PROGRAMLISTING = NO GENERATE_AUTOGEN_DEF = NO GENERATE_PERLMOD = NO PERLMOD_LATEX = NO PERLMOD_PRETTY = YES PERLMOD_MAKEVAR_PREFIX = ENABLE_PREPROCESSING = YES MACRO_EXPANSION = NO EXPAND_ONLY_PREDEF = YES SEARCH_INCLUDES = YES INCLUDE_PATH = INCLUDE_FILE_PATTERNS = PREDEFINED = EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = NO TAGFILES = GENERATE_TAGFILE = ALLEXTERNALS = NO EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES CLASS_DIAGRAMS = YES DIA_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO DOT_NUM_THREADS = 0 DOT_FONTNAME = Helvetica DOT_FONTSIZE = 10 DOT_FONTPATH = CLASS_GRAPH = YES COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES UML_LOOK = NO UML_LIMIT_NUM_FIELDS = 10 DOT_UML_DETAILS = NO DOT_WRAP_THRESHOLD = 17 TEMPLATE_RELATIONS = NO INCLUDE_GRAPH = YES INCLUDED_BY_GRAPH = YES CALL_GRAPH = NO CALLER_GRAPH = NO GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES DOT_IMAGE_FORMAT 
= png INTERACTIVE_SVG = NO DOT_PATH = DOTFILE_DIRS = MSCFILE_DIRS = DIAFILE_DIRS = PLANTUML_JAR_PATH = PLANTUML_CFG_FILE = PLANTUML_INCLUDE_PATH = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES xdp-tools-1.5.4/lib/libbpf/docs/sphinx/requirements.txt0000644000175100001660000000003114706536574022561 0ustar runnerdockerbreathe sphinx_rtd_theme xdp-tools-1.5.4/lib/libbpf/docs/sphinx/Makefile0000644000175100001660000000030014706536574020734 0ustar runnerdockerSPHINXBUILD ?= sphinx-build SOURCEDIR = ../src BUILDDIR = build help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" %: @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" xdp-tools-1.5.4/lib/libbpf/docs/libbpf_build.rst0000644000175100001660000000214414706536574021142 0ustar runnerdocker.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) Building libbpf =============== libelf and zlib are internal dependencies of libbpf and thus are required to link against and must be installed on the system for applications to work. pkg-config is used by default to find libelf, and the program called can be overridden with PKG_CONFIG. If using pkg-config at build time is not desired, it can be disabled by setting NO_PKG_CONFIG=1 when calling make. To build both static libbpf.a and shared libbpf.so: .. code-block:: bash $ cd src $ make To build only static libbpf.a library in directory build/ and install them together with libbpf headers in a staging directory root/: .. code-block:: bash $ cd src $ mkdir build root $ BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=root make install To build both static libbpf.a and shared libbpf.so against a custom libelf dependency installed in /build/root/ and install them together with libbpf headers in a build directory /build/root/: .. code-block:: bash $ cd src $ PKG_CONFIG_PATH=/build/root/lib64/pkgconfig DESTDIR=/build/root make installxdp-tools-1.5.4/lib/libbpf/docs/.gitignore0000644000175100001660000000004114706536574017755 0ustar runnerdockersphinx/build sphinx/doxygen/buildxdp-tools-1.5.4/lib/libbpf/docs/program_types.rst0000644000175100001660000006650514706536574021423 0ustar runnerdocker.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) .. _program_types_and_elf: Program Types and ELF Sections ============================== The table below lists the program types, their attach types where relevant and the ELF section names supported by libbpf for them. The ELF section names follow these rules: - ``type`` is an exact match, e.g. ``SEC("socket")`` - ``type+`` means it can be either exact ``SEC("type")`` or well-formed ``SEC("type/extras")`` with a '``/``' separator between ``type`` and ``extras``. When ``extras`` are specified, they provide details of how to auto-attach the BPF program. The format of ``extras`` depends on the program type, e.g. ``SEC("tracepoint/<category>/<name>")`` for tracepoints or ``SEC("usdt/<path>:<provider>:<name>")`` for USDT probes. The extras are described in more detail in the footnotes.
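To make these section-name rules concrete, here is a small sketch of both forms (the function names are made-up examples; ``SEC("socket")`` is an exact-match section from the table below, and ``sched/sched_switch`` is a standard kernel tracepoint):

.. code-block:: C

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* ``type`` form: exact match, no extras */
    SEC("socket")
    int sock_example(struct __sk_buff *skb)
    {
            return 0;
    }

    /* ``type+`` form: the extras after the '/' separator tell libbpf to
     * auto-attach this program to the sched:sched_switch tracepoint */
    SEC("tracepoint/sched/sched_switch")
    int handle_sched_switch(void *ctx)
    {
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";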
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | Program Type | Attach Type | ELF Section Name | Sleepable | +===========================================+========================================+==================================+===========+ | ``BPF_PROG_TYPE_CGROUP_DEVICE`` | ``BPF_CGROUP_DEVICE`` | ``cgroup/dev`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_CGROUP_SKB`` | | ``cgroup/skb`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET_EGRESS`` | ``cgroup_skb/egress`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET_INGRESS`` | ``cgroup_skb/ingress`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_CGROUP_SOCKOPT`` | ``BPF_CGROUP_GETSOCKOPT`` | ``cgroup/getsockopt`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_SETSOCKOPT`` | ``cgroup/setsockopt`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_CGROUP_SOCK_ADDR`` | ``BPF_CGROUP_INET4_BIND`` | ``cgroup/bind4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET4_CONNECT`` | ``cgroup/connect4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET4_GETPEERNAME`` | ``cgroup/getpeername4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET4_GETSOCKNAME`` | ``cgroup/getsockname4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET6_BIND`` | ``cgroup/bind6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET6_CONNECT`` | ``cgroup/connect6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET6_GETPEERNAME`` | ``cgroup/getpeername6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET6_GETSOCKNAME`` | ``cgroup/getsockname6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UDP4_RECVMSG`` | ``cgroup/recvmsg4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UDP4_SENDMSG`` | ``cgroup/sendmsg4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UDP6_RECVMSG`` | ``cgroup/recvmsg6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UDP6_SENDMSG`` | ``cgroup/sendmsg6`` | | | +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UNIX_CONNECT`` | ``cgroup/connect_unix`` | | | +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UNIX_SENDMSG`` | ``cgroup/sendmsg_unix`` | | | 
+----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UNIX_RECVMSG`` | ``cgroup/recvmsg_unix`` | | | +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UNIX_GETPEERNAME`` | ``cgroup/getpeername_unix`` | | | +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_UNIX_GETSOCKNAME`` | ``cgroup/getsockname_unix`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_CGROUP_SOCK`` | ``BPF_CGROUP_INET4_POST_BIND`` | ``cgroup/post_bind4`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET6_POST_BIND`` | ``cgroup/post_bind6`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET_SOCK_CREATE`` | ``cgroup/sock_create`` | | + + +----------------------------------+-----------+ | | | ``cgroup/sock`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_CGROUP_INET_SOCK_RELEASE`` | ``cgroup/sock_release`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_CGROUP_SYSCTL`` | ``BPF_CGROUP_SYSCTL`` | ``cgroup/sysctl`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_EXT`` | | ``freplace+`` [#fentry]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_FLOW_DISSECTOR`` | ``BPF_FLOW_DISSECTOR`` | ``flow_dissector`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_KPROBE`` | | ``kprobe+`` [#kprobe]_ | | + + +----------------------------------+-----------+ | | | ``kretprobe+`` [#kprobe]_ | | + + +----------------------------------+-----------+ | | | ``ksyscall+`` [#ksyscall]_ | | + + +----------------------------------+-----------+ | | | ``kretsyscall+`` [#ksyscall]_ | | + + +----------------------------------+-----------+ | | | ``uprobe+`` [#uprobe]_ | | + + +----------------------------------+-----------+ | | | ``uprobe.s+`` [#uprobe]_ | Yes | + + +----------------------------------+-----------+ | | | ``uretprobe+`` [#uprobe]_ | | + + +----------------------------------+-----------+ | | | ``uretprobe.s+`` [#uprobe]_ | Yes | + + +----------------------------------+-----------+ | | | ``usdt+`` [#usdt]_ | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TRACE_KPROBE_MULTI`` | ``kprobe.multi+`` [#kpmulti]_ | | + + +----------------------------------+-----------+ | | | ``kretprobe.multi+`` [#kpmulti]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LIRC_MODE2`` | ``BPF_LIRC_MODE2`` | ``lirc_mode2`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LSM`` | ``BPF_LSM_CGROUP`` | ``lsm_cgroup+`` | | + +----------------------------------------+----------------------------------+-----------+ | | 
``BPF_LSM_MAC`` | ``lsm+`` [#lsm]_ | | + + +----------------------------------+-----------+ | | | ``lsm.s+`` [#lsm]_ | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_IN`` | | ``lwt_in`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_OUT`` | | ``lwt_out`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_SEG6LOCAL`` | | ``lwt_seg6local`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | | + + +----------------------------------+-----------+ | | | ``raw_tracepoint.w+`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_RAW_TRACEPOINT`` | | ``raw_tp+`` [#rawtp]_ | | + + +----------------------------------+-----------+ | | | ``raw_tracepoint+`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | | + + +----------------------------------+-----------+ | | | ``tc`` [#tc_legacy]_ | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TCX_EGRESS`` | ``tc/egress`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_MSG`` | ``BPF_SK_MSG_VERDICT`` | ``sk_msg`` | | 
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_REUSEPORT`` | ``BPF_SK_REUSEPORT_SELECT_OR_MIGRATE`` | ``sk_reuseport/migrate`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_SK_REUSEPORT_SELECT`` | ``sk_reuseport`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_SKB`` | | ``sk_skb`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_SK_SKB_STREAM_PARSER`` | ``sk_skb/stream_parser`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_SK_SKB_STREAM_VERDICT`` | ``sk_skb/stream_verdict`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SOCKET_FILTER`` | | ``socket`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | | + + +----------------------------------+-----------+ | | | ``struct_ops.s+`` [#struct_ops]_ | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_TRACEPOINT`` | | ``tp+`` [#tp]_ | | + + +----------------------------------+-----------+ | | | ``tracepoint+`` [#tp]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_TRACING`` | ``BPF_MODIFY_RETURN`` | ``fmod_ret+`` [#fentry]_ | | + + +----------------------------------+-----------+ | | | ``fmod_ret.s+`` [#fentry]_ | Yes | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TRACE_FENTRY`` | ``fentry+`` [#fentry]_ | | + + +----------------------------------+-----------+ | | | ``fentry.s+`` [#fentry]_ | Yes | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TRACE_FEXIT`` | ``fexit+`` [#fentry]_ | | + + +----------------------------------+-----------+ | | | ``fexit.s+`` [#fentry]_ | Yes | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TRACE_ITER`` | ``iter+`` [#iter]_ | | + + +----------------------------------+-----------+ | | | ``iter.s+`` [#iter]_ | Yes | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_TRACE_RAW_TP`` | ``tp_btf+`` [#fentry]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_XDP`` | ``BPF_XDP_CPUMAP`` | ``xdp.frags/cpumap`` | | + + +----------------------------------+-----------+ | | | ``xdp/cpumap`` | | + 
+----------------------------------------+----------------------------------+-----------+ | | ``BPF_XDP_DEVMAP`` | ``xdp.frags/devmap`` | | + + +----------------------------------+-----------+ | | | ``xdp/devmap`` | | + +----------------------------------------+----------------------------------+-----------+ | | ``BPF_XDP`` | ``xdp.frags`` | | + + +----------------------------------+-----------+ | | | ``xdp`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ .. rubric:: Footnotes .. [#fentry] The ``fentry`` attach format is ``fentry[.s]/<function>``. .. [#kprobe] The ``kprobe`` attach format is ``kprobe/<function>[+<offset>]``. Valid characters for ``function`` are ``a-zA-Z0-9_.`` and ``offset`` must be a valid non-negative integer. .. [#ksyscall] The ``ksyscall`` attach format is ``ksyscall/<syscall>``. .. [#uprobe] The ``uprobe`` attach format is ``uprobe[.s]/<path>:<function>[+<offset>]``. .. [#usdt] The ``usdt`` attach format is ``usdt/<path>:<provider>:<name>``. .. [#kpmulti] The ``kprobe.multi`` attach format is ``kprobe.multi/<pattern>`` where ``pattern`` supports ``*`` and ``?`` wildcards. Valid characters for pattern are ``a-zA-Z0-9_.*?``. .. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``. .. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``. .. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use ``tcx/*`` instead. .. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention, but ``name`` is ignored and it is recommended to just use plain ``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer that is tagged with ``SEC(".struct_ops[.link]")``. .. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``. .. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
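As a short illustration of some of these auto-attach formats (``vfs_read`` is a real kernel function and ``openat`` a real syscall, but the handler names are made up; the ``BPF_KPROBE``/``BPF_KSYSCALL`` convenience macros come from libbpf's ``bpf_tracing.h``):

.. code-block:: C

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    /* kprobe/<function>: attach at entry of a kernel function */
    SEC("kprobe/vfs_read")
    int BPF_KPROBE(on_vfs_read)
    {
            return 0;
    }

    /* ksyscall/<syscall>: architecture-independent syscall attachment */
    SEC("ksyscall/openat")
    int BPF_KSYSCALL(on_openat, int dfd, const char *pathname)
    {
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";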
xdp-tools-1.5.4/lib/libbpf/.readthedocs.yaml0000644000175100001660000000100314706536574020263 0ustar runnerdocker# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 build: os: "ubuntu-22.04" tools: python: "3.11" # Build documentation in the docs/ directory with Sphinx sphinx: builder: html configuration: docs/conf.py formats: - htmlzip - pdf - epub # Optionally set the version of Python and requirements required to build your docs python: install: - requirements: docs/sphinx/requirements.txt xdp-tools-1.5.4/lib/libbpf/CHECKPOINT-COMMIT0000644000175100001660000000005114706536574017376 0ustar runnerdockerc6fb8030b4baa01c850f99fc6da051b1017edc46 xdp-tools-1.5.4/lib/libbpf/BPF-CHECKPOINT-COMMIT0000644000175100001660000000005114706536574020003 0ustar runnerdockerd5fb316e2af1d947f0f6c3666e373a54d9f27c6f xdp-tools-1.5.4/lib/libbpf/README.md0000644000175100001660000002047614706536574016332 0ustar runnerdocker libbpf [![Github Actions Builds & Tests](https://github.com/libbpf/libbpf/actions/workflows/test.yml/badge.svg)](https://github.com/libbpf/libbpf/actions/workflows/test.yml) [![Coverity](https://img.shields.io/coverity/scan/18195.svg)](https://scan.coverity.com/projects/libbpf) [![CodeQL](https://github.com/libbpf/libbpf/workflows/CodeQL/badge.svg?branch=master)](https://github.com/libbpf/libbpf/actions?query=workflow%3ACodeQL+branch%3Amaster) [![OSS-Fuzz Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/libbpf.svg)](https://oss-fuzz-build-logs.storage.googleapis.com/index.html#libbpf) [![Read the Docs](https://readthedocs.org/projects/libbpf/badge/?version=latest)](https://libbpf.readthedocs.io/en/latest/) ====== **This is the official home of the libbpf library.** *Please use this Github repository for building and packaging libbpf and when using it in your projects through Git submodule.* Libbpf *authoritative source code* is developed as part of [bpf-next Linux source tree](https://kernel.googlesource.com/pub/scm/linux/kernel/git/bpf/bpf-next) under `tools/lib/bpf` subdirectory and is periodically synced to Github. As such, all the libbpf changes should be sent to [BPF mailing list](http://vger.kernel.org/vger-lists.html#bpf), please don't open PRs here unless you are changing Github-specific parts of libbpf (e.g., Github-specific Makefile). Libbpf and general BPF usage questions ====================================== Libbpf documentation can be found [here](https://libbpf.readthedocs.io/en/latest/api.html). It's an ongoing effort and has ways to go, but please take a look and consider contributing as well. Please check out [libbpf-bootstrap](https://github.com/libbpf/libbpf-bootstrap) and [the companion blog post](https://nakryiko.com/posts/libbpf-bootstrap/) for the examples of building BPF applications with libbpf. [libbpf-tools](https://github.com/iovisor/bcc/tree/master/libbpf-tools) are also a good source of the real-world libbpf-based tracing tools. See also ["BPF CO-RE reference guide"](https://nakryiko.com/posts/bpf-core-reference-guide/) for the coverage of practical aspects of building BPF CO-RE applications and ["BPF CO-RE"](https://nakryiko.com/posts/bpf-portability-and-co-re/) for general introduction into BPF portability issues and BPF CO-RE origins. All general BPF questions, including kernel functionality, libbpf APIs and their application, should be sent to bpf@vger.kernel.org mailing list. 
You can subscribe to it [here](http://vger.kernel.org/vger-lists.html#bpf) and search its archive [here](https://lore.kernel.org/bpf/). Please search the archive before asking new questions. It very well might be that this was already addressed or answered before. bpf@vger.kernel.org is monitored by many more people and they will happily try to help you with whatever issue you have. This repository's PRs and issues should be opened only for dealing with issues pertaining to the specific way this libbpf mirror repo is set up and organized. Building libbpf =============== libelf is an internal dependency of libbpf and thus it is required to link against and must be installed on the system for applications to work. pkg-config is used by default to find libelf, and the program called can be overridden with `PKG_CONFIG`. If using `pkg-config` at build time is not desired, it can be disabled by setting `NO_PKG_CONFIG=1` when calling make. To build both static libbpf.a and shared libbpf.so: ```bash $ cd src $ make ``` To build only static libbpf.a library in directory build/ and install them together with libbpf headers in a staging directory root/: ```bash $ cd src $ mkdir build root $ BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=root make install ``` To build both static libbpf.a and shared libbpf.so against a custom libelf dependency installed in /build/root/ and install them together with libbpf headers in a build directory /build/root/: ```bash $ cd src $ PKG_CONFIG_PATH=/build/root/lib64/pkgconfig DESTDIR=/build/root make install ``` BPF CO-RE (Compile Once – Run Everywhere) ========================================= Libbpf supports building BPF CO-RE-enabled applications, which, in contrast to [BCC](https://github.com/iovisor/bcc/), do not require Clang/LLVM runtime being deployed to target servers and don't rely on kernel-devel headers being available. It does rely on the kernel being built with [BTF type information](https://www.kernel.org/doc/html/latest/bpf/btf.html), though. Some major Linux distributions come with kernel BTF already built in: - Fedora 31+ - RHEL 8.2+ - OpenSUSE Tumbleweed (in the next release, as of 2020-06-04) - Arch Linux (from kernel 5.7.1.arch1-1) - Manjaro (from kernel 5.4 if compiled after 2021-06-18) - Ubuntu 20.10 - Debian 11 (amd64/arm64) If your kernel doesn't come with BTF built-in, you'll need to build a custom kernel. You'll need: - `pahole` 1.16+ tool (part of `dwarves` package), which performs DWARF to BTF conversion; - kernel built with `CONFIG_DEBUG_INFO_BTF=y` option; - you can check if your kernel has BTF built-in by looking for `/sys/kernel/btf/vmlinux` file: ```shell $ ls -la /sys/kernel/btf/vmlinux -r--r--r--. 1 root root 3541561 Jun 2 18:16 /sys/kernel/btf/vmlinux ``` To develop and build BPF programs, you'll need Clang/LLVM 10+. The following distributions have Clang/LLVM 10+ packaged by default: - Fedora 32+ - Ubuntu 20.04+ - Arch Linux - Ubuntu 20.10 (LLVM 11) - Debian 11 (LLVM 11) - Alpine 3.13+ Otherwise, please make sure to update it on your system. The following resources are useful to understand what BPF CO-RE is and how to use it: - [BPF CO-RE reference guide](https://nakryiko.com/posts/bpf-core-reference-guide/) - [BPF Portability and CO-RE](https://nakryiko.com/posts/bpf-portability-and-co-re/) - [HOWTO: BCC to libbpf conversion](https://nakryiko.com/posts/bcc-to-libbpf-howto-guide/) - [libbpf-tools in BCC repo](https://github.com/iovisor/bcc/tree/master/libbpf-tools) contain lots of real-world tools converted from BCC to BPF CO-RE.
Consider converting some more to both contribute to the BPF community and gain some more experience with it. Distributions ============= Distributions packaging libbpf from this mirror: - [Fedora](https://src.fedoraproject.org/rpms/libbpf) - [Gentoo](https://packages.gentoo.org/packages/dev-libs/libbpf) - [Debian](https://packages.debian.org/source/sid/libbpf) - [Arch](https://archlinux.org/packages/core/x86_64/libbpf/) - [Ubuntu](https://packages.ubuntu.com/source/jammy/libbpf) - [Alpine](https://pkgs.alpinelinux.org/packages?name=libbpf) Benefits of packaging from the mirror over packaging from kernel sources: - Consistent versioning across distributions. - No ties to any specific kernel, transparent handling of older kernels. Libbpf is designed to be kernel-agnostic and work across multitude of kernel versions. It has built-in mechanisms to gracefully handle older kernels, that are missing some of the features, by working around or gracefully degrading functionality. Thus libbpf is not tied to a specific kernel version and can/should be packaged and versioned independently. - Continuous integration testing via [GitHub Actions](https://github.com/libbpf/libbpf/actions). - Static code analysis via [LGTM](https://lgtm.com/projects/g/libbpf/libbpf) and [Coverity](https://scan.coverity.com/projects/libbpf). Package dependencies of libbpf, package names may vary across distros: - zlib - libelf [![libbpf distro packaging status](https://repology.org/badge/vertical-allrepos/libbpf.svg)](https://repology.org/project/libbpf/versions) bpf-next to Github sync ======================= All the gory details of syncing can be found in `scripts/sync-kernel.sh` script. See [SYNC.md](SYNC.md) for instruction. Some header files in this repo (`include/linux/*.h`) are reduced versions of their counterpart files at [bpf-next](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/)'s `tools/include/linux/*.h` to make compilation successful. License ======= This work is dual-licensed under BSD 2-clause license and GNU LGPL v2.1 license. You can choose between one of them if you use this work. `SPDX-License-Identifier: BSD-2-Clause OR LGPL-2.1` xdp-tools-1.5.4/lib/Makefile0000644000175100001660000000222015003640462015217 0ustar runnerdocker LIBBPF_CFLAGS:=$(if $(CFLAGS),$(CFLAGS),-g -O2 -Werror -Wall) -fPIC LIB_DIR = . 
include defines.mk SUBDIRS=util testing .PHONY: $(SUBDIRS) all: $(SUBDIRS) libxdp util: libxdp @echo; echo " $@"; $(MAKE) -C $@ testing: libxdp util @echo; echo " $@"; $(MAKE) -C $@ .PHONY: libxdp libxdp: $(OBJECT_LIBBPF) @echo; echo " $@"; $(MAKE) -C $@ .PHONY: clean clean: libbpf_clean @for i in $(SUBDIRS) libxdp; \ do $(MAKE) -C $$i clean; done .PHONY: install install: libxdp_install $(MAKE) -C testing install .PHONY: libxdp_install libxdp_install: libxdp install -m 0755 -d $(DESTDIR)$(HDRDIR) $(MAKE) -C libxdp install libbpf: $(OBJECT_LIBBPF) # Handle libbpf as git submodule ifeq ($(SYSTEM_LIBBPF),n) ifeq ($(VERBOSE),0) P:= >/dev/null endif # Detect submodule libbpf source file changes LIBBPF_SOURCES := $(wildcard libbpf/src/*.[ch]) .PHONY: libbpf_clean libbpf/src/libbpf.a: $(LIBBPF_SOURCES) @echo ; echo " libbpf" $(QUIET_CC)$(MAKE) -C libbpf/src CFLAGS="$(LIBBPF_CFLAGS)" $P $(QUIET_INSTALL)$(MAKE) -C libbpf/src install_headers DESTDIR=root PREFIX=/ $P libbpf_clean: $(Q)$(MAKE) -C libbpf/src clean $P else libbpf_clean: @echo -n endif xdp-tools-1.5.4/configure0000755000175100001660000003464415003640462014737 0ustar runnerdocker#!/bin/sh # SPDX-License-Identifier: GPL-2.0 # This is not an autoconf generated configure # # Output file which is input to Makefile CONFIG_FINAL=config.mk CONFIG=".${CONFIG}.tmp" # Make a temp directory in build tree. TMPDIR=$(mktemp -d config.XXXXXX) trap 'status=$?; rm -rf $TMPDIR; rm -f $CONFIG; exit $status' EXIT HUP INT QUIT TERM check_opts() { : ${PRODUCTION:=0} : ${DYNAMIC_LIBXDP:=0} : ${MAX_DISPATCHER_ACTIONS:=10} : ${BPF_TARGET:=bpf} echo "PRODUCTION:=${PRODUCTION}" >>$CONFIG echo "DYNAMIC_LIBXDP:=${DYNAMIC_LIBXDP}" >>$CONFIG echo "MAX_DISPATCHER_ACTIONS:=${MAX_DISPATCHER_ACTIONS}" >>$CONFIG echo "BPF_TARGET:=${BPF_TARGET}" >>$CONFIG } find_tool() { local tool_name local tool_path local v tool_name="$1" tool_path="$2" if [ "$tool_name" != "$tool_path" ] || command -v "$tool_path" >/dev/null 2>&1; then echo $tool_path return 0 fi # we're looking for a binary with the same name as tool_name; try version # suffixes in order until we find one for v in 17 16 15 14 13 12 11; do tool_path="${tool_name}-$v" if command -v "$tool_path" >/dev/null 2>&1; then echo $tool_path return 0 fi done # Fall back to supplied default, check in caller will error out echo $tool_name } check_toolchain() { local emacs_version local clang_version local bpftool_version : ${PKG_CONFIG:=pkg-config} : ${CC=gcc} : ${OBJCOPY=objcopy} : ${CLANG=clang} : ${M4=m4} : ${EMACS=emacs} : ${BPFTOOL=bpftool} : ${READELF=readelf} : ${ARCH_INCLUDES=} : ${ARCH_NAME=} CLANG=$(find_tool clang "$CLANG") for TOOL in $PKG_CONFIG $CC $OBJCOPY $CLANG $M4 $READELF; do if [ ! $(command -v ${TOOL} 2>/dev/null) ]; then echo "*** ERROR: Cannot find tool ${TOOL}" ; exit 1; fi; done ARCH_NAME=$($CC -print-multiarch 2>/dev/null) clang_version=$($CLANG --version | sed -nE 's/.*clang version ([[:digit:]]+).*/\1/p') if [ "$?" -ne "0" ]; then echo "*** ERROR: Couldn't execute '$CLANG --version'" exit 1 fi echo "Found clang binary '$CLANG' with version $clang_version (from '$($CLANG --version | head -n 1)')" if [ "$clang_version" -lt "11" ]; then echo "*** ERROR: Need LLVM version 11+, '$CLANG' is version $clang_version" [ -n "$RELAXED_LLVM_VERSION" ] || exit 1 fi if ! 
command -v $EMACS >/dev/null 2>&1; then EMACS="" else emacs_major=$($EMACS -Q --batch --eval='(message "%s" emacs-major-version)' 2>&1) if [ -n "$emacs_major" ] && [ "$emacs_major" -ge 26 ]; then echo "using emacs: $EMACS, version $emacs_major" else echo "not using emacs: $EMACS, as it is too old (wanted version >=26, got $emacs_major)" EMACS="" fi fi if [ -z "$EMACS" ] && [ "${FORCE_EMACS:-0}" -eq "1" ]; then echo "FORCE_EMACS is set, but no usable emacs found on system" rm -f "$CONFIG" exit 1 fi if command -v $BPFTOOL &>/dev/null && $BPFTOOL gen help 2>&1 | grep 'gen skeleton.*name' > /dev/null; then bpftool_version=$($BPFTOOL version | head -n 1) echo "using $bpftool_version" else echo "bpftool not found or doesn't support skeleton generation; not building all tools" BPFTOOL= fi if [ -z "$ARCH_INCLUDES" ] && [ -n "$ARCH_NAME" ]; then for dir in $(echo | $CC -Wp,-v -E - 2>&1 | grep '^ '); do local idir idir="${dir}/${ARCH_NAME}/" [ -d "$idir" ] && ARCH_INCLUDES="-I${idir} $ARCH_INCLUDES" done fi echo "PKG_CONFIG:=${PKG_CONFIG}" >>$CONFIG echo "CC:=${CC}" >>$CONFIG echo "OBJCOPY:=${OBJCOPY}" >>$CONFIG echo "CLANG:=${CLANG}" >>$CONFIG echo "M4:=${M4}" >>$CONFIG echo "EMACS:=${EMACS}" >>$CONFIG echo "ARCH_INCLUDES:=$ARCH_INCLUDES" >> $CONFIG echo "READELF:=${READELF}" >> $CONFIG echo "BPFTOOL:=${BPFTOOL}" >> $CONFIG [ -n "$BPFTOOL" ] && echo "HAVE_FEATURES+=BPFTOOL" >>"$CONFIG" } check_zlib() { if ${PKG_CONFIG} zlib --exists; then echo "HAVE_ZLIB:=y" >>$CONFIG echo "yes" echo 'CFLAGS += -DHAVE_ZLIB' `${PKG_CONFIG} zlib --cflags` >> $CONFIG echo 'LDLIBS += ' `${PKG_CONFIG} zlib --libs` >>$CONFIG else echo "missing - this is required" return 1 fi } check_elf() { if ${PKG_CONFIG} libelf --exists; then echo "HAVE_ELF:=y" >>$CONFIG echo "yes" echo 'CFLAGS += -DHAVE_ELF' `${PKG_CONFIG} libelf --cflags` >> $CONFIG echo 'LDLIBS += ' `${PKG_CONFIG} libelf --libs` >>$CONFIG else echo "missing - this is required" return 1 fi } check_pcap() { local libpcap_err if ${PKG_CONFIG} libpcap --exists; then LIBPCAP_CFLAGS=$(${PKG_CONFIG} libpcap --cflags) LIBPCAP_LDLIBS=$(${PKG_CONFIG} libpcap --libs) else LIBPCAP_CFLAGS="" LIBPCAP_LDLIBS="-lpcap" fi cat >$TMPDIR/libpcaptest.c < #include int main(int argc, char **argv) { pcap_t *pcap = pcap_open_live("ifname", 100, 1, 1000, NULL); return 0; } EOF libpcap_err=$($CC -o $TMPDIR/libpcaptest $TMPDIR/libpcaptest.c $LIBPCAP_CFLAGS $LIBPCAP_LDLIBS 2>&1) if [ "$?" -eq "0" ]; then echo "HAVE_PCAP:=y" >>$CONFIG [ -n "$LIBPCAP_CFLAGS" ] && echo 'CFLAGS += ' $LIBPCAP_CFLAGS >> $CONFIG echo "yes" else echo "missing - this is required" echo "error: $libpcap_err" return 1 fi } check_cap_ng() { if ${PKG_CONFIG} libcap-ng --exists; then echo "HAVE_CAP_NG:=y" >>$CONFIG echo "yes" echo 'CAP_NG_CFLAGS:='`${PKG_CONFIG} libcap-ng --cflags` >> $CONFIG echo 'CAP_NG_LDLIBS:='`${PKG_CONFIG} libcap-ng --libs` >>$CONFIG else echo "no" fi } check_libbpf_function() { local FUNCTION_NAME local FUNCTION_ARGS local FUNCTION_DEFS local LIBBPF_CFLAGS local LIBBPF_LDLIBS local config_var FUNCTION_NAME="$1" FUNCTION_ARGS="$2" FUNCTION_DEFS="$3" LIBBPF_CFLAGS="$4" LIBBPF_LDLIBS="$5" config_var="LIBBPF_$(echo $FUNCTION_NAME | tr 'a-z' 'A-Z')" echo -n " $FUNCTION_NAME support: " # If LIBBPF_LDLIBS is empty that means we're using the submodule version of # libbpf. We know it does support all the APIs we're testing for, so we hard # code it as supported. We can't actually run the check as the embedded # libbpf.a has not been built at configure time. 
if [ -z "$LIBBPF_LDLIBS" ]; then echo "HAVE_FEATURES+=${config_var}" >>"$CONFIG" echo "yes (submodule)" return 0; fi # If this is set we don't try to link against libbpf, as it may be in a # different submodule and have not been built yet. Instead, we'll copy over # the header files from the libbpf sources so those are used first, # triggering a compile error if the function we are testing for is missing. if [ -n "$LIBBPF_UNBUILT" ]; then LIBBPF_LDLIBS="-Xlinker --unresolved-symbols=ignore-in-object-files" LIBBPF_CFLAGS="-I$TMPDIR/include" mkdir -p "$TMPDIR/include" cp -r headers/bpf headers/linux headers/xdp "$TMPDIR/include/" cp "$LIBBPF_DIR"/src/bpf.h "$LIBBPF_DIR"/src/btf.h "$LIBBPF_DIR"/src/libbpf*.h "$TMPDIR/include/bpf" [ "$?" -eq 0 ] || return fi cat >$TMPDIR/libbpftest.c < #include #include int main(int argc, char **argv) { ${FUNCTION_DEFS}; ${FUNCTION_NAME}${FUNCTION_ARGS}; return 0; } EOF compile_cmd="$CC -o $TMPDIR/libbpftest $TMPDIR/libbpftest.c -Werror $LIBBPF_CFLAGS $LIBBPF_LDLIBS" libbpf_err=$($compile_cmd 2>&1) if [ "$?" -eq "0" ]; then echo "HAVE_FEATURES+=${config_var}" >>"$CONFIG" echo "yes" else echo "no" fi if [ -n "$DEBUG_CONFIGURE" ]; then echo " $compile_cmd" echo "${libbpf_err}" | sed 's/^/ /gm' fi } check_libbpf_functions() { local LIBBPF_CFLAGS local LIBBPF_LDLIBS LIBBPF_CFLAGS="$1" LIBBPF_LDLIBS="$2" check_libbpf_function "perf_buffer__consume" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "btf__load_from_kernel_by_id" "(0)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "btf__type_cnt" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_object__next_map" "(NULL, NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_object__next_program" "(NULL, NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_program__insn_cnt" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_program__type" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_program__flags" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_program__expected_attach_type" "(NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_map_create" "(0, NULL, 0, 0, 0, NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "perf_buffer__new_raw" "(0, 0, NULL, NULL, NULL, NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_xdp_attach" "(0, 0, 0, NULL)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_map__set_autocreate" "(NULL, false)" "" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_prog_test_run_opts" "(0, &opts)" "DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .batch_size = 1)" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" check_libbpf_function "bpf_xdp_query" "(0, 0, &opts)" "DECLARE_LIBBPF_OPTS(bpf_xdp_query_opts, opts, .feature_flags = 1)" "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" } get_libbpf_version() { local libbpf_dir local version libbpf_dir="$1" if [ -f "${libbpf_dir}/libbpf.map" ]; then version=$(grep -oE '^LIBBPF_([0-9.]+)' "${libbpf_dir}/libbpf.map" | sort -rV | head -n1 | cut -d'_' -f2) else version=unknown fi echo $version } check_libbpf() { local libbpf_err if [ "${FORCE_SUBDIR_LIBBPF:-0}" -ne "1" ] && ${PKG_CONFIG} libbpf --exists && [ -z "$LIBBPF_DIR" ]; then LIBBPF_CFLAGS=$(${PKG_CONFIG} libbpf --cflags) LIBBPF_LDLIBS=$(${PKG_CONFIG} libbpf --libs) LIBBPF_VERSION=$(${PKG_CONFIG} libbpf --modversion) cat >$TMPDIR/libbpftest.c < #include #include int main(int argc, char **argv) { void *ptr; 
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = "/path"); DECLARE_LIBBPF_OPTS(bpf_link_create_opts, lopts, .target_btf_id = 0); (void) bpf_object__open_file("file", &opts); (void) bpf_program__name(ptr); (void) bpf_map__set_initial_value(ptr, ptr, 0); return 0; } EOF libbpf_err=$($CC -o $TMPDIR/libbpftest $TMPDIR/libbpftest.c -Werror $LIBBPF_CFLAGS $LIBBPF_LDLIBS 2>&1) if [ "$?" -eq "0" ]; then echo "SYSTEM_LIBBPF:=y" >>$CONFIG echo "LIBBPF_VERSION=$LIBBPF_VERSION" >>$CONFIG echo 'CFLAGS += ' $LIBBPF_CFLAGS >> $CONFIG echo 'LDLIBS += ' $LIBBPF_LDLIBS >>$CONFIG echo 'OBJECT_LIBBPF = ' >>$CONFIG echo "system v$LIBBPF_VERSION" check_libbpf_functions "$LIBBPF_CFLAGS" "$LIBBPF_LDLIBS" return 0 fi else libbpf_err="${PKG_CONFIG} couldn't find libbpf" fi if [ "${FORCE_SYSTEM_LIBBPF:-0}" -eq "1" ]; then echo "FORCE_SYSTEM_LIBBPF is set, but no usable libbpf found on system" echo "error: $libbpf_err" rm -f "$CONFIG" exit 1 fi if [ -n "$LIBBPF_DIR" ]; then [ -z "$LIBBPF_INCLUDE_DIR" ] && LIBBPF_INCLUDE_DIR="$(readlink -m ${LIBBPF_DIR}/include)" [ -z "$LIBBPF_LIB_DIR" ] && LIBBPF_LIB_DIR="$(readlink -m ${LIBBPF_DIR}/src)" LIBBPF_VERSION=$(get_libbpf_version "$LIBBPF_DIR/src") OBJECT_LIBBPF= echo "custom v$LIBBPF_VERSION" check_libbpf_functions "-I${LIBBPF_INCLUDE_DIR}" "-L${LIBBPF_LIB_DIR} -l:libbpf.a" else if ! [ -d "lib/libbpf/src" ] && [ -f ".gitmodules" ] && [ -e ".git" ]; then git submodule init && git submodule update fi LIBBPF_VERSION=$(get_libbpf_version "lib/libbpf/src") LIBBPF_INCLUDE_DIR='$(LIB_DIR)/libbpf/src/root/include' LIBBPF_LIB_DIR='$(LIB_DIR)/libbpf/src' OBJECT_LIBBPF="${LIBBPF_LIB_DIR}/libbpf.a" echo "submodule v$LIBBPF_VERSION" check_libbpf_functions "" "" fi echo "SYSTEM_LIBBPF:=n" >> $CONFIG echo "LIBBPF_VERSION=$LIBBPF_VERSION" >>$CONFIG echo "CFLAGS += -I${LIBBPF_INCLUDE_DIR}" >>$CONFIG echo "BPF_CFLAGS += -I${LIBBPF_INCLUDE_DIR}" >>$CONFIG echo "LDFLAGS += -L${LIBBPF_LIB_DIR}" >>$CONFIG echo 'LDLIBS += -l:libbpf.a' >>$CONFIG echo "OBJECT_LIBBPF = ${OBJECT_LIBBPF}" >>$CONFIG echo -n "zlib support: " check_zlib || exit 1 echo -n "ELF support: " check_elf || exit 1 echo -n "pcap support: " check_pcap || exit 1 } check_secure_getenv() { cat >$TMPDIR/secure_getenv.c < int main(int argc, char **argv) { secure_getenv("test"); return 0; } EOF secure_getenv_err=$($CC -o $TMPDIR/secure_getenv $TMPDIR/secure_getenv.c 2>&1) if [ "$?" -eq "0" ]; then echo "HAVE_FEATURES += SECURE_GETENV" >>"$CONFIG" echo "yes" else echo "no" fi } quiet_config() { cat <$CONFIG quiet_config >> $CONFIG check_opts check_toolchain echo -n "libbpf support: " check_libbpf echo -n "secure_getenv support: " check_secure_getenv echo -n "cap-ng support: " check_cap_ng if [ -n "$KERNEL_HEADERS" ]; then echo "kernel headers: $KERNEL_HEADERS" echo "CFLAGS += -I$KERNEL_HEADERS" >>$CONFIG echo "BPF_CFLAGS += -I$KERNEL_HEADERS" >>$CONFIG fi mv $CONFIG $CONFIG_FINAL xdp-tools-1.5.4/packaging/0000755000175100001660000000000015003640462014741 5ustar runnerdockerxdp-tools-1.5.4/packaging/rpm/0000755000175100001660000000000015003640462015537 5ustar runnerdockerxdp-tools-1.5.4/packaging/rpm/README.org0000644000175100001660000001145415003640462017212 0ustar runnerdocker#+OPTIONS: ^:nil * Releasing and packaging a new version of xdp-tools These are the steps needed to release a new version of xdp-tools. If any of the steps fail, go back and fix the error, then start over from the appropriate step. If the fix requires changes to the sources, commit those, then rewrite the commit made in (1.) 
on top of this and start over from the beginning. This ensures that we don't end up with a whole series of package revisions just to fix minor errors. ** Steps To release a new version of xdp-tools, follow these steps: 1. Make sure you have valid Kerberos tickets for the Fedora and RHEL infrastructure: #+begin_src sh kinit <username>@FEDORAPROJECT.ORG kinit <username>@REDHAT.COM #+end_src 2. Check out this git repository as well as the RHEL and Fedora dist-gits. Also, make sure you move to the correct branch for RHEL and Fedora. Below is an example of how you could do this: #+begin_src sh export VERSION="" mkdir release_$VERSION cd release_$VERSION git clone git@github.com:xdp-project/xdp-tools.git cd xdp-tools export XDP_TOOLS_DIR=$(pwd) rhpkg clone xdp-tools xdp-tools-RHEL cd xdp-tools-RHEL rhpkg switch-branch -l rhpkg switch-branch rhel-8.3.0 export RHEL_DISTGIT_DIR=$(pwd) cd .. fedpkg clone xdp-tools xdp-tools-FEDORA cd xdp-tools-FEDORA fedpkg switch-branch -l export FEDORA_DISTGIT_DIR=$(pwd) cd .. #+end_src 3. Bump version in =version.mk= and =packaging/rpm/xdp-tools.spec= -- don't forget a changelog entry in the latter. Commit this, but don't tag and push until the rest of the steps below have completed successfully. 4. Run =make archive= to generate a source tarball (xdp-tools-$VERSION.tar.gz). #+begin_src sh cd $XDP_TOOLS_DIR git submodule init git submodule update make archive #+end_src 5. Copy the source tarball to =~/rpmbuild/SOURCES=. #+begin_src sh mkdir -p ~/rpmbuild/SOURCES/ cp ./xdp-tools-$VERSION.tar.gz ~/rpmbuild/SOURCES/ #+end_src 6. Build a local package using =rpmbuild -ba packaging/rpm/xdp-tools.spec=. 7. Check that building a scratch build on Fedora infrastructure works: =cd $FEDORA_DISTGIT_DIR && fedpkg scratch-build --srpm ~/rpmbuild/SRPMS/xdp-tools-$VERSION.fcXX.src.rpm= 8. Sync the xdp-tools.spec file to dist-git (but don't commit anything yet): - For both RHEL and Fedora, copy over the new version, then manually inspect the git diff and revert any changes that undo previous modifications in that distro. For Fedora, this is mainly changelog entries by rebuild bots, and for RHEL it's mainly the changelog, the =__brp_strip= defines and the symlinks to earlier .so versions for libxdp. - For RHEL also manually create a new symlink entry to the previous .so-version and add it to the file list. Make sure to be on the right branch in each dist-git. 9. Create an SRPM and scratch build for RHEL (in RHEL dist-git directory): #+begin_src sh cd $RHEL_DISTGIT_DIR cp ~/rpmbuild/SOURCES/xdp-tools-$VERSION.tar.gz . rhpkg srpm rhpkg scratch-build --srpm xdp-tools-$VERSION.el8.src.rpm #+end_src 10. Upload the new source files to both Fedora and RHEL - this will also update the 'sources' file in each directory, which is why we didn't commit anything earlier: #+begin_src sh cd $FEDORA_DISTGIT_DIR fedpkg new-sources ~/rpmbuild/SOURCES/xdp-tools-$VERSION.tar.gz git add xdp-tools.spec git commit cd $RHEL_DISTGIT_DIR rhpkg new-sources ~/rpmbuild/SOURCES/xdp-tools-$VERSION.tar.gz git add xdp-tools.spec git commit #+end_src For both, check the git history for commit message inspiration. In particular, to be accepted into the RHEL dist-git, the commit message must reference a valid Bugzilla ID. See the commit log for earlier commits for syntax for this. 11. Push the dist-git repositories and request builds for each: #+begin_src sh cd $FEDORA_DISTGIT_DIR git push fedpkg build cd $RHEL_DISTGIT_DIR git push rhpkg build #+end_src 12. Tag the commit in the xdp-tools repo and push the branch and tags to GitHub.
Tag syntax is =v$VERSION=, where =~betaX= becomes =-betaX= (git doesn't allow tildes in tag names). 13. Wait for the CI gating emails to tick in. Check any failures in the CI dashboard and waive and/or fix as necessary. Then talk to QE to have them run the =manual.sst_networking.xdp-tools.tier1= tests and mark it as completed; this will cause the build to be tagged rhel-$VERSION-candidate (from rhel-$VERSION-gate) and allow it to proceed. 14. Add the new build to the errata; this may entail moving the errata status back to =NEW_FILES=. After adding the new build, it should be moved to QE state; if this is not immediately possible, just resolve any issues blocking it. xdp-tools-1.5.4/packaging/rpm/xdp-tools.spec0000644000175100001660000001252215003640462020346 0ustar runnerdockerName: xdp-tools Version: 1.5.4 Release: 1%{?dist} Summary: Utilities and example programs for use with XDP %global _soversion 1.5.0 License: GPL-2.0-only URL: https://github.com/xdp-project/%{name} Source0: https://github.com/xdp-project/%{name}/releases/download/v%{version}/xdp-tools-%{version}.tar.gz BuildRequires: kernel-headers BuildRequires: libbpf-devel BuildRequires: elfutils-libelf-devel BuildRequires: zlib-devel BuildRequires: libpcap-devel BuildRequires: clang >= 10.0.0 BuildRequires: llvm >= 10.0.0 BuildRequires: make BuildRequires: gcc BuildRequires: pkgconfig BuildRequires: m4 BuildRequires: emacs-nox BuildRequires: wireshark-cli BuildRequires: bpftool ExcludeArch: i386 i686 # Always keep xdp-tools and libxdp packages in sync Requires: libxdp = %{version}-%{release} # find-debuginfo produces empty debugsourcefiles.list # disable the debug package to avoid rpmbuild error'ing out because of this %global debug_package %{nil} %global _hardened_build 1 %description Utilities and example programs for use with XDP %package -n libxdp Summary: XDP helper library License: LGPL-2.1-only OR BSD-2-Clause %package -n libxdp-devel Summary: Development files for libxdp License: LGPL-2.1-only OR BSD-2-Clause Requires: kernel-headers Requires: libxdp = %{version}-%{release} %package -n libxdp-static Summary: Static library files for libxdp License: LGPL-2.1-only OR BSD-2-Clause Requires: libxdp-devel = %{version}-%{release} %description -n libxdp The libxdp package contains the libxdp library for managing XDP programs, used by the %{name} package %description -n libxdp-devel The libxdp-devel package contains headers used for building XDP programs using libxdp. %description -n libxdp-static The libxdp-static package contains the static library version of libxdp.
%prep %autosetup -p1 -n %{name}-%{version} %build export CFLAGS='%{build_cflags}' export LDFLAGS='%{build_ldflags}' export LIBDIR='%{_libdir}' export RUNDIR='%{_rundir}' export CLANG=$(which clang) export PRODUCTION=1 export DYNAMIC_LIBXDP=1 export FORCE_SYSTEM_LIBBPF=1 export FORCE_EMACS=1 ./configure make %{?_smp_mflags} V=1 %install export DESTDIR='%{buildroot}' export SBINDIR='%{_sbindir}' export LIBDIR='%{_libdir}' export RUNDIR='%{_rundir}' export MANDIR='%{_mandir}' export DATADIR='%{_datadir}' export HDRDIR='%{_includedir}/xdp' make install V=1 %files %{_sbindir}/xdp-bench %{_sbindir}/xdp-filter %{_sbindir}/xdp-forward %{_sbindir}/xdp-loader %{_sbindir}/xdp-monitor %{_sbindir}/xdp-trafficgen %{_sbindir}/xdpdump %{_mandir}/man8/* %{_libdir}/bpf/xdpfilt_*.o %{_libdir}/bpf/xdpdump_*.o %{_datadir}/xdp-tools/ %license LICENSES/* %files -n libxdp %{_libdir}/libxdp.so.1 %{_libdir}/libxdp.so.%{_soversion} %{_libdir}/bpf/xdp-dispatcher.o %{_libdir}/bpf/xsk_def_xdp_prog*.o %{_mandir}/man3/* %license LICENSES/* %files -n libxdp-static %{_libdir}/libxdp.a %files -n libxdp-devel %{_includedir}/xdp/*.h %{_libdir}/libxdp.so %{_libdir}/pkgconfig/libxdp.pc %changelog * Mon Apr 28 2025 Toke Høiland-Jørgensen 1.5.4-1 - Upstream version bump * Fri Mar 7 2025 Toke Høiland-Jørgensen 1.5.3-1 - Upstream version bump * Wed Feb 19 2025 Toke Høiland-Jørgensen 1.5.2-1 - Upstream version bump * Tue Jan 14 2025 Toke Høiland-Jørgensen 1.5.1-1 - Upstream version bump * Thu Jan 9 2025 Toke Høiland-Jørgensen 1.5.0-1 - Upstream version bump * Tue Aug 6 2024 Toke Høiland-Jørgensen 1.4.3-1 - Upstream version bump * Tue Jan 30 2024 Toke Høiland-Jørgensen 1.4.2-1 - Upstream version bump * Fri Oct 20 2023 Toke Høiland-Jørgensen 1.4.1-1 - Upstream version bump * Thu Jul 6 2023 Toke Høiland-Jørgensen 1.4.0-1 - Upstream version bump * Thu Feb 23 2023 Toke Høiland-Jørgensen 1.3.1-1 - Upstream version bump * Tue Feb 7 2023 Toke Høiland-Jørgensen 1.3.0-1 - Upstream version bump * Thu Jan 20 2022 Toke Høiland-Jørgensen 1.2.2-1 - Upstream version bump * Thu Jan 13 2022 Toke Høiland-Jørgensen 1.2.1-1 - Upstream version bump * Wed Jul 7 2021 Toke Høiland-Jørgensen 1.2.0-1 - Upstream version bump * Wed Feb 3 2021 Toke Høiland-Jørgensen 1.1.1-1 - Upstream version bump * Mon Jan 4 2021 Toke Høiland-Jørgensen 1.1.0-1 - Upstream version bump * Thu Aug 20 2020 Toke Høiland-Jørgensen 1.0.1-1 - Upstream version bump * Tue Aug 18 2020 Toke Høiland-Jørgensen 1.0.0-1 - Upstream version bump * Wed Jul 15 2020 Eelco Chaudron 1.0.0~beta3-0.1 - Upstream version bump * Fri Jul 10 2020 Toke Høiland-Jørgensen 1.0.0~beta2-0.1 - Upstream version bump * Mon Jun 15 2020 Toke Høiland-Jørgensen 1.0.0~beta1-0.1 - Upstream version bump * Mon Apr 6 2020 Toke Høiland-Jørgensen 0.0.3-1 - Upstream update, add libxdp sub-packages * Thu Nov 21 2019 Toke Høiland-Jørgensen 0.0.2-1 - Upstream update * Fri Nov 8 2019 Toke Høiland-Jørgensen 0.0.1-1 - Initial release xdp-tools-1.5.4/.lgtm.yml0000644000175100001660000000022115003640462014554 0ustar runnerdockerextraction: cpp: after_prepare: - export RELAXED_LLVM_VERSION=1 path_classifiers: library: - lib/libbpf/*/* - lib/libbpf/* xdp-tools-1.5.4/.gitmodules0000644000175100001660000000015015003640462015166 0ustar runnerdocker[submodule "libbpf"] path = lib/libbpf url = https://github.com/libbpf/libbpf.git ignore = untracked xdp-tools-1.5.4/.clang-format0000644000175100001660000000744215003640462015377 0ustar runnerdocker# SPDX-License-Identifier: GPL-2.0 # # clang-format configuration file. 
Intended for clang-format >= 4. # # For more information, see: # # Documentation/process/clang-format.rst # https://clang.llvm.org/docs/ClangFormat.html # https://clang.llvm.org/docs/ClangFormatStyleOptions.html # --- AccessModifierOffset: -4 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Left # Unknown to clang-format-4.0 AlignOperands: true AlignTrailingComments: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: None AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: false BinPackArguments: true BinPackParameters: true BraceWrapping: AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: true AfterNamespace: true AfterObjCDeclaration: false AfterStruct: false AfterUnion: false AfterExternBlock: false # Unknown to clang-format-5.0 BeforeCatch: false BeforeElse: false IndentBraces: false SplitEmptyFunction: true # Unknown to clang-format-4.0 SplitEmptyRecord: true # Unknown to clang-format-4.0 SplitEmptyNamespace: true # Unknown to clang-format-4.0 BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0 BreakBeforeTernaryOperators: false BreakConstructorInitializersBeforeComma: false BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0 BreakAfterJavaFieldAnnotations: false BreakStringLiterals: false ColumnLimit: 80 CommentPragmas: '^ IWYU pragma:' CompactNamespaces: false # Unknown to clang-format-4.0 ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 8 ContinuationIndentWidth: 8 Cpp11BracedListStyle: false DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false FixNamespaceComments: false # Unknown to clang-format-4.0 ForEachMacros: - 'FOR_EACH_OPTION' - 'FOR_EACH_MAP_KEY' - 'bpf_object__for_each_map' IncludeBlocks: Preserve # Unknown to clang-format-5.0 IncludeCategories: - Regex: '.*' Priority: 1 IncludeIsMainRegex: '(Test)?$' IndentCaseLabels: false IndentPPDirectives: None # Unknown to clang-format-5.0 IndentWidth: 8 IndentWrappedFunctionNames: false JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: false MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0 ObjCBlockIndentWidth: 8 ObjCSpaceAfterProperty: true ObjCSpaceBeforeProtocolList: true # Taken from git's rules PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0 PenaltyBreakBeforeFirstCallParameter: 30 PenaltyBreakComment: 10 PenaltyBreakFirstLessLess: 0 PenaltyBreakString: 10 PenaltyExcessCharacter: 100 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Right ReflowComments: false SortIncludes: false SortUsingDeclarations: false # Unknown to clang-format-4.0 SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0 SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0 SpaceBeforeParens: ControlStatements SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0 SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 
SpacesInAngles: false SpacesInContainerLiterals: false SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp03 TabWidth: 8 UseTab: Always ...
xdp-tools-1.5.4/xdp-filter/0000755000175100001660000000000015003640462015073 5ustar runnerdockerxdp-tools-1.5.4/xdp-filter/xdpfilt_dny_eth.c0000644000175100001660000000035315003640462020424 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */
#define FILT_MODE_DENY
#define FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#undef FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_dny_eth
#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_dny_udp.c0000644000175100001660000000035315003640462020434 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */
#define FILT_MODE_DENY
#undef FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#define FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_dny_udp
#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_alw_ip.c0000644000175100001660000000035415003640462020246 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */
#define FILT_MODE_ALLOW
#undef FILT_MODE_ETHERNET
#define FILT_MODE_IPV4
#define FILT_MODE_IPV6
#undef FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_alw_ip
#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/README.org0000644000175100001660000002470415003640462016550 0ustar runnerdocker#+EXPORT_FILE_NAME: xdp-filter
#+TITLE: xdp-filter
#+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"A simple XDP-powered packet filter"
# This file serves both as a README on github, and as the source for the man
# page; the latter through the org-mode man page export support.
# .
# To export the man page, simply use the org-mode exporter; (require 'ox-man) if
# it's not available. There's also a Makefile rule to export it.

* xdp-filter - a simple XDP-powered packet filter

XDP-filter is a packet filtering utility powered by XDP. It is deliberately
simple and so does not have the same matching capabilities as, e.g., netfilter.
Instead, thanks to XDP, it can achieve very high drop rates: tens of millions
of packets per second on a single CPU core.

** Running xdp-filter

The syntax for running xdp-filter is:

#+begin_src sh
xdp-filter COMMAND [options]

Where COMMAND can be one of:
       load        - load xdp-filter on an interface
       unload      - unload xdp-filter from an interface
       port        - add a port to the filter list
       ip          - add an IP address to the filter list
       ether       - add an Ethernet MAC address to the filter list
       status      - show current xdp-filter status
       poll        - poll statistics output
       help        - show the list of available commands
#+end_src

Each command, and its options, are explained below. Or use =xdp-filter COMMAND
--help= to see the options for each command.

* The LOAD command

To use =xdp-filter=, it must first be loaded onto an interface. This is
accomplished with the =load= command, which takes the name of the interface as
a parameter, and optionally allows specifying the features that should be
included. By default all features are loaded, but de-selecting some features
can speed up the packet matching, and increase performance by a substantial
amount.

The syntax for the =load= command is:

=xdp-filter load [options] <ifname>=

Where =<ifname>= is the name of the interface to load =xdp-filter= onto, and
must be specified. The supported options are:

** -m, --mode <mode>
Specifies which mode to load the XDP program in.
The valid values are 'native', which is the default in-driver XDP mode, 'skb',
which causes the so-called /skb mode/ (also known as /generic XDP/) to be used,
or 'hw' which causes the program to be offloaded to the hardware.

** -p, --policy <policy>
This sets the policy =xdp-filter= applies to packets *not* matched by any of
the filter rules. The default is /allow/, in which packets not matching any
rules are allowed to pass. The other option is /deny/, in which *all* packets
are dropped *except* those matched by the filter options.

=xdp-filter= cannot be loaded simultaneously in /deny/ and /allow/ policy modes
on the system. Note that loading =xdp-filter= in /deny/ mode will drop all
traffic on the interface until suitable allow rules are installed, so some care
is needed to avoid being locked out of a remote system.

** -f, --features <features>
Use this option to select which features to include when loading =xdp-filter=.
The default is to load all available features. To select individual features,
specify one or more of these:

- *tcp*: Support filtering on TCP port number
- *udp*: Support filtering on UDP port number
- *ipv6*: Support filtering on IPv6 addresses
- *ipv4*: Support filtering on IPv4 addresses
- *ethernet*: Support filtering on Ethernet MAC addresses

Specify multiple features by separating them with a comma. E.g.: =tcp,udp,ipv6=.

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The UNLOAD command

The =unload= command unloads =xdp-filter= from one (or all) interfaces, and
cleans up the program state.

The syntax for the =unload= command is:

=xdp-filter unload [options] <ifname>=

Where =<ifname>= is the name of the interface to unload =xdp-filter= from, and
must be specified unless the *--all* option is used. The supported options are:

** -a, --all
Specify this option to remove =xdp-filter= from all interfaces it was loaded
onto. If this option is specified, no =<ifname>= is needed.

This option can also be used to clean up all =xdp-filter= state if the XDP
program(s) were unloaded by other means.

** -k, --keep-maps
Specify this option to prevent =xdp-filter= from clearing its map state. By
default, all BPF maps no longer needed by any loaded program are removed.
However, this will also remove the contents of the maps (the filtering rules),
so this option can be used to keep the maps around so the rules persist until
=xdp-filter= is loaded again.

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The PORT command

Use the =port= command to add a TCP or UDP port to the =xdp-filter= match list.
For this to work, =xdp-filter= must be loaded with either the *udp* or the
*tcp* feature (or both) on at least one interface.

The syntax for the =port= command is:

=xdp-filter port [options] <port>=

Where =<port>= is the port number to add (or remove if the *--remove* option is
specified). The supported options are:

** -r, --remove
Remove the port instead of adding it.

** -m, --mode <mode>
Select filtering mode. Valid options are *src* and *dst*, both of which may be
specified as =src,dst=. If *src* is specified, the port number will be added as
a /source port/ match, while if *dst* is specified, the port number will be
added as a /destination port/ match. If both are specified, a packet will be
matched if *either* its source or destination port is the specified port
number.

** -p, --proto <proto>
Specify one (or both) of *udp* and/or *tcp* to match UDP or TCP ports,
respectively.
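As a short illustration (the interface name =eth0= and port 53 are placeholder
values, not defaults), the following loads =xdp-filter= with only the UDP
feature and then drops UDP packets whose source or destination port is 53:

#+begin_src sh
# xdp-filter load eth0 -f udp
# xdp-filter port 53 -p udp -m src,dst
#+end_src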
** -s, --status
If this option is specified, the current list of matched ports will be printed
after inserting the port number. Otherwise, nothing will be printed.

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The IP command

Use the =ip= command to add an IPv6 or an IPv4 address to the =xdp-filter=
match list.

The syntax for the =ip= command is:

=xdp-filter ip [options] <addr>=

Where =<addr>= is the IP address to add (or remove if the *--remove* option is
specified). Either IPv4 or IPv6 addresses can be specified, but =xdp-filter=
must be loaded with the corresponding features (*ipv4* and *ipv6*,
respectively). The supported options are:

** -r, --remove
Remove the IP address instead of adding it.

** -m, --mode <mode>
Select filtering mode. Valid options are *src* and *dst*, both of which may be
specified as =src,dst=. If *src* is specified, the IP address will be added as
a /source IP/ match, while if *dst* is specified, the IP address will be added
as a /destination IP/ match. If both are specified, a packet will be matched if
*either* its source or destination IP is the specified IP address.

** -s, --status
If this option is specified, the current list of matched IPs will be printed
after inserting the IP address. Otherwise, nothing will be printed.

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The ETHER command

Use the =ether= command to add an Ethernet MAC address to the =xdp-filter=
match list. For this to work, =xdp-filter= must be loaded with the *ethernet*
feature on at least one interface.

The syntax for the =ether= command is:

=xdp-filter ether [options] <addr>=

Where =<addr>= is the MAC address to add (or remove if the *--remove* option is
specified). The supported options are:

** -r, --remove
Remove the MAC address instead of adding it.

** -m, --mode <mode>
Select filtering mode. Valid options are *src* and *dst*, both of which may be
specified as =src,dst=. If *src* is specified, the MAC address will be added as
a /source MAC/ match, while if *dst* is specified, the MAC address will be
added as a /destination MAC/ match. If both are specified, a packet will be
matched if *either* its source or destination MAC is the specified MAC address.

** -s, --status
If this option is specified, the current list of matched MAC addresses will be
printed after inserting the MAC address. Otherwise, nothing will be printed.

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The STATUS command

The =status= command prints the current status of =xdp-filter=: Which
interfaces it is loaded on, the current list of rules, and some statistics for
how many packets have been processed in total, and how many times each rule has
been hit.

The syntax for the =status= command is:

=xdp-filter status [options]=

Where the supported options are:

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* The POLL command

The =poll= command periodically polls the =xdp-filter= statistics map and
prints out the total number of packets and bytes processed by =xdp-filter=, as
well as the number in the last polling interval, converted to packets (and
bytes) per second. This can be used to inspect the performance of =xdp-filter=,
and to compare the performance of the different feature sets selectable by the
=load= parameter.
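For instance, to poll twice per second instead of the default once per second,
using the =-i= option described below (the 500 ms value is purely
illustrative):

#+begin_src sh
# xdp-filter poll -i 500
#+end_src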
The syntax for the =poll= command is:

=xdp-filter poll [options]=

Where the supported options are:

** -i, --interval <interval>
The polling interval, in milliseconds. Defaults to 1000 (1 second).

** -v, --verbose
Enable debug logging. Specify twice for even more verbosity.

** -h, --help
Display a summary of the available options

* Examples

To filter all packets arriving on port 80 on eth0, issue the following
commands:

#+begin_src sh
# xdp-filter load eth0 -f tcp,udp
# xdp-filter port 80
#+end_src

To filter all packets *except* those from IP address fc00:dead:cafe::1 issue
the following commands (careful, this can lock you out of remote access!):

#+begin_src sh
# xdp-filter load eth0 -f ipv6 -p deny
# xdp-filter ip fc00:dead:cafe::1 -m src
#+end_src

To allow packets from *either* IP fc00:dead:cafe::1 *or* arriving on port 22,
issue the following (careful, this can lock you out of remote access!):

#+begin_src sh
# xdp-filter load eth0 -f ipv6,tcp -p deny
# xdp-filter port 22
# xdp-filter ip fc00:dead:cafe::1 -m src
#+end_src

* BUGS

Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues

* AUTHOR

xdp-filter was written by Toke Høiland-Jørgensen and Jesper Dangaard Brouer.
This man page was written by Toke Høiland-Jørgensen.
xdp-tools-1.5.4/xdp-filter/xdp-filter.80000644000175100001660000002572115003640462017251 0ustar runnerdocker.TH "xdp-filter" "8" "SEPTEMBER 5, 2022" "V1.5.4" "A simple XDP-powered packet filter"
.SH "NAME"
xdp-filter \- a simple XDP-powered packet filter
.SH "SYNOPSIS"
.PP
XDP-filter is a packet filtering utility powered by XDP. It is deliberately
simple and so does not have the same matching capabilities as, e.g., netfilter.
Instead, thanks to XDP, it can achieve very high drop rates: tens of millions
of packets per second on a single CPU core.
.SS "Running xdp-filter"
.PP
The syntax for running xdp-filter is:
.RS
.nf
\fCxdp-filter COMMAND [options]

Where COMMAND can be one of:
       load        - load xdp-filter on an interface
       unload      - unload xdp-filter from an interface
       port        - add a port to the filter list
       ip          - add an IP address to the filter list
       ether       - add an Ethernet MAC address to the filter list
       status      - show current xdp-filter status
       poll        - poll statistics output
       help        - show the list of available commands
\fP
.fi
.RE
.PP
Each command, and its options, are explained below. Or use \fIxdp\-filter
COMMAND \-\-help\fP to see the options for each command.
.SH "The LOAD command"
.PP
To use \fIxdp\-filter\fP, it must first be loaded onto an interface. This is
accomplished with the \fIload\fP command, which takes the name of the interface
as a parameter, and optionally allows specifying the features that should be
included. By default all features are loaded, but de-selecting some features
can speed up the packet matching, and increase performance by a substantial
amount.
.PP
The syntax for the \fIload\fP command is:
.PP
\fIxdp\-filter load [options] <ifname>\fP
.PP
Where \fI<ifname>\fP is the name of the interface to load \fIxdp\-filter\fP
onto, and must be specified. The supported options are:
.SS "-m, --mode <mode>"
.PP
Specifies which mode to load the XDP program in. The valid values are 'native',
which is the default in-driver XDP mode, 'skb', which causes the so-called
\fIskb mode\fP (also known as \fIgeneric XDP\fP) to be used, or 'hw' which
causes the program to be offloaded to the hardware.
.SS "-p, --policy <policy>"
.PP
This sets the policy \fIxdp\-filter\fP applies to packets \fBnot\fP matched by
any of the filter rules.
The default is \fIallow\fP, in which packets not matching any rules are allowed
to pass. The other option is \fIdeny\fP, in which \fBall\fP packets are dropped
\fBexcept\fP those matched by the filter options.
.PP
\fIxdp\-filter\fP cannot be loaded simultaneously in \fIdeny\fP and
\fIallow\fP policy modes on the system. Note that loading \fIxdp\-filter\fP in
\fIdeny\fP mode will drop all traffic on the interface until suitable allow
rules are installed, so some care is needed to avoid being locked out of a
remote system.
.SS "-f, --features <features>"
.PP
Use this option to select which features to include when loading
\fIxdp\-filter\fP. The default is to load all available features. To select
individual features, specify one or more of these:
.IP \(bu 4
\fBtcp\fP: Support filtering on TCP port number
.IP \(bu 4
\fBudp\fP: Support filtering on UDP port number
.IP \(bu 4
\fBipv6\fP: Support filtering on IPv6 addresses
.IP \(bu 4
\fBipv4\fP: Support filtering on IPv4 addresses
.IP \(bu 4
\fBethernet\fP: Support filtering on Ethernet MAC addresses
.PP
Specify multiple features by separating them with a comma. E.g.:
\fItcp,udp,ipv6\fP.
.SS "-v, --verbose"
.PP
Enable debug logging. Specify twice for even more verbosity.
.SS "-h, --help"
.PP
Display a summary of the available options
.SH "The UNLOAD command"
.PP
The \fIunload\fP command unloads \fIxdp\-filter\fP from one (or all)
interfaces, and cleans up the program state.
.PP
The syntax for the \fIunload\fP command is:
.PP
\fIxdp\-filter unload [options] <ifname>\fP
.PP
Where \fI<ifname>\fP is the name of the interface to unload \fIxdp\-filter\fP
from, and must be specified unless the \fB--all\fP option is used. The
supported options are:
.SS "-a, --all"
.PP
Specify this option to remove \fIxdp\-filter\fP from all interfaces it was
loaded onto. If this option is specified, no \fI<ifname>\fP is needed.
.PP
This option can also be used to clean up all \fIxdp\-filter\fP state if the XDP
program(s) were unloaded by other means.
.SS "-k, --keep-maps"
.PP
Specify this option to prevent \fIxdp\-filter\fP from clearing its map state.
By default, all BPF maps no longer needed by any loaded program are removed.
However, this will also remove the contents of the maps (the filtering rules),
so this option can be used to keep the maps around so the rules persist until
\fIxdp\-filter\fP is loaded again.
.SS "-v, --verbose"
.PP
Enable debug logging. Specify twice for even more verbosity.
.SS "-h, --help"
.PP
Display a summary of the available options
.SH "The PORT command"
.PP
Use the \fIport\fP command to add a TCP or UDP port to the \fIxdp\-filter\fP
match list. For this to work, \fIxdp\-filter\fP must be loaded with either the
\fBudp\fP or the \fBtcp\fP feature (or both) on at least one interface.
.PP
The syntax for the \fIport\fP command is:
.PP
\fIxdp\-filter port [options] <port>\fP
.PP
Where \fI<port>\fP is the port number to add (or remove if the \fB--remove\fP
option is specified). The supported options are:
.SS "-r, --remove"
.PP
Remove the port instead of adding it.
.SS "-m, --mode <mode>"
.PP
Select filtering mode. Valid options are \fBsrc\fP and \fBdst\fP, both of which
may be specified as \fIsrc,dst\fP. If \fBsrc\fP is specified, the port number
will be added as a \fIsource port\fP match, while if \fBdst\fP is specified,
the port number will be added as a \fIdestination port\fP match. If both are
specified, a packet will be matched if \fBeither\fP its source or destination
port is the specified port number.
.SS "-p, --proto " .PP Specify one (or both) of \fBudp\fP and/or \fBtcp\fP to match UDP or TCP ports, respectively. .SS "-s, --status" .PP If this option is specified, the current list of matched ports will be printed after inserting the port number. Otherwise, nothing will be printed. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The IP command" .PP Use the \fIip\fP command to add an IPv6 or an IPv4 address to the \fIxdp\-filter\fP match list. .PP The syntax for the \fIip\fP command is: .PP \fIxdp\-filter ip [options] \fP .PP Where \fI\fP is the IP address to add (or remove if the \fB--remove\fP is specified). Either IPv4 or IPv6 addresses can be specified, but \fIxdp\-filter\fP must be loaded with the corresponding features (\fBipv4\fP and \fBipv6\fP, respectively). The supported options are: .SS "-r, --remove" .PP Remove the IP address instead of adding it. .SS "-m, --mode " .PP Select filtering mode. Valid options are \fBsrc\fP and \fBdst\fP, both of which may be specified as \fIsrc,dst\fP. If \fBsrc\fP is specified, the IP address will added as a \fIsource IP\fP match, while if \fBdst\fP is specified, the IP address will be added as a \fIdestination IP\fP match. If both are specified, a packet will be matched if \fBeither\fP its source or destination IP is the specified IP address. .SS "-s, --status" .PP If this option is specified, the current list of matched ips will be printed after inserting the IP address. Otherwise, nothing will be printed. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The ETHER command" .PP Use the \fIether\fP command to add an Ethernet MAC address to the \fIxdp\-filter\fP match list. For this to work, \fIxdp\-filter\fP must be loaded with either the \fBethernet\fP feature on at least one interface. .PP The syntax for the \fIether\fP command is: .PP \fIxdp\-filter ether [options] \fP .PP Where \fI\fP is the MAC address to add (or remove if the \fB--remove\fP is specified). The supported options are: .SS "-r, --remove" .PP Remove the MAC address instead of adding it. .SS "-m, --mode " .PP Select filtering mode. Valid options are \fBsrc\fP and \fBdst\fP, both of which may be specified as \fIsrc,dst\fP. If \fBsrc\fP is specified, the MAC address will added as a \fIsource MAC\fP match, while if \fBdst\fP is specified, the MAC address will be added as a \fIdestination MAC\fP match. If both are specified, a packet will be matched if \fBeither\fP its source or destination MAC is the specified MAC address. .SS "-s, --status" .PP If this option is specified, the current list of matched ips will be printed after inserting the MAC address. Otherwise, nothing will be printed. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The STATUS command" .PP The \fIstatus\fP command prints the current status of \fIxdp\-filter\fP: Which interfaces it is loaded on, the current list of rules, and some statistics for how many packets have been processed in total, and how many times each rule has been hit. .PP The syntax for the \fIstatus\fP command is: .PP \fIxdp\-filter status [options]\fP .PP Where the supported options are: .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. 
.SS "-h, --help" .PP Display a summary of the available options .SH "The POLL command" .PP The \fIpoll\fP command periodically polls the \fIxdp\-filter\fP statistics map and prints out the total number of packets and bytes processed by \fIxdp\-filter\fP, as well as the number in the last polling interval, converted to packets (and bytes) per second. This can be used to inspect the performance of \fIxdp\-filter\fP, and to compare the performance of the different feature sets selectable by the \fIload\fP parameter. .PP The syntax for the \fIpoll\fP command is: .PP \fIxdp\-filter poll [options]\fP .PP Where the supported options are: .SS "-i, --interval " .PP The polling interval, in milliseconds. Defaults to 1000 (1 second). .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "Examples" .PP To filter all packets arriving on port 80 on eth0, issue the following commands: .RS .nf \fC# xdp-filter load eth0 -f tcp,udp # xdp-filter port 80 \fP .fi .RE .PP To filter all packets \fBexcept\fP those from IP address fc00:dead:cafe::1 issue the following commands (careful, this can lock you out of remote access!): .RS .nf \fC# xdp-filter load eth0 -f ipv6 -p deny # xdp-filter ip fc00:dead:cafe::1 -m src \fP .fi .RE .PP To allow packets from \fBeither\fP IP fc00:dead:cafe::1 \fBor\fP arriving on port 22, issue the following (careful, this can lock you out of remote access!): .RS .nf \fC# xdp-filter load eth0 -f ipv6,tcp -p deny # xdp-filter port 22 # xdp-filter ip fc00:dead:cafe::1 -m src \fP .fi .RE .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP xdp-filter was written by Toke Høiland-Jørgensen and Jesper Dangaard Brouer. This man page was written by Toke Høiland-Jørgensen. 
xdp-tools-1.5.4/xdp-filter/xdp-filter.c0000644000175100001660000006564015003640462017330 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "params.h" #include "logging.h" #include "util.h" #include "stats.h" #include "common_kern_user.h" #include "prog_features.h" #define NEED_RLIMIT (20 * 1024 * 1024) /* 10 Mbyte */ #define PROG_NAME "xdp-filter" struct flag_val map_flags_all[] = { {"src", MAP_FLAG_SRC}, {"dst", MAP_FLAG_DST}, {"tcp", MAP_FLAG_TCP}, {"udp", MAP_FLAG_UDP}, {} }; struct flag_val map_flags_srcdst[] = { {"src", MAP_FLAG_SRC}, {"dst", MAP_FLAG_DST}, {} }; struct flag_val map_flags_tcpudp[] = { {"tcp", MAP_FLAG_TCP}, {"udp", MAP_FLAG_UDP}, {} }; static char *find_prog_file(__u32 features) { struct prog_feature *feat; if (!features) return NULL; for (feat = prog_features; feat->prog_name; feat++) { if ((ntohl(feat->features) & features) == features) return strdup(feat->prog_name); } return NULL; } static __u32 find_features(const char *progname) { struct prog_feature *feat; for (feat = prog_features; feat->prog_name; feat++) { if (is_prefix(progname, feat->prog_name)) return ntohl(feat->features); } return 0; } static int map_get_counter_flags(int fd, void *key, __u64 *counter, __u8 *flags) { /* For percpu maps, userspace gets a value per possible CPU */ int nr_cpus = libbpf_num_possible_cpus(); __u64 sum_ctr = 0; int i, err = 0; __u64 *values; if (nr_cpus < 0) return nr_cpus; values = calloc(nr_cpus, sizeof(*values)); if (!values) return -ENOMEM; if ((bpf_map_lookup_elem(fd, key, values)) != 0) { err = -ENOENT; goto out; } /* Sum values from each CPU */ for (i = 0; i < nr_cpus; i++) { __u8 flg = values[i] & MAP_FLAGS; if (!flg) { err = -ENOENT; /* not set */ goto out; } *flags = flg; sum_ctr += values[i] >> COUNTER_SHIFT; } *counter = sum_ctr; out: free(values); return err; } static int map_set_flags(int fd, void *key, __u8 flags, bool delete_empty) { /* For percpu maps, userspace gets a value per possible CPU */ int nr_cpus = libbpf_num_possible_cpus(); __u64 *values; int i, err; if (nr_cpus < 0) return nr_cpus; values = calloc(nr_cpus, sizeof(*values)); if (!values) return -ENOMEM; if (bpf_map_lookup_elem(fd, key, values) != 0) { memset(values, 0, sizeof(*values) * nr_cpus); } else if (!flags && delete_empty) { pr_debug("Deleting empty map value from flags %u\n", flags); err = bpf_map_delete_elem(fd, key); if (err) { err = -errno; pr_warn("Couldn't delete value from state map: %s\n", strerror(-err)); } goto out; } for (i = 0; i < nr_cpus; i++) values[i] = flags ? 
(values[i] & ~MAP_FLAGS) | (flags & MAP_FLAGS) : 0; pr_debug("Setting new map value %" PRIu64 " from flags %u\n", (uint64_t)values[0], flags); err = bpf_map_update_elem(fd, key, values, 0); if (err) { err = -errno; if (err == -E2BIG) pr_warn("Couldn't add entry: state map is full\n"); else pr_warn("Unable to update state map: %s\n", strerror(-err)); } out: free(values); return err; } static int get_iface_features(__unused const struct iface *iface, struct xdp_program *prog, __unused enum xdp_attach_mode mode, void *arg) { __u32 *all_feats = arg; *all_feats |= find_features(xdp_program__name(prog)); return 0; } static int get_used_features(const char *pin_root_path, __u32 *feats) { __u32 all_feats = 0; int err; err = iterate_pinned_programs(pin_root_path, get_iface_features, &all_feats); if (err && err != -ENOENT) return err; *feats = all_feats; return 0; } static const struct loadopt { bool help; struct iface iface; unsigned int features; enum xdp_attach_mode mode; unsigned int policy_mode; } defaults_load = { .features = FEAT_ALL, .mode = XDP_MODE_NATIVE, .policy_mode = FEAT_ALLOW, }; struct flag_val load_features[] = { {"tcp", FEAT_TCP}, {"udp", FEAT_UDP}, {"ipv6", FEAT_IPV6}, {"ipv4", FEAT_IPV4}, {"ethernet", FEAT_ETHERNET}, {"all", FEAT_ALL}, {} }; struct flag_val print_features[] = { {"tcp", FEAT_TCP}, {"udp", FEAT_UDP}, {"ipv6", FEAT_IPV6}, {"ipv4", FEAT_IPV4}, {"ethernet", FEAT_ETHERNET}, {"allow", FEAT_ALLOW}, {"deny", FEAT_DENY}, {} }; struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {NULL, 0} }; struct enum_val policy_modes[] = { {"allow", FEAT_ALLOW}, {"deny", FEAT_DENY}, {NULL, 0} }; static struct prog_option load_options[] = { DEFINE_OPTION("mode", OPT_ENUM, struct loadopt, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("policy", OPT_ENUM, struct loadopt, policy_mode, .short_opt = 'p', .typearg = policy_modes, .metavar = "", .help = "Policy for unmatched packets; default allow"), DEFINE_OPTION("dev", OPT_IFNAME, struct loadopt, iface, .positional = true, .metavar = "", .required = true, .help = "Load on device "), DEFINE_OPTION("features", OPT_FLAGS, struct loadopt, features, .short_opt = 'f', .metavar = "", .typearg = load_features, .help = "Features to enable; default all"), END_OPTIONS }; int do_load(const void *cfg, const char *pin_root_path) { char errmsg[STRERR_BUFSIZE], featbuf[100]; const struct loadopt *opt = cfg; int err = EXIT_SUCCESS, lock_fd; struct xdp_program *p = NULL; unsigned int features; char *filename = NULL; __u32 used_feats; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = pin_root_path); DECLARE_LIBXDP_OPTS(xdp_program_opts, xdp_opts, 0); if (opt->mode == XDP_MODE_HW) { pr_warn("xdp-filter does not support offloading.\n"); return EXIT_FAILURE; } lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; err = get_used_features(pin_root_path, &used_feats); if (err) { pr_warn("Error getting list of loaded programs: %s\n", strerror(-err)); goto out; } err = EXIT_FAILURE; features = opt->features; if (opt->policy_mode == FEAT_DENY && used_feats & FEAT_ALLOW) { pr_warn("xdp-filter is already loaded in allow policy mode. " "Unload before loading in deny mode.\n"); goto out; } else if (opt->policy_mode == FEAT_ALLOW && used_feats & FEAT_DENY) { pr_warn("xdp-filter is already loaded in deny policy mode. 
" "Unload before loading in allow mode.\n"); goto out; } features |= opt->policy_mode; err = get_pinned_program(&opt->iface, pin_root_path, NULL, &p); if (!err) { pr_warn("xdp-filter is already loaded on %s\n", opt->iface.ifname); xdp_program__close(p); goto out; } print_flags(featbuf, sizeof(featbuf), print_features, features); pr_debug("Looking for eBPF program with features %s\n", featbuf); filename = find_prog_file(features); if (!filename) { pr_warn("Couldn't find an eBPF program with the requested feature set!\n"); goto out; } pr_debug("Found prog '%s' matching feature set to be loaded on interface '%s'.\n", filename, opt->iface.ifname); /* libbpf spits out a lot of unhelpful error messages while loading. * Silence the logging so we can provide our own messages instead; this * is a noop if verbose logging is enabled. */ silence_libbpf_logging(); retry: xdp_opts.find_filename = filename; xdp_opts.opts = &opts; /* prog_name is NULL, so choose the first program in object */ p = xdp_program__create(&xdp_opts); err = libxdp_get_error(p); if (err) { if (err == -EPERM && !double_rlimit()) goto retry; libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("Couldn't load BPF program: %s(%d)\n", errmsg, err); p = NULL; goto out; } err = attach_xdp_program(p, &opt->iface, opt->mode, pin_root_path); if (err) { if (err == -EPERM && !double_rlimit()) { xdp_program__close(p); goto retry; } libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("Couldn't attach XDP program on iface '%s': %s(%d)\n", opt->iface.ifname, errmsg, err); goto out; } out: xdp_program__close(p); free(filename); prog_lock_release(lock_fd); return err; } static int remove_unused_maps(const char *pin_root_path, __u32 features) { int dir_fd, err = 0; dir_fd = open(pin_root_path, O_DIRECTORY); if (dir_fd < 0) { if (errno == ENOENT) return 0; err = -errno; pr_warn("Unable to open pin directory %s: %s\n", pin_root_path, strerror(-err)); goto out; } if (!(features & (FEAT_TCP | FEAT_UDP))) { err = unlink_pinned_map(dir_fd, textify(MAP_NAME_PORTS)); if (err) goto out; } if (!(features & FEAT_IPV4)) { err = unlink_pinned_map(dir_fd, textify(MAP_NAME_IPV4)); if (err) goto out; } if (!(features & FEAT_IPV6)) { err = unlink_pinned_map(dir_fd, textify(MAP_NAME_IPV6)); if (err) goto out; } if (!(features & FEAT_ETHERNET)) { err = unlink_pinned_map(dir_fd, textify(MAP_NAME_ETHERNET)); if (err) goto out; } if (!features) { char buf[PATH_MAX]; err = unlink_pinned_map(dir_fd, textify(XDP_STATS_MAP_NAME)); if (err) goto out; close(dir_fd); dir_fd = -1; err = try_snprintf(buf, sizeof(buf), "%s/%s", pin_root_path, "programs"); if (err) goto out; pr_debug("Removing program directory %s\n", buf); err = rmdir(buf); if (err) { err = -errno; pr_warn("Unable to rmdir: %s\n", strerror(-err)); goto out; } pr_debug("Removing pinning directory %s\n", pin_root_path); err = rmdir(pin_root_path); if (err) { err = -errno; pr_warn("Unable to rmdir: %s\n", strerror(-err)); goto out; } } out: if (dir_fd >= 0) close(dir_fd); return err; } static int remove_iface_program(const struct iface *iface, struct xdp_program *prog, enum xdp_attach_mode mode, void *arg) { char errmsg[STRERR_BUFSIZE], buf[100]; char *pin_root_path = arg; __u32 feats; int err; feats = find_features(xdp_program__name(prog)); if (!feats) { pr_warn("Unrecognised XDP program on interface %s. 
Not removing.\n", iface->ifname); return -ENOENT; } print_flags(buf, sizeof(buf), print_features, feats); pr_debug("Removing XDP program with features %s from iface %s\n", buf, iface->ifname); err = detach_xdp_program(prog, iface, mode, pin_root_path); if (err) { libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("Removing XDP program on iface %s failed (%d): %s\n", iface->ifname, -err, errmsg); } return err; } static const struct unloadopt { bool all; bool keep; struct iface iface; } defaults_unload = {}; static struct prog_option unload_options[] = { DEFINE_OPTION("dev", OPT_IFNAME, struct unloadopt, iface, .positional = true, .metavar = "", .help = "Unload from device "), DEFINE_OPTION("all", OPT_BOOL, struct unloadopt, all, .short_opt = 'a', .help = "Unload from all interfaces"), DEFINE_OPTION("keep-maps", OPT_BOOL, struct unloadopt, keep, .short_opt = 'k', .help = "Don't destroy unused maps after unloading"), END_OPTIONS }; int do_unload(const void *cfg, const char *pin_root_path) { const struct unloadopt *opt = cfg; int err = EXIT_SUCCESS, lock_fd; enum xdp_attach_mode mode; struct xdp_program *prog; char buf[100]; __u32 feats; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = pin_root_path); lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; if (opt->all) { pr_debug("Removing xdp-filter from all interfaces\n"); err = iterate_pinned_programs(pin_root_path, remove_iface_program, (void *)pin_root_path); if (err && err != -ENOENT) goto out; goto clean_maps; } if (!opt->iface.ifindex) { pr_warn("Must specify ifname or --all\n"); err = EXIT_FAILURE; goto out; } err = get_pinned_program(&opt->iface, pin_root_path, &mode, &prog); if (err) { pr_warn("xdp-filter is not loaded on %s\n", opt->iface.ifname); err = EXIT_FAILURE; goto out; } err = remove_iface_program(&opt->iface, prog, mode, (void *)pin_root_path); if (err) goto out; clean_maps: if (opt->keep) { pr_debug("Not removing pinned maps because of --keep-maps option\n"); goto out; } pr_debug("Checking map usage and removing unused maps\n"); err = get_used_features(pin_root_path, &feats); if (err) goto out; print_flags(buf, sizeof(buf), print_features, feats); pr_debug("Features still being used: %s\n", feats ? 
buf : "none"); err = remove_unused_maps(pin_root_path, feats); if (err) goto out; out: prog_lock_release(lock_fd); return err; } int print_ports(int map_fd) { __u32 map_key = -1, prev_key = 0; int err; printf("Filtered ports:\n"); printf(" %-40s Mode Hit counter\n", ""); FOR_EACH_MAP_KEY (err, map_fd, map_key, prev_key) { char buf[100]; __u64 counter; __u8 flags = 0; err = map_get_counter_flags(map_fd, &map_key, &counter, &flags); if (err == -ENOENT) continue; else if (err) return err; print_flags(buf, sizeof(buf), map_flags_all, flags); printf(" %-40u %-15s %" PRIu64 "\n", ntohs(map_key), buf, (uint64_t)counter); } return 0; } static const struct portopt { unsigned int mode; unsigned int proto; __u16 port; bool print_status; bool remove; } defaults_port = {}; static struct prog_option port_options[] = { DEFINE_OPTION("port", OPT_U16, struct portopt, port, .positional = true, .metavar = "", .required = true, .help = "Port to add or remove"), DEFINE_OPTION("remove", OPT_BOOL, struct portopt, remove, .short_opt = 'r', .help = "Remove port instead of adding"), DEFINE_OPTION("mode", OPT_FLAGS, struct portopt, mode, .short_opt = 'm', .metavar = "", .typearg = map_flags_srcdst, .help = "Filter mode; default dst"), DEFINE_OPTION("proto", OPT_FLAGS, struct portopt, proto, .short_opt = 'p', .metavar = "", .typearg = map_flags_tcpudp, .help = "Protocol to filter; default tcp,udp"), DEFINE_OPTION("status", OPT_BOOL, struct portopt, print_status, .short_opt = 's', .help = "Print status of filtered ports after changing"), END_OPTIONS }; int do_port(const void *cfg, const char *pin_root_path) { int map_fd = -1, err = EXIT_SUCCESS, lock_fd; char modestr[100], protostr[100]; const struct portopt *opt = cfg; unsigned int proto = opt->proto; unsigned int mode = opt->mode; struct bpf_map_info info = {}; __u8 flags = 0; __u64 counter; __u32 map_key; lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; map_fd = get_pinned_map_fd(pin_root_path, textify(MAP_NAME_PORTS), &info); if (map_fd < 0) { pr_warn("Couldn't find port filter map; is xdp-filter loaded " "with the right features (udp and/or tcp)?\n"); err = EXIT_FAILURE; goto out; } pr_debug("Found map with fd %d for map id %d\n", map_fd, info.id); map_key = htons(opt->port); err = map_get_counter_flags(map_fd, &map_key, &counter, &flags); if (err && err != -ENOENT) goto out; if (opt->remove) { if (mode == 0 && proto == 0) { mode = MAP_FLAG_SRC | MAP_FLAG_DST; proto = MAP_FLAG_TCP | MAP_FLAG_UDP; } flags &= ~(mode | proto); } else { if (mode == 0) mode = MAP_FLAG_DST; if (proto == 0) proto = MAP_FLAG_TCP | MAP_FLAG_UDP; flags |= mode | proto; } print_flags(modestr, sizeof(modestr), map_flags_srcdst, mode); print_flags(protostr, sizeof(protostr), map_flags_tcpudp, proto); pr_debug("%s %s port %u mode %s\n", opt->remove ? 
"Removing" : "Adding", protostr, opt->port, modestr); if (!(flags & (MAP_FLAG_DST | MAP_FLAG_SRC)) || !(flags & (MAP_FLAG_TCP | MAP_FLAG_UDP))) flags = 0; err = map_set_flags(map_fd, &map_key, flags, false); if (err) goto out; if (opt->print_status) { err = print_ports(map_fd); if (err) goto out; } out: if (map_fd >= 0) close(map_fd); prog_lock_release(lock_fd); return err; } int __print_ips(int map_fd, int af) { struct ip_addr map_key = { .af = af }, prev_key = {}; int err; FOR_EACH_MAP_KEY (err, map_fd, map_key.addr, prev_key.addr) { char flagbuf[100], addrbuf[100]; __u8 flags = 0; __u64 counter; err = map_get_counter_flags(map_fd, &map_key.addr, &counter, &flags); if (err == -ENOENT) continue; else if (err) return err; print_flags(flagbuf, sizeof(flagbuf), map_flags_srcdst, flags); print_addr(addrbuf, sizeof(addrbuf), &map_key); printf(" %-40s %-15s %" PRIu64 "\n", addrbuf, flagbuf, (uint64_t)counter); } return 0; } int print_ips() { int map_fd4 = -1, map_fd6 = -1; char pin_root_path[PATH_MAX]; int err = 0; err = get_bpf_root_dir(pin_root_path, sizeof(pin_root_path), PROG_NAME, true); if (err) goto out; map_fd6 = get_pinned_map_fd(pin_root_path, textify(MAP_NAME_IPV6), NULL); map_fd4 = get_pinned_map_fd(pin_root_path, textify(MAP_NAME_IPV4), NULL); if (map_fd4 < 0 && map_fd6 < 0) { err = -ENOENT; goto out; } printf("Filtered IP addresses:\n"); printf(" %-40s Mode Hit counter\n", ""); if (map_fd6 >= 0) { err = __print_ips(map_fd6, AF_INET6); if (err) goto out; } if (map_fd4 >= 0) err = __print_ips(map_fd4, AF_INET); out: if (map_fd4 >= 0) close(map_fd4); if (map_fd6 >= 0) close(map_fd6); return err; } static int __do_address(const char *pin_root_path, const char *map_name, const char *feat_name, void *map_key, bool remove, int mode) { int map_fd = -1, err = 0; __u8 flags = 0; __u64 counter; map_fd = get_pinned_map_fd(pin_root_path, map_name, NULL); if (map_fd < 0) { pr_warn("Couldn't find filter map; is xdp-filter loaded " "with the %s feature?\n", feat_name); err = -ENOENT; goto out; } err = map_get_counter_flags(map_fd, map_key, &counter, &flags); if (err && err != -ENOENT) goto out; if (remove) flags &= ~mode; else flags |= mode; err = map_set_flags(map_fd, map_key, flags, true); if (err) goto out; out: return err ?: map_fd; } static const struct ipopt { unsigned int mode; struct ip_addr addr; bool print_status; bool remove; } defaults_ip = { .mode = MAP_FLAG_DST, }; static struct prog_option ip_options[] = { DEFINE_OPTION("addr", OPT_IPADDR, struct ipopt, addr, .positional = true, .metavar = "", .required = true, .help = "Address to add or remove"), DEFINE_OPTION("remove", OPT_BOOL, struct ipopt, remove, .short_opt = 'r', .help = "Remove address instead of adding"), DEFINE_OPTION("mode", OPT_FLAGS, struct ipopt, mode, .short_opt = 'm', .metavar = "", .typearg = map_flags_srcdst, .help = "Filter mode; default dst"), DEFINE_OPTION("status", OPT_BOOL, struct ipopt, print_status, .short_opt = 's', .help = "Print status of filtered addresses after changing"), END_OPTIONS }; static int do_ip(const void *cfg, const char *pin_root_path) { int map_fd = -1, err = EXIT_SUCCESS, lock_fd; char modestr[100], addrstr[100]; const struct ipopt *opt = cfg; struct ip_addr addr = opt->addr; bool v6; lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; print_flags(modestr, sizeof(modestr), map_flags_srcdst, opt->mode); print_addr(addrstr, sizeof(addrstr), &opt->addr); pr_debug("%s addr %s mode %s\n", opt->remove ? 
"Removing" : "Adding", addrstr, modestr); v6 = (opt->addr.af == AF_INET6); map_fd = __do_address(pin_root_path, v6 ? textify(MAP_NAME_IPV6) : textify(MAP_NAME_IPV4), v6 ? "ipv6" : "ipv4", &addr.addr, opt->remove, opt->mode); if (map_fd < 0) { err = map_fd; goto out; } if (opt->print_status) { err = print_ips(); if (err) goto out; } out: if (map_fd >= 0) close(map_fd); prog_lock_release(lock_fd); return err; } int print_ethers(int map_fd) { struct mac_addr map_key = {}, prev_key = {}; int err; printf("Filtered MAC addresses:\n"); printf(" %-40s Mode Hit counter\n", ""); FOR_EACH_MAP_KEY (err, map_fd, map_key, prev_key) { char modebuf[100], addrbuf[100]; __u8 flags = 0; __u64 counter; err = map_get_counter_flags(map_fd, &map_key, &counter, &flags); if (err == -ENOENT) continue; else if (err) return err; print_flags(modebuf, sizeof(modebuf), map_flags_srcdst, flags); print_macaddr(addrbuf, sizeof(addrbuf), &map_key); printf(" %-40s %-15s %" PRIu64 "\n", addrbuf, modebuf, (uint64_t)counter); } return 0; } static const struct etheropt { unsigned int mode; struct mac_addr addr; bool print_status; bool remove; } defaults_ether = { .mode = MAP_FLAG_DST, }; static struct prog_option ether_options[] = { DEFINE_OPTION("addr", OPT_MACADDR, struct etheropt, addr, .positional = true, .metavar = "", .required = true, .help = "Address to add or remove"), DEFINE_OPTION("remove", OPT_BOOL, struct etheropt, remove, .short_opt = 'r', .help = "Remove address instead of adding"), DEFINE_OPTION("mode", OPT_FLAGS, struct etheropt, mode, .short_opt = 'm', .metavar = "", .typearg = map_flags_srcdst, .help = "Filter mode; default dst"), DEFINE_OPTION("status", OPT_BOOL, struct etheropt, print_status, .short_opt = 's', .help = "Print status of filtered addresses after changing"), END_OPTIONS }; static int do_ether(const void *cfg, const char *pin_root_path) { int err = EXIT_SUCCESS, map_fd = -1, lock_fd; const struct etheropt *opt = cfg; struct mac_addr addr = opt->addr; char modestr[100], addrstr[100]; lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; print_flags(modestr, sizeof(modestr), map_flags_srcdst, opt->mode); print_macaddr(addrstr, sizeof(addrstr), &opt->addr); pr_debug("%s addr %s mode %s\n", opt->remove ? 
"Removing" : "Adding", addrstr, modestr); map_fd = __do_address(pin_root_path, textify(MAP_NAME_ETHERNET), "ethernet", &addr.addr, opt->remove, opt->mode); if (map_fd < 0) { err = map_fd; goto out; } if (opt->print_status) { err = print_ethers(map_fd); if (err) goto out; } out: if (map_fd >= 0) close(map_fd); prog_lock_release(lock_fd); return err; } static struct prog_option status_options[] = { END_OPTIONS }; int print_iface_status(const struct iface *iface, struct xdp_program *prog, enum xdp_attach_mode mode, __unused void *arg) { __u32 feat = 0; int err; printf("%s\n", xdp_program__name(prog)); err = get_iface_features(iface, prog, XDP_MODE_UNSPEC, &feat); if (err) return err; if (feat) { char featbuf[100]; char namebuf[100]; print_flags(featbuf, sizeof(featbuf), print_features, feat); snprintf(namebuf, sizeof(namebuf), "%s (%s mode)", iface->ifname, get_enum_name(xdp_modes, mode)); printf(" %-40s %s\n", namebuf, featbuf); } return 0; } int do_status(__unused const void *cfg, const char *pin_root_path) { int err = EXIT_SUCCESS, map_fd = -1, lock_fd; struct bpf_map_info info = {}; struct stats_record rec = {}; lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; map_fd = get_pinned_map_fd(pin_root_path, textify(XDP_STATS_MAP_NAME), &info); if (map_fd < 0) { err = map_fd; pr_warn("Couldn't find stats map. Maybe xdp-filter is not loaded?\n"); goto out; } rec.stats[XDP_DROP].enabled = true; rec.stats[XDP_PASS].enabled = true; rec.stats[XDP_ABORTED].enabled = true; err = stats_collect(map_fd, info.type, &rec); if (err) goto out; printf("CURRENT XDP-FILTER STATUS:\n\n"); printf("Aggregate per-action statistics:\n"); err = stats_print_one(&rec); if (err) goto out; printf("\n"); printf("Loaded on interfaces:\n"); printf(" %-40s Enabled features\n", ""); err = iterate_pinned_programs(pin_root_path, print_iface_status, NULL); if (err) goto out; printf("\n"); map_fd = get_pinned_map_fd(pin_root_path, textify(MAP_NAME_PORTS), NULL); if (map_fd >= 0) { err = print_ports(map_fd); if (err) goto out; printf("\n"); close(map_fd); map_fd = -1; } err = print_ips(); if (err && err != -ENOENT) goto out; printf("\n"); map_fd = get_pinned_map_fd(pin_root_path, textify(MAP_NAME_ETHERNET), NULL); if (map_fd >= 0) { err = print_ethers(map_fd); if (err) goto out; } printf("\n"); out: if (map_fd >= 0) close(map_fd); prog_lock_release(lock_fd); return err; } static const struct pollopt { __u32 interval; } defaults_poll = { .interval = 1000 }; static struct prog_option poll_options[] = { DEFINE_OPTION("interval", OPT_U32, struct pollopt, interval, .short_opt = 'i', .metavar = "", .help = "Polling interval in milliseconds (default 1000)"), END_OPTIONS }; int do_poll(const void *cfg, const char *pin_root_path) { int err = 0, map_fd = -1, lock_fd; const struct pollopt *opt = cfg; bool exit = false; if (!opt->interval) { err = -EINVAL; pr_warn("Can't use a polling interval of 0\n"); goto out; } lock_fd = prog_lock_acquire(pin_root_path); if (lock_fd < 0) return lock_fd; map_fd = get_pinned_map_fd(pin_root_path, textify(XDP_STATS_MAP_NAME), NULL); if (map_fd < 0) { err = map_fd; pr_warn("Couldn't find stats map. Maybe xdp-filter is not loaded?\n"); prog_lock_release(lock_fd); return EXIT_FAILURE; } prog_lock_release(lock_fd); err = stats_poll(map_fd, opt->interval, &exit, pin_root_path, textify(XDP_STATS_MAP_NAME)); if (err) { pr_warn("Error polling statistics: %s\n", strerror(-err)); goto out; } out: return err ? 
EXIT_FAILURE : EXIT_SUCCESS; } int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: xdp-filter COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " load - load xdp-filter on an interface\n" " unload - unload xdp-filter from an interface\n" " port - add a port to the filter list\n" " ip - add an IP address to the filter list\n" " ether - add an Ethernet MAC address to the filter list\n" " status - show current xdp-filter status\n" " poll - poll statistics output\n" " help - show this help message\n" "\n" "Use 'xdp-filter COMMAND --help' to see options for each command\n"); return -1; } static const struct prog_command cmds[] = { DEFINE_COMMAND(load, "Load xdp-filter on an interface"), DEFINE_COMMAND(unload, "Unload xdp-filter from an interface"), DEFINE_COMMAND(port, "Add or remove ports from xdp-filter"), DEFINE_COMMAND(ip, "Add or remove IP addresses from xdp-filter"), DEFINE_COMMAND(ether, "Add or remove MAC addresses from xdp-filter"), DEFINE_COMMAND(poll, "Poll xdp-filter statistics"), DEFINE_COMMAND_NODEF(status, "Show xdp-filter status"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct loadopt load; struct unloadopt unload; struct portopt port; struct ipopt ip; struct etheropt ether; struct pollopt poll; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, true); return do_help(NULL, NULL); } xdp-tools-1.5.4/xdp-filter/tests/0000755000175100001660000000000015003640462016235 5ustar runnerdockerxdp-tools-1.5.4/xdp-filter/tests/test_basic.py0000644000175100001660000002072415003640462020734 0ustar runnerdockerimport subprocess import os import signal import unittest import scapy from scapy.all import (Ether, Packet, IP, IPv6, Raw, UDP, TCP, IPv6ExtHdrRouting) from xdp_test_harness.xdp_case import XDPCase, usingCustomLoader from xdp_test_harness.utils import XDPFlag from . 
common import XDP_FILTER, Base, get_mode_string @usingCustomLoader class LoadUnload(XDPCase): def setUp(self): self.msg = "WARNING: All tests that follow will likely provide false result.\n" def run_wrap(self, cmd): r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.msg += "command: '" + str(cmd) + "'\n" self.msg += "stdout: '" + r.stdout.decode().strip() + "'\n" if r.stderr is not None: self.msg += "stderr: '" + r.stderr.decode().strip() + "'\n" self.msg += "\n" return r.returncode == 0 def load(self, mode=None): return self.run_wrap([ XDP_FILTER, "load", self.get_contexts().get_local_main().iface, "--verbose", "--mode", get_mode_string( mode if mode else self.get_contexts().get_local_main().xdp_mode ) ]) def unload(self): return self.run_wrap([ XDP_FILTER, "unload", self.get_contexts().get_local_main().iface, "--verbose" ]) def test_load_once(self): self.assertFalse(self.unload(), self.msg) self.assertTrue(self.load(), self.msg) self.assertTrue(self.unload(), self.msg) self.assertFalse(self.unload(), self.msg) def test_load_twice(self): self.assertFalse(self.unload(), self.msg) self.assertTrue(self.load(), self.msg) self.assertFalse(self.load(), self.msg) self.assertTrue(self.unload(), self.msg) self.assertFalse(self.unload(), self.msg) def test_load_hw(self): self.assertFalse(self.unload(), self.msg) self.load(mode=XDPFlag.HW_MODE), self.msg self.unload(), self.msg self.assertFalse(self.unload(), self.msg) class DirectBase: def drop_generic(self, address, target, use_inet6=False): to_send = self.to_send6 if use_inet6 else self.to_send self.arrived(to_send, self.send_packets(to_send)) subprocess.run([XDP_FILTER, target, address, "--mode", self.get_mode()]) self.not_arrived(to_send, self.send_packets(to_send)) subprocess.run([XDP_FILTER, target, address, "--mode", self.get_mode(), "--remove"]) self.arrived(to_send, self.send_packets(to_send)) def test_none_specified(self): self.arrived(self.to_send, self.send_packets(self.to_send)) def test_ether(self): self.drop_generic(self.get_device().ether, "ether") def test_ip(self): self.drop_generic(self.get_device().inet, "ip") def test_port(self): self.drop_generic(str(self.get_port()), "port") @unittest.skipIf(XDPCase.get_contexts().get_local_main().inet6 is None or XDPCase.get_contexts().get_remote_main().inet6 is None, "no inet6 address available") def test_ipv6(self): self.drop_generic(self.get_device().inet6, "ip", use_inet6=True) class BaseSrc: def get_device(self): return self.get_contexts().get_remote_main() def get_port(self): return self.src_port def get_mode(self): return "src" class BaseDst: def get_device(self): return self.get_contexts().get_local_main() def get_port(self): return self.dst_port def get_mode(self): return "dst" class BaseInvert: def setUp(self): subprocess.run([ XDP_FILTER, "load", "--policy", "deny", self.get_contexts().get_local_main().iface, "--mode", get_mode_string( self.get_contexts().get_local_main().xdp_mode ) ]) arrived = Base.not_arrived not_arrived = Base.arrived class DirectDropSrc(Base, DirectBase, BaseSrc): pass class DirectPassSrc(Base, DirectBase, BaseSrc, BaseInvert): pass class DirectDropDst(Base, DirectBase, BaseDst): pass class DirectPassDst(Base, DirectBase, BaseDst, BaseInvert): pass class IPv6ExtensionHeader(Base): def generic(self, extensions): packets = [Ether() / IPv6() / extensions / UDP(dport=55555)] * 5 self.arrived(packets, self.send_packets(packets)) subprocess.run([XDP_FILTER, "port", "55555", "--mode", "dst"]) self.not_arrived(packets, 
                         self.send_packets(packets))
        subprocess.run([XDP_FILTER, "port", "55555",
                        "--mode", "dst", "--remove"])
        self.arrived(packets, self.send_packets(packets))

    def test_routing(self):
        self.generic(scapy.layers.inet6.IPv6ExtHdrRouting())

    def test_hop_by_hop(self):
        self.generic(scapy.layers.inet6.IPv6ExtHdrHopByHop())

    def test_destination_options(self):
        self.generic(scapy.layers.inet6.IPv6ExtHdrDestOpt())

    def test_fragment(self):
        self.generic(scapy.layers.inet6.IPv6ExtHdrFragment())


class IPv4ToIPv6Mapping(Base):
    def setUp(self):
        super().setUp()

        inet = self.get_contexts().get_local_main().inet
        self.address_explicit = "::ffff:" + inet
        inet6_split = [format(int(i), "02x") for i in inet.split(".")]
        self.address_converted = "::ffff:" + \
            inet6_split[0] + inet6_split[1] + ":" + \
            inet6_split[2] + inet6_split[3]
        self.packets = self.generate_default_packets(
            dst_inet=self.address_explicit, use_inet6=True)
        self.packets += self.generate_default_packets(
            dst_inet=self.address_converted, use_inet6=True)

    def test_filter_explicit_address(self):
        self.arrived(self.packets, self.send_packets(self.packets))
        subprocess.run([XDP_FILTER, "ip", self.address_explicit,
                        "--mode", "dst"])
        self.not_arrived(self.packets, self.send_packets(self.packets))
        subprocess.run([XDP_FILTER, "ip", self.address_explicit,
                        "--mode", "dst", "--remove"])
        self.arrived(self.packets, self.send_packets(self.packets))

    def test_filter_converted_address(self):
        self.arrived(self.packets, self.send_packets(self.packets))
        subprocess.run([XDP_FILTER, "ip", self.address_converted,
                        "--mode", "dst"])
        self.not_arrived(self.packets, self.send_packets(self.packets))
        subprocess.run([XDP_FILTER, "ip", self.address_converted,
                        "--mode", "dst", "--remove"])
        self.arrived(self.packets, self.send_packets(self.packets))


class Status(Base):
    def setUp(self):
        pass

    def load(self, features):
        return subprocess.run([
            XDP_FILTER, "load",
            self.get_contexts().get_local_main().iface,
            "--mode", get_mode_string(
                self.get_contexts().get_local_main().xdp_mode
            ),
            "--features", features,
        ])

    def get_status(self):
        return subprocess.run(
            [XDP_FILTER, "status"], capture_output=True
        ).stdout.decode()

    def test_ethernet_feature(self):
        self.load("ethernet")
        self.check_status("ether", self.get_contexts().get_local_main().ether)

    def test_ipv4_feature(self):
        self.load("ipv4")
        self.check_status("ip", self.get_contexts().get_local_main().inet)

    def test_udp_feature(self):
        self.load("udp")
        self.check_status("port", str(self.dst_port))

    def test_all_features(self):
        self.load("all")
        self.check_status("ether", self.get_contexts().get_local_main().ether)
        self.check_status("ip", self.get_contexts().get_local_main().inet)
        self.check_status("port", str(self.dst_port))

    def check_status(self, subcommand, address):
        self.assertEqual(self.get_status().find(address), -1)
        subprocess.run([XDP_FILTER, subcommand, address])
        self.assertNotEqual(self.get_status().find(address), -1)
        subprocess.run([XDP_FILTER, subcommand, address, "--remove"])
        self.assertEqual(self.get_status().find(address), -1)
xdp-tools-1.5.4/xdp-filter/tests/common.py0000644000175100001660000000317215003640462020102 0ustar runnerdocker
import os
import subprocess

from xdp_test_harness.xdp_case import XDPCase, usingCustomLoader
from xdp_test_harness.utils import XDPFlag

XDP_FILTER = os.environ.get("XDP_FILTER", "xdp-filter")


def get_mode_string(xdp_mode: XDPFlag):
    if xdp_mode == XDPFlag.SKB_MODE:
        return "skb"
    if xdp_mode == XDPFlag.DRV_MODE:
        return "native"
    if xdp_mode == XDPFlag.HW_MODE:
        return "hw"
    return None


@usingCustomLoader
class Base(XDPCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.src_port = 60001
        cls.dst_port = 60002
        cls.to_send = cls.generate_default_packets(
            src_port=cls.src_port, dst_port=cls.dst_port)
        cls.to_send6 = cls.generate_default_packets(
            src_port=cls.src_port, dst_port=cls.dst_port, use_inet6=True)

    def arrived(self, packets, result):
        self.assertPacketsIn(packets, result.captured_local)
        for i in result.captured_remote:
            self.assertPacketContainerEmpty(i)

    def not_arrived(self, packets, result):
        self.assertPacketsNotIn(packets, result.captured_local)
        for i in result.captured_remote:
            self.assertPacketContainerEmpty(i)

    def setUp(self):
        subprocess.check_output([
            XDP_FILTER, "load",
            self.get_contexts().get_local_main().iface,
            "--mode", get_mode_string(
                self.get_contexts().get_local_main().xdp_mode
            )
        ], stderr=subprocess.STDOUT)

    def tearDown(self):
        subprocess.check_output([
            XDP_FILTER, "unload", "--all"
        ], stderr=subprocess.STDOUT)
xdp-tools-1.5.4/xdp-filter/tests/test_slow.py0000644000175100001660000001004015003640462020625 0ustar runnerdocker
import subprocess
import os
import signal
import unittest
from scapy.all import (Ether, Packet, IP, IPv6, Raw, UDP, TCP,
                       IPv6ExtHdrRouting)

from xdp_test_harness.xdp_case import XDPCase, usingCustomLoader
from xdp_test_harness.utils import XDPFlag

from . common import Base, XDP_FILTER, get_mode_string


class ManyAddresses(Base):
    def format_number(self, number,
                      delimiter, format_string,
                      part_size, parts_amount):
        splitted = []
        while number > 0:
            splitted.append(int(number % (1 << part_size)))
            number = number >> part_size

        assert(len(splitted) <= parts_amount)
        if (len(splitted) < parts_amount):
            splitted += [0] * (parts_amount - len(splitted))

        splitted.reverse()
        return delimiter.join(format(s, format_string) for s in splitted)

    def generate_addresses(self, delimiter, format_string,
                           parts_amount, full_size):
        AMOUNT = 257

        bits = parts_amount * full_size
        for gen_number in range(0, (1 << bits) - 1, int((1 << bits) / AMOUNT)):
            yield self.format_number(gen_number, delimiter, format_string,
                                     parts_amount, full_size)

    def filter_addresses(self, name, delimiter, format_string,
                         parts_amount, full_size):
        summed = 0
        for address in self.generate_addresses(delimiter, format_string,
                                               parts_amount, full_size):
            summed += 1
            subprocess.run([XDP_FILTER, name, address, "--mode", "dst"])

        output = subprocess.check_output([XDP_FILTER, "status"])
        # Each address is on a separate line.
        self.assertGreaterEqual(len(output.splitlines()), summed)

    def get_invalid_address(self, name, delimiter, format_string,
                            parts_amount, full_size):
        """
        Try to add addresses to xdp-filter,
        return address that does not get added.
        """
        last_length = subprocess.check_output([XDP_FILTER, "status"])
        for address in self.generate_addresses(delimiter, format_string,
                                               parts_amount, full_size):
            new_length = subprocess.check_output(
                [XDP_FILTER, name, address, "--mode", "dst", "--status"])
            if new_length == last_length:
                return address
            last_length = new_length

        return None

    def test_ip_arrive(self):
        missing = self.get_invalid_address("ip", ".", "d", 8, 4)
        if missing is None:
            return

        to_send = self.generate_default_packets(dst_inet=missing)
        res = self.send_packets(to_send)
        self.not_arrived(to_send, res)

    def test_ether_arrive(self):
        missing = self.get_invalid_address("ether", ":", "02x", 8, 6)
        if missing is None:
            return

        to_send = self.generate_default_packets(dst_ether=missing)
        res = self.send_packets(to_send)
        self.not_arrived(to_send, res)

    def test_port_arrive(self):
        missing = self.get_invalid_address("port", "", "d", 16, 1)
        if missing is None:
            return

        to_send = self.generate_default_packets(dst_port=missing)
        res = self.send_packets(to_send)
        self.not_arrived(to_send, res)

    def test_ip_status(self):
        self.filter_addresses("ip", ".", "d", 8, 4)

    def test_port_status(self):
        self.filter_addresses("port", "", "d", 16, 1)

    def test_ether_status(self):
        self.filter_addresses("ether", ":", "02x", 8, 6)


class ManyAddressesInverted(ManyAddresses):
    def setUp(self):
        subprocess.run([
            XDP_FILTER, "load",
            "--policy", "deny",
            self.get_contexts().get_local_main().iface,
            "--mode", get_mode_string(
                self.get_contexts().get_local_main().xdp_mode
            )
        ])

    arrived = Base.not_arrived
    not_arrived = Base.arrived
xdp-tools-1.5.4/xdp-filter/tests/test-xdp-filter.sh0000644000175100001660000002337515003640462021632 0ustar runnerdocker
XDP_LOADER=${XDP_LOADER:-./xdp-loader}
XDP_FILTER=${XDP_FILTER:-./xdp-filter}
ALL_TESTS="test_load test_print test_output_remove test_ports_allow test_ports_deny test_ipv6_allow test_ipv6_deny test_ipv4_allow test_ipv4_deny test_ether_allow test_ether_deny test_python_basic test_python_slow"

try_feat()
{
    local output feat=$1 prog=$2
    shift 2

    output=$($XDP_FILTER load $NS --features $feat "$@" -v 2>&1)
    ret=$?
    if [ "$ret" -ne "0" ]; then
        return $ret
    fi
    echo "$output"

    regex="Found prog '$prog'"
    if ! [[ $output =~ $regex ]]; then
        echo
        echo "Couldn't find '$regex' in output for feat $feat" >&2
        return 1
    fi

    check_run $XDP_FILTER unload $NS -v
}

test_load()
{
    declare -a FEATS=(tcp udp ipv4 ipv6 ethernet all)
    declare -a PROGS_D=(xdpfilt_dny_tcp.o xdpfilt_dny_udp.o xdpfilt_dny_ip.o xdpfilt_dny_ip.o xdpfilt_dny_eth.o xdpfilt_dny_all.o)
    declare -a PROGS_A=(xdpfilt_alw_tcp.o xdpfilt_alw_udp.o xdpfilt_alw_ip.o xdpfilt_alw_ip.o xdpfilt_alw_eth.o xdpfilt_alw_all.o)
    local len=${#FEATS[@]}

    for (( i=0; i<$len; i++ )); do
        if ! try_feat ${FEATS[$i]} ${PROGS_A[$i]}; then
            return 1
        fi
        if ! try_feat ${FEATS[$i]} ${PROGS_A[$i]} --mode skb; then
            return 1
        fi
        if ! try_feat ${FEATS[$i]} ${PROGS_D[$i]} --policy deny; then
            return 1
        fi
        if ! try_feat ${FEATS[$i]} ${PROGS_D[$i]} --policy deny --mode skb; then
            return 1
        fi
    done

    if [ -d /sys/fs/bpf/xdp-filter ]; then
        die "/sys/fs/bpf/xdp-filter still exists!"
    fi
}

check_packet()
{
    local filter="$1"
    local command="$2"
    local expect="$3"

    echo "Checking command '$command' filter '$filter'"
    PID=$(start_background tcpdump --immediate-mode -epni $NS "$filter")
    echo "Started listener as $PID"
    ns_exec bash -c "$command"
    sleep 1

    output=$(stop_background $PID)
    echo "$output"

    if [[ "$expect" == "OK" ]]; then
        regex="[1-9] packets? captured"
    else
        regex="0 packets captured"
    fi

    if [[ "$output" =~ $regex ]]; then
        echo "Packet check $expect SUCCESS"
        return 0
    else
        echo "Packet check $expect FAILURE"
        exit 1
    fi
}

check_port()
{
    local type=$1
    local port=$2
    local expect=$3

    echo "$type port $port $expect"
    [[ "$type" == "tcp" ]] && command="echo test | socat - TCP6:[$OUTSIDE_IP6]:$port,connect-timeout=1"
    [[ "$type" == "udp" ]] && command="echo test | socat - UDP6:[$OUTSIDE_IP6]:$port"

    check_packet "$type dst port $port" "$command" $expect
}

test_ports_allow()
{
    local TEST_PORT=10000

    # default allow mode
    check_run $XDP_FILTER load -f udp,tcp $NS -v
    check_port tcp $TEST_PORT OK
    check_port udp $TEST_PORT OK
    check_run $XDP_FILTER port $TEST_PORT -v
    check_port tcp $TEST_PORT NOTOK
    check_port tcp $[TEST_PORT+1] OK
    check_port udp $TEST_PORT NOTOK
    check_port udp $[TEST_PORT+1] OK
    check_run $XDP_FILTER port -r $TEST_PORT -v
    check_port tcp $TEST_PORT OK
    check_port udp $TEST_PORT OK
    check_run $XDP_FILTER unload $NS -v
}

test_ports_deny()
{
    local TEST_PORT=10000

    # default deny mode
    check_run $XDP_FILTER load -p deny -f udp,tcp $NS -v
    check_port tcp $TEST_PORT NOTOK
    check_port udp $TEST_PORT NOTOK
    check_run $XDP_FILTER port $TEST_PORT -v
    check_port tcp $TEST_PORT OK
    check_port tcp $[TEST_PORT+1] NOTOK
    check_port udp $TEST_PORT OK
    check_port udp $[TEST_PORT+1] NOTOK
    check_run $XDP_FILTER port -r $TEST_PORT -v
    check_port tcp $TEST_PORT NOTOK
    check_port udp $TEST_PORT NOTOK
    check_run $XDP_FILTER unload $NS -v
}

check_ping6()
{
    check_packet "dst $OUTSIDE_IP6" "$PING6 -c 1 $OUTSIDE_IP6" $1
}

test_ipv6_allow()
{
    check_ping6 OK
    check_run $XDP_FILTER load -f ipv6 $NS -v
    check_run $XDP_FILTER ip $OUTSIDE_IP6
    check_ping6 NOTOK
    check_run $XDP_FILTER ip -r $OUTSIDE_IP6
    check_ping6 OK
    check_run $XDP_FILTER ip -m src $INSIDE_IP6
    check_ping6 NOTOK
    check_run $XDP_FILTER ip -m src -r $INSIDE_IP6
    check_ping6 OK
    check_run $XDP_FILTER unload $NS -v
}

test_ipv6_deny()
{
    check_ping6 OK
    check_run $XDP_FILTER load -p deny -f ipv6 $NS -v
    check_run $XDP_FILTER ip $OUTSIDE_IP6
    check_ping6 OK
    check_run $XDP_FILTER ip -r $OUTSIDE_IP6
    check_ping6 NOTOK
    check_run $XDP_FILTER ip -m src $INSIDE_IP6
    check_ping6 OK
    check_run $XDP_FILTER ip -m src -r $INSIDE_IP6
    check_ping6 NOTOK
    check_run $XDP_FILTER unload $NS -v
}

check_ping4()
{
    check_packet "dst $OUTSIDE_IP4" "ping -c 1 $OUTSIDE_IP4" $1
}

test_ipv4_allow()
{
    check_ping4 OK
    check_run $XDP_FILTER load -f ipv4 $NS -v
    check_run $XDP_FILTER ip $OUTSIDE_IP4
    check_ping4 NOTOK
    check_run $XDP_FILTER ip -r $OUTSIDE_IP4
    check_ping4 OK
    check_run $XDP_FILTER ip -m src $INSIDE_IP4
    check_ping4 NOTOK
    check_run $XDP_FILTER ip -m src -r $INSIDE_IP4
    check_ping4 OK
    check_run $XDP_FILTER unload $NS -v
}

test_ipv4_deny()
{
    check_ping4 OK
    check_run $XDP_FILTER load -p deny -f ipv4 $NS -v
    check_run $XDP_FILTER ip $OUTSIDE_IP4
    check_ping4 OK
    check_run $XDP_FILTER ip -r $OUTSIDE_IP4
    check_ping4 NOTOK
    check_run $XDP_FILTER ip -m src $INSIDE_IP4
    check_ping4 OK
    check_run $XDP_FILTER ip -m src -r $INSIDE_IP4
    check_ping4 NOTOK
    check_run $XDP_FILTER unload $NS -v
}

test_ether_allow()
{
    check_ping6 OK
    check_run $XDP_FILTER load -f ethernet $NS -v
    check_run $XDP_FILTER ether $OUTSIDE_MAC
    check_ping6 NOTOK
    check_run $XDP_FILTER ether -r $OUTSIDE_MAC
    check_ping6 OK
    check_run $XDP_FILTER ether -m src $INSIDE_MAC
    check_ping6 NOTOK
    check_run $XDP_FILTER ether -m src -r $INSIDE_MAC
    check_ping6 OK
    check_run $XDP_FILTER unload $NS -v
}

test_ether_deny()
{
    check_ping6 OK
    check_run $XDP_FILTER load -p deny -f ethernet $NS -v
    check_run $XDP_FILTER ether $OUTSIDE_MAC
    check_ping6 OK
    check_run $XDP_FILTER ether -r $OUTSIDE_MAC
    check_ping6 NOTOK
    check_run $XDP_FILTER ether -m src $INSIDE_MAC
    check_ping6 OK
    check_run $XDP_FILTER ether -m src -r $INSIDE_MAC
    check_ping6 NOTOK
    check_run $XDP_FILTER unload $NS -v
}

check_status()
{
    local match
    local output

    match="$1"
    output=$($XDP_FILTER status)

    if echo "$output" | grep -q $match; then
        echo "Output check for $match SUCCESS"
        return 0
    else
        echo "Output check for $match FAILURE"
        echo "Output: $output"
        exit 1
    fi
}

check_status_no_match()
{
    local match
    local output

    match="$1"
    output=$($XDP_FILTER status)

    if echo "$output" | grep -q $match; then
        echo "Output check for no $match FAILURE"
        echo "Output: $output"
        exit 1
    else
        echo "Output check for no $match SUCCESS"
        return 0
    fi
}

test_print()
{
    check_run $XDP_FILTER load $NS -v
    check_run $XDP_FILTER ether aa:bb:cc:dd:ee:ff
    check_status "aa:bb:cc:dd:ee:ff"
    check_run $XDP_FILTER ip 1.2.3.4
    check_status "1.2.3.4"
    check_run $XDP_FILTER ip aa::bb
    check_status "aa::bb"
    check_run $XDP_FILTER port 100
    check_status "100.*dst,tcp,udp"
    check_run $XDP_FILTER unload $NS -v
}

check_port_removal_from_all()
{
    local command_options=$1
    local expected_output=$2
    local TEST_PORT=54321

    check_run $XDP_FILTER port $TEST_PORT -p tcp,udp -m src,dst
    check_status "$TEST_PORT.*src,dst,tcp,udp"
    check_run $XDP_FILTER port $TEST_PORT $command_options -r
    if [[ -z "$expected_output" ]]; then
        check_status_no_match "$TEST_PORT"
    else
        check_status "$TEST_PORT.*$expected_output"
    fi
}

test_output_remove()
{
    check_run $XDP_FILTER load $NS -v

    # Remove only one mode/proto.
    check_port_removal_from_all "-m src" "dst,tcp,udp"
    check_port_removal_from_all "-m dst" "src,tcp,udp"
    check_port_removal_from_all "-p udp" "src,dst,tcp"
    check_port_removal_from_all "-p tcp" "src,dst,udp"

    # Remove one from each.
    check_port_removal_from_all "-m src -p udp" "dst,tcp"
    check_port_removal_from_all "-m src -p tcp" "dst,udp"
    check_port_removal_from_all "-m dst -p udp" "src,tcp"
    check_port_removal_from_all "-m dst -p tcp" "src,udp"

    # Remove everything.
    check_port_removal_from_all "" ""
    check_port_removal_from_all "-m src,dst" ""
    check_port_removal_from_all "-p tcp,udp" ""
    check_port_removal_from_all "-m src,dst -p tcp,udp" ""

    check_run $XDP_FILTER unload $NS -v
}

get_python()
{
    if [[ -z "${PYTHON:-}" ]]; then
        local -a possible=(python3 python)
        local -a available
        local found=0

        for i in "${possible[@]}"; do
            PYTHON=$(which $i)
            if [[ $? -eq 0 ]]; then
                found=1
                break
            fi
        done

        if [[ found -eq 0 ]]; then
            return 1
        fi
    fi

    $PYTHON -c "import xdp_test_harness" &> /dev/null
    if [[ $? -ne 0 ]]; then
        # Libraries are not installed.
        return 1
    fi

    echo "$PYTHON"
}

run_python_test()
{
    local module="$1"
    local module_path
    local python

    module_path="$(realpath --relative-to=. "$TOOL_TESTS_DIR" | sed "s/\//./g")"
    if [[ $? -ne 0 ]] || [[ $module_path == "." ]]; then
        return "$SKIPPED_TEST"
    fi

    python="$(get_python)"
    if [[ $? -ne 0 ]]; then
        return "$SKIPPED_TEST"
    fi

    $python -m xdp_test_harness.runner client "$module_path"."$module"
    if [[ $? -ne 0 ]]; then
        return 1
    fi

    return 0
}

test_python_basic()
{
    run_python_test test_basic
}

test_python_slow()
{
    run_python_test test_slow
}

cleanup_tests()
{
    $XDP_FILTER unload $NS >/dev/null 2>&1
    $XDP_LOADER unload $NS --all >/dev/null 2>&1
}
xdp-tools-1.5.4/xdp-filter/xdpfilt_alw_eth.c0000644000175100001660000000035415003640462020416 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_ALLOW
#define FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#undef FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_alw_eth

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_dny_ip.c0000644000175100001660000000035315003640462020254 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_DENY
#undef FILT_MODE_ETHERNET
#define FILT_MODE_IPV4
#define FILT_MODE_IPV6
#undef FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_dny_ip

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_alw_all.c0000644000175100001660000000036015003640462020403 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_ALLOW
#define FILT_MODE_ETHERNET
#define FILT_MODE_IPV4
#define FILT_MODE_IPV6
#define FILT_MODE_UDP
#define FILT_MODE_TCP
#define FUNCNAME xdpfilt_alw_all

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_prog.h0000644000175100001660000001503415003640462017750 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

/* XDP filter program fragment. This header file contains the full-featured
 * program, split up with ifdefs. The actual program files xdpfilt_*.c
 * include this file with different #defines to create the
 * different eBPF program sections that include only the needed features.
 */

#ifndef __XDPFILT_PROG_H
#define __XDPFILT_PROG_H

#include #include #include #include
#include "common_kern_user.h" /* Defines xdp_stats_map */
#include "xdp/xdp_stats_kern.h"
#include "xdp/parsing_helpers.h"

#ifdef FILT_MODE_DENY
#define VERDICT_HIT XDP_PASS
#define VERDICT_MISS XDP_DROP
#define FEATURE_OPMODE FEAT_DENY
#else
#define VERDICT_HIT XDP_DROP
#define VERDICT_MISS XDP_PASS
#define FEATURE_OPMODE FEAT_ALLOW
#endif

#define CHECK_RET(ret)                          \
    do {                                        \
        if ((ret) < 0) {                        \
            action = XDP_ABORTED;               \
            goto out;                           \
        }                                       \
    } while (0)

#define CHECK_VERDICT(type, param)                                       \
    do {                                                                 \
        if ((action = lookup_verdict_##type(param)) != VERDICT_MISS)     \
            goto out;                                                    \
    } while (0)

#define CHECK_MAP(map, key, mask)                                \
    do {                                                         \
        __u64 *value;                                            \
        value = bpf_map_lookup_elem(map, key);                   \
        if ((value) && (*(value) & (mask)) == (mask)) {          \
            *value += (1 << COUNTER_SHIFT);                      \
            return VERDICT_HIT;                                  \
        }                                                        \
    } while (0)

#if defined(FILT_MODE_TCP) || defined(FILT_MODE_UDP)
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 65536);
    __type(key, __u32);
    __type(value, __u64);
    __uint(pinning, LIBBPF_PIN_BY_NAME);
} MAP_NAME_PORTS SEC(".maps");

#ifdef FILT_MODE_TCP
static int __always_inline lookup_verdict_tcp(struct tcphdr *tcphdr)
{
    __u32 key;

    key = tcphdr->dest;
    CHECK_MAP(&filter_ports, &key, MAP_FLAG_DST | MAP_FLAG_TCP);
    key = tcphdr->source;
    CHECK_MAP(&filter_ports, &key, MAP_FLAG_SRC | MAP_FLAG_TCP);
    return VERDICT_MISS;
}
#define FEATURE_TCP FEAT_TCP
#else
#define FEATURE_TCP 0
#endif

#ifdef FILT_MODE_UDP
static int __always_inline lookup_verdict_udp(struct udphdr *udphdr)
{
    __u32 key;

    key = udphdr->dest;
    CHECK_MAP(&filter_ports, &key, MAP_FLAG_DST | MAP_FLAG_UDP);
    key = udphdr->source;
    CHECK_MAP(&filter_ports, &key, MAP_FLAG_SRC | MAP_FLAG_UDP);
    return VERDICT_MISS;
}
#define FEATURE_UDP FEAT_UDP
#else
#define FEATURE_UDP 0
#endif
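/* Illustration (an editorial sketch, not part of the original file): with
 * FILT_MODE_UDP set, the destination-port CHECK_MAP() invocation above
 * expands to roughly the following:
 *
 *    __u64 *value = bpf_map_lookup_elem(&filter_ports, &key);
 *    if (value && (*value & (MAP_FLAG_DST | MAP_FLAG_UDP)) ==
 *                 (MAP_FLAG_DST | MAP_FLAG_UDP)) {
 *        *value += (1 << COUNTER_SHIFT);  // bump per-entry hit counter
 *        return VERDICT_HIT;              // XDP_DROP in allow mode, XDP_PASS in deny mode
 *    }
 *
 * That is, the bits below COUNTER_SHIFT in each map value hold the match
 * flags, while the bits above it accumulate a hit count for the entry.
 */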
#else
#define FEATURE_UDP 0
#define FEATURE_TCP 0
#endif /* TCP || UDP */

#ifdef FILT_MODE_IPV4
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    __uint(max_entries, 10000);
    __type(key, __u32);
    __type(value, __u64);
    __uint(pinning, LIBBPF_PIN_BY_NAME);
} MAP_NAME_IPV4 SEC(".maps");

static int __always_inline lookup_verdict_ipv4(struct iphdr *iphdr)
{
    __u32 addr;

    addr = iphdr->daddr;
    CHECK_MAP(&filter_ipv4, &addr, MAP_FLAG_DST);
    addr = iphdr->saddr;
    CHECK_MAP(&filter_ipv4, &addr, MAP_FLAG_SRC);
    return VERDICT_MISS;
}
#define CHECK_VERDICT_IPV4(param) CHECK_VERDICT(ipv4, param)
#define FEATURE_IPV4 FEAT_IPV4
#else
#define FEATURE_IPV4 0
#define CHECK_VERDICT_IPV4(param)
#endif /* FILT_MODE_IPV4 */

#ifdef FILT_MODE_IPV6
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    __uint(max_entries, 10000);
    __type(key, struct in6_addr);
    __type(value, __u64);
    __uint(pinning, LIBBPF_PIN_BY_NAME);
} MAP_NAME_IPV6 SEC(".maps");

static int __always_inline lookup_verdict_ipv6(struct ipv6hdr *ipv6hdr)
{
    struct in6_addr addr;

    addr = ipv6hdr->daddr;
    CHECK_MAP(&filter_ipv6, &addr, MAP_FLAG_DST);
    addr = ipv6hdr->saddr;
    CHECK_MAP(&filter_ipv6, &addr, MAP_FLAG_SRC);
    return VERDICT_MISS;
}
#define CHECK_VERDICT_IPV6(param) CHECK_VERDICT(ipv6, param)
#define FEATURE_IPV6 FEAT_IPV6
#else
#define FEATURE_IPV6 0
#define CHECK_VERDICT_IPV6(param)
#endif /* FILT_MODE_IPV6 */

#ifdef FILT_MODE_ETHERNET
struct ethaddr {
    __u8 addr[ETH_ALEN];
};

struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    __uint(max_entries, 10000);
    __type(key, struct ethaddr);
    __type(value, __u64);
    __uint(pinning, LIBBPF_PIN_BY_NAME);
} MAP_NAME_ETHERNET SEC(".maps");

static int __always_inline lookup_verdict_ethernet(struct ethhdr *eth)
{
    struct ethaddr addr = {};

    __builtin_memcpy(&addr, eth->h_dest, sizeof(addr));
    CHECK_MAP(&filter_ethernet, &addr, MAP_FLAG_DST);
    __builtin_memcpy(&addr, eth->h_source, sizeof(addr));
    CHECK_MAP(&filter_ethernet, &addr, MAP_FLAG_SRC);
    return VERDICT_MISS;
}
#define CHECK_VERDICT_ETHERNET(param) CHECK_VERDICT(ethernet, param)
#define FEATURE_ETHERNET FEAT_ETHERNET
#else
#define FEATURE_ETHERNET 0
#define CHECK_VERDICT_ETHERNET(param)
#endif /* FILT_MODE_ETHERNET */

#ifndef FUNCNAME
#define FUNCNAME xdp_filt_unknown
#endif

struct {
    __uint(priority, 10);
    __uint(XDP_PASS, 1);
} XDP_RUN_CONFIG(FUNCNAME);

SEC("xdp")
int FUNCNAME(struct xdp_md *ctx)
{
    void *data_end = (void *)(long)ctx->data_end;
    void *data = (void *)(long)ctx->data;
    __u32 action = VERDICT_MISS; /* Default action */
    struct hdr_cursor nh;
    struct ethhdr *eth;
    int eth_type;

    nh.pos = data;
    eth_type = parse_ethhdr(&nh, data_end, &eth);
    CHECK_RET(eth_type);
    CHECK_VERDICT_ETHERNET(eth);

#if defined(FILT_MODE_IPV4) || defined(FILT_MODE_IPV6) || \
    defined(FILT_MODE_TCP) || defined(FILT_MODE_UDP)
    struct iphdr *iphdr;
    struct ipv6hdr *ipv6hdr;
    int ip_type;

    if (eth_type == bpf_htons(ETH_P_IP)) {
        ip_type = parse_iphdr(&nh, data_end, &iphdr);
        CHECK_RET(ip_type);
        CHECK_VERDICT_IPV4(iphdr);
    } else if (eth_type == bpf_htons(ETH_P_IPV6)) {
        ip_type = parse_ip6hdr(&nh, data_end, &ipv6hdr);
        CHECK_RET(ip_type);
        CHECK_VERDICT_IPV6(ipv6hdr);
    } else {
        goto out;
    }

#ifdef FILT_MODE_UDP
    struct udphdr *udphdr;

    if (ip_type == IPPROTO_UDP) {
        CHECK_RET(parse_udphdr(&nh, data_end, &udphdr));
        CHECK_VERDICT(udp, udphdr);
    }
#endif /* FILT_MODE_UDP */

#ifdef FILT_MODE_TCP
    struct tcphdr *tcphdr;

    if (ip_type == IPPROTO_TCP) {
        CHECK_RET(parse_tcphdr(&nh, data_end, &tcphdr));
        CHECK_VERDICT(tcp, tcphdr);
    }
#endif /* FILT_MODE_TCP*/
#endif /* FILT_MODE_{IPV4,IPV6,TCP,UDP} */

out:
    return xdp_stats_record_action(ctx, action);
}

char _license[] SEC("license") = "GPL";
__u32 _features SEC("features") = (FEATURE_ETHERNET | FEATURE_IPV4 |
                                   FEATURE_IPV6 | FEATURE_UDP |
                                   FEATURE_TCP | FEATURE_OPMODE);

#else
#error "Multiple includes of xdpfilt_prog.h"
#endif // include guard
xdp-tools-1.5.4/xdp-filter/xdpfilt_alw_udp.c0000644000175100001660000000035415003640462020426 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_ALLOW
#undef FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#define FILT_MODE_UDP
#undef FILT_MODE_TCP
#define FUNCNAME xdpfilt_alw_udp

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_dny_all.c0000644000175100001660000000035715003640462020420 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_DENY
#define FILT_MODE_ETHERNET
#define FILT_MODE_IPV4
#define FILT_MODE_IPV6
#define FILT_MODE_UDP
#define FILT_MODE_TCP
#define FUNCNAME xdpfilt_dny_all

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/common_kern_user.h0000644000175100001660000000130315003640462020606 0ustar runnerdocker
#ifndef COMMON_KERN_USER_H
#define COMMON_KERN_USER_H

#define FEAT_TCP (1<<0)
#define FEAT_UDP (1<<1)
#define FEAT_IPV6 (1<<2)
#define FEAT_IPV4 (1<<3)
#define FEAT_ETHERNET (1<<4)
#define FEAT_ALL (FEAT_TCP|FEAT_UDP|FEAT_IPV6|FEAT_IPV4|FEAT_ETHERNET)
#define FEAT_ALLOW (1<<5)
#define FEAT_DENY (1<<6)

#define MAP_FLAG_SRC (1<<0)
#define MAP_FLAG_DST (1<<1)
#define MAP_FLAG_TCP (1<<2)
#define MAP_FLAG_UDP (1<<3)
#define MAP_FLAGS (MAP_FLAG_SRC|MAP_FLAG_DST|MAP_FLAG_TCP|MAP_FLAG_UDP)

#define COUNTER_SHIFT 6

#define MAP_NAME_PORTS filter_ports
#define MAP_NAME_IPV4 filter_ipv4
#define MAP_NAME_IPV6 filter_ipv6
#define MAP_NAME_ETHERNET filter_ethernet

#include "xdp/xdp_stats_kern_user.h"

#endif
xdp-tools-1.5.4/xdp-filter/xdpfilt_alw_tcp.c0000644000175100001660000000035415003640462020424 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_ALLOW
#undef FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#undef FILT_MODE_UDP
#define FILT_MODE_TCP
#define FUNCNAME xdpfilt_alw_tcp

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/xdpfilt_dny_tcp.c0000644000175100001660000000035315003640462020432 0ustar runnerdocker
/* SPDX-License-Identifier: GPL-2.0 */

#define FILT_MODE_DENY
#undef FILT_MODE_ETHERNET
#undef FILT_MODE_IPV4
#undef FILT_MODE_IPV6
#undef FILT_MODE_UDP
#define FILT_MODE_TCP
#define FUNCNAME xdpfilt_dny_tcp

#include "xdpfilt_prog.h"
xdp-tools-1.5.4/xdp-filter/.gitignore0000644000175100001660000000004015003640462017055 0ustar runnerdocker
*.ll
xdp-filter
prog_features.h
xdp-tools-1.5.4/xdp-filter/extract_features.sh0000644000175100001660000000111515003640462020775 0ustar runnerdocker
#!/bin/sh
cat</dev/null)
[ "$?" -ne "0" ] && continue
found=0
for w in $featstring; do
    if [ "$w" = "0x00000000" ]; then
        found=1
    else
        if [ "$found" -eq "1" ]; then
            feats=$w
            break
        fi
    fi
done
echo " {\"$f\", 0x$feats},"
done
cat< $@ || ( ret=$$?; rm -f $@; exit $$ret )
xdp-tools-1.5.4/xdp-monitor/0000755000175100001660000000000015003640462015275 5ustar runnerdocker
xdp-tools-1.5.4/xdp-monitor/xdp-monitor.c0000644000175100001660000000620615003640462017725 0ustar runnerdocker
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__=
"XDP monitor tool, based on tracepoints\n";

static const char *__doc_err_only__=
"NOTICE: Only tracking XDP redirect errors\n"
" Enable redirect success stats via '-s/--stats'\n"
" (which comes with a per packet processing overhead)\n";

#define PROG_NAME "xdp-monitor"

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xdp_monitor.skel.h"
#include "params.h"
#include "util.h"
#include "logging.h"

static int mask = SAMPLE_REDIRECT_ERR_CNT | SAMPLE_CPUMAP_ENQUEUE_CNT |
                  SAMPLE_CPUMAP_KTHREAD_CNT | SAMPLE_EXCEPTION_CNT |
                  SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;

DEFINE_SAMPLE_INIT(xdp_monitor);

static const struct monitoropt {
    bool stats;
    bool extended;
    __u32 interval;
} defaults_monitoropt = { .stats = false, .interval = 2 };

static struct prog_option xdpmonitor_options[] = {
    DEFINE_OPTION("interval", OPT_U32, struct monitoropt, interval,
                  .short_opt = 'i',
                  .metavar = "",
                  .help = "Polling interval (default 2)"),
    DEFINE_OPTION("stats", OPT_BOOL, struct monitoropt, stats,
                  .short_opt = 's',
                  .help = "Enable statistics for transmitted packets (not just errors)"),
    DEFINE_OPTION("extended", OPT_BOOL, struct monitoropt, extended,
                  .short_opt = 'e',
                  .help = "Start running in extended output mode (C^\\ to toggle)"),
    END_OPTIONS
};

int main(int argc, char **argv)
{
    int ret = EXIT_FAIL_OPTION;
    struct monitoropt cfg = {};
    struct xdp_monitor *skel;

    if (parse_cmdline_args(argc, argv, xdpmonitor_options, &cfg, sizeof(cfg),
                           PROG_NAME, PROG_NAME, __doc__,
                           &defaults_monitoropt) != 0)
        return ret;

    /* If all the options are parsed ok, make sure we are root! */
    if (check_bpf_environ())
        return ret;

    skel = xdp_monitor__open();
    if (!skel) {
        pr_warn("Failed to xdp_monitor__open: %s\n", strerror(errno));
        return EXIT_FAIL_BPF;
    }

    ret = sample_init_pre_load(skel, NULL);
    if (ret < 0) {
        pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-ret));
        ret = EXIT_FAIL_BPF;
        goto end_destroy;
    }

    ret = xdp_monitor__load(skel);
    if (ret < 0) {
        pr_warn("Failed to xdp_monitor__load: %s\n", strerror(errno));
        ret = EXIT_FAIL_BPF;
        goto end_destroy;
    }

    if (cfg.stats)
        mask |= SAMPLE_REDIRECT_CNT;
    else
        printf("%s", __doc_err_only__);

    if (cfg.extended)
        sample_switch_mode();

    ret = sample_init(skel, mask, 0, 0);
    if (ret < 0) {
        pr_warn("Failed to initialize sample: %s\n", strerror(-ret));
        ret = EXIT_FAIL_BPF;
        goto end_destroy;
    }

    ret = sample_run(cfg.interval, NULL, NULL);
    if (ret < 0) {
        pr_warn("Failed during sample run: %s\n", strerror(-ret));
        ret = EXIT_FAIL;
        goto end_destroy;
    }
    ret = EXIT_OK;

end_destroy:
    xdp_monitor__destroy(skel);
    sample_teardown();
    return ret;
}
xdp-tools-1.5.4/xdp-monitor/README.org0000644000175100001660000000336715003640462016746 0ustar runnerdocker
#+EXPORT_FILE_NAME: xdp-monitor
#+TITLE: xdp-monitor
#+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"A simple XDP monitoring tool"
# This file serves both as a README on github, and as the source for the man
# page; the latter through the org-mode man page export support.
# .
# To export the man page, simply use the org-mode exporter; (require 'ox-man) if
# it's not available. There's also a Makefile rule to export it.

* xdp-monitor - Library and utilities for use with XDP

* XDP-monitor - a simple BPF-powered XDP monitoring tool

XDP-monitor is a tool that monitors various XDP related statistics and events
using BPF tracepoints infrastructure, trying to be as low overhead as
possible.
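As a quick illustration, a hypothetical invocation that polls every 5 seconds,
with redirect success statistics enabled and extended output mode active from
the start, could look like the following (all three options are described
under =Running xdp-monitor= below):

#+begin_src sh
# Illustrative example only; see the option descriptions below.
xdp-monitor --interval 5 --stats --extended
#+end_src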
Note that by default, statistics for successful XDP redirect events are
disabled, as that leads to a per-packet BPF tracing overhead, which while
being low overhead, can lead to packet processing degradation.

This tool relies on the BPF raw tracepoints infrastructure in the kernel.

There is more information on the meaning of the output in both default
(terse) and verbose output mode, in the =Output Format Description= section.

** Running xdp-monitor

The syntax for running xdp-monitor is:

#+begin_src sh
xdp-monitor [options]
#+end_src

The supported options are:

** -i, --interval
Set the polling interval for collecting all statistics and displaying them to
the output. The interval is specified in seconds.

** -s, --stats
Enable statistics for successful redirection. This option comes with a
per-packet tracing overhead, for recording all successful redirections.

** -e, --extended
Start xdp-monitor in "extended" output mode. If not set, xdp-monitor will
start in "terse" mode. The output mode can be switched by hitting C-\\ while
the program is running. See also the *Output Format Description* section
below.

** -v, --verbose
Enable verbose logging. Supply twice to enable verbose logging from the
underlying =libxdp= and =libbpf= libraries.

** --version
Show the application version and exit.

** -h, --help
Display a summary of the available options

* Output Format Description

By default, redirect success statistics are disabled, use =--stats= to
enable. The terse output mode is default, extended output mode can be
activated using the =--extended= command line option.

SIGQUIT (Ctrl + \\) can be used to switch the mode dynamically at runtime.

Terse mode displays at most the following fields:

#+begin_src sh
rx/s        Number of packets received per second
redir/s     Number of packets successfully redirected per second
err,drop/s  Aggregated count of errors per second (including dropped packets)
xmit/s      Number of packets transmitted on the output device per second
#+end_src

Verbose output mode displays at most the following fields:

#+begin_src sh
FIELD                 DESCRIPTION
receive               Displays the number of packets received and errors encountered

                      Whenever an error or packet drop occurs, details of per CPU error
                      and drop statistics will be expanded inline in terse mode.
                      pkt/s     - Packets received per second
                      drop/s    - Packets dropped per second
                      error/s   - Errors encountered per second

redirect -            Displays the number of packets successfully redirected
                      Errors encountered are expanded under redirect_err field
                      Note that passing -s to enable it has a per packet overhead
                      redir/s   - Packets redirected successfully per second

redirect_err          Displays the number of packets that failed redirection
                      The errno is expanded under this field with per CPU count
                      The recognized errors are:
                      EINVAL:      Invalid redirection
                      ENETDOWN:    Device being redirected to is down
                      EMSGSIZE:    Packet length too large for device
                      EOPNOTSUPP:  Operation not supported
                      ENOSPC:      No space in ptr_ring of cpumap kthread

                      error/s   - Packets that failed redirection per second

enqueue to cpu N      Displays the number of packets enqueued to bulk queue of CPU N
                      Expands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N
                      Received packets can be associated with the CPU redirect program is enqueuing
                      packets to.
                      pkt/s     - Packets enqueued per second from other CPU to CPU N
                      drop/s    - Packets dropped when trying to enqueue to CPU N
                      bulk-avg  - Average number of packets processed for each event

kthread               Displays the number of packets processed in CPUMAP kthread for each CPU
                      Packets consumed from ptr_ring in kthread, and its xdp_stats (after calling
                      CPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total
                      and then per-CPU to associate it to each CPU's pinned CPUMAP kthread.
                      pkt/s     - Packets consumed per second from ptr_ring
                      drop/s    - Packets dropped per second in kthread
                      sched     - Number of times kthread called schedule()

                      xdp_stats (also expands to per-CPU counts)
                      pass/s    - XDP_PASS count for CPUMAP program execution
                      drop/s    - XDP_DROP count for CPUMAP program execution
                      redir/s   - XDP_REDIRECT count for CPUMAP program execution

xdp_exception         Displays xdp_exception tracepoint events
                      This can occur due to internal driver errors, unrecognized XDP actions and
                      due to explicit user trigger by use of XDP_ABORTED
                      Each action is expanded below this field with its count
                      hit/s     - Number of times the tracepoint was hit per second

devmap_xmit           Displays devmap_xmit tracepoint events
                      This tracepoint is invoked for successful transmissions on output device
                      but these statistics are not available for generic XDP mode, hence they
                      will be omitted from the output when using SKB mode
                      xmit/s    - Number of packets that were transmitted per second
                      drop/s    - Number of packets that failed transmissions per second
                      drv_err/s - Number of internal driver errors per second
                      bulk-avg  - Average number of packets processed for each event
#+end_src

* BUGS

Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues

* AUTHOR

The original xdp-monitor tool was written by Jesper Dangaard Brouer. It was
then rewritten to support more features by Kumar Kartikeya Dwivedi. This man
page was written by Kumar Kartikeya Dwivedi.
xdp-tools-1.5.4/xdp-monitor/xdp_monitor.bpf.c0000644000175100001660000000040515003640462020550 0ustar runnerdocker
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include #include

char _license[] SEC("license") = "GPL";
xdp-tools-1.5.4/xdp-monitor/tests/0000755000175100001660000000000015003640462016437 5ustar runnerdocker
xdp-tools-1.5.4/xdp-monitor/tests/test-xdp-monitor.sh0000644000175100001660000000041015003640462022223 0ustar runnerdocker
XDP_LOADER=${XDP_LOADER:-./xdp-loader}
XDP_MONITOR=${XDP_MONITOR:-./xdp-monitor}
ALL_TESTS="test_monitor"

test_monitor()
{
    export XDP_SAMPLE_IMMEDIATE_EXIT=1
    check_run $XDP_MONITOR -vv
    check_run $XDP_MONITOR -s -vv
    check_run $XDP_MONITOR -e -vv
}
xdp-tools-1.5.4/xdp-monitor/xdp-monitor.80000644000175100001660000001600515003640462017650 0ustar runnerdocker
.TH "xdp-monitor" "8" "DECEMBER 12, 2022" "V1.5.4" "A simple XDP monitoring tool"
.SH "NAME"
XDP-monitor \- a simple BPF-powered XDP monitoring tool
.SH "SYNOPSIS"
.PP
XDP-monitor is a tool that monitors various XDP related statistics and
events using BPF tracepoints infrastructure, trying to be as low overhead
as possible.
.PP
Note that by default, statistics for successful XDP redirect events are
disabled, as that leads to a per-packet BPF tracing overhead, which while
being low overhead, can lead to packet processing degradation.
.PP
This tool relies on the BPF raw tracepoints infrastructure in the kernel.
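.PP
As a quick illustration, a hypothetical invocation that polls every 5
seconds with redirect success statistics enabled (the options are described
below) could look like:
.RS
.nf
\fCxdp-monitor --interval 5 --stats
\fP
.fi
.RE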
.PP
There is more information on the meaning of the output in both default
(terse) and verbose output mode, in the \fIOutput Format Description\fP
section.
.SS "Running xdp-monitor"
.PP
The syntax for running xdp-monitor is:
.RS
.nf
\fCxdp-monitor [options]
\fP
.fi
.RE
.PP
The supported options are:
.SS "-i, --interval"
.PP
Set the polling interval for collecting all statistics and displaying them
to the output. The interval is specified in seconds.
.SS "-s, --stats"
.PP
Enable statistics for successful redirection. This option comes with a
per-packet tracing overhead, for recording all successful redirections.
.SS "-e, --extended"
.PP
Start xdp-monitor in "extended" output mode. If not set, xdp-monitor will
start in "terse" mode. The output mode can be switched by hitting C-\\ while
the program is running. See also the \fBOutput Format Description\fP section
below.
.SS "-v, --verbose"
.PP
Enable verbose logging. Supply twice to enable verbose logging from the
underlying \fIlibxdp\fP and \fIlibbpf\fP libraries.
.SS "--version"
.PP
Show the application version and exit.
.SS "-h, --help"
.PP
Display a summary of the available options
.SH "Output Format Description"
.PP
By default, redirect success statistics are disabled, use \fI\-\-stats\fP
to enable. The terse output mode is default, extended output mode can be
activated using the \fI\-\-extended\fP command line option.
.PP
SIGQUIT (Ctrl + \\) can be used to switch the mode dynamically at runtime.
.PP
Terse mode displays at most the following fields:
.RS
.nf
\fCrx/s        Number of packets received per second
redir/s     Number of packets successfully redirected per second
err,drop/s  Aggregated count of errors per second (including dropped packets)
xmit/s      Number of packets transmitted on the output device per second
\fP
.fi
.RE
.PP
Verbose output mode displays at most the following fields:
.RS
.nf
\fCFIELD                 DESCRIPTION
receive               Displays the number of packets received and errors encountered

                      Whenever an error or packet drop occurs, details of per CPU error
                      and drop statistics will be expanded inline in terse mode.
                      pkt/s     - Packets received per second
                      drop/s    - Packets dropped per second
                      error/s   - Errors encountered per second

redirect -            Displays the number of packets successfully redirected
                      Errors encountered are expanded under redirect_err field
                      Note that passing -s to enable it has a per packet overhead
                      redir/s   - Packets redirected successfully per second

redirect_err          Displays the number of packets that failed redirection
                      The errno is expanded under this field with per CPU count
                      The recognized errors are:
                      EINVAL:      Invalid redirection
                      ENETDOWN:    Device being redirected to is down
                      EMSGSIZE:    Packet length too large for device
                      EOPNOTSUPP:  Operation not supported
                      ENOSPC:      No space in ptr_ring of cpumap kthread

                      error/s   - Packets that failed redirection per second

enqueue to cpu N      Displays the number of packets enqueued to bulk queue of CPU N
                      Expands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N
                      Received packets can be associated with the CPU redirect program is enqueuing
                      packets to.
                      pkt/s     - Packets enqueued per second from other CPU to CPU N
                      drop/s    - Packets dropped when trying to enqueue to CPU N
                      bulk-avg  - Average number of packets processed for each event

kthread               Displays the number of packets processed in CPUMAP kthread for each CPU
                      Packets consumed from ptr_ring in kthread, and its xdp_stats (after calling
                      CPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total
                      and then per-CPU to associate it to each CPU's pinned CPUMAP kthread.
                      pkt/s     - Packets consumed per second from ptr_ring
                      drop/s    - Packets dropped per second in kthread
                      sched     - Number of times kthread called schedule()

                      xdp_stats (also expands to per-CPU counts)
xdp_stats are expanded as a total and then per-CPU to associate it to each CPU's pinned CPUMAP kthread. pkt/s - Packets consumed per second from ptr_ring drop/s - Packets dropped per second in kthread sched - Number of times kthread called schedule() xdp_stats (also expands to per-CPU counts) pass/s - XDP_PASS count for CPUMAP program execution drop/s - XDP_DROP count for CPUMAP program execution redir/s - XDP_REDIRECT count for CPUMAP program execution xdp_exception Displays xdp_exception tracepoint events This can occur due to internal driver errors, unrecognized XDP actions and due to explicit user trigger by use of XDP_ABORTED Each action is expanded below this field with its count hit/s - Number of times the tracepoint was hit per second devmap_xmit Displays devmap_xmit tracepoint events This tracepoint is invoked for successful transmissions on output device but these statistics are not available for generic XDP mode, hence they will be omitted from the output when using SKB mode xmit/s - Number of packets that were transmitted per second drop/s - Number of packets that failed transmissions per second drv_err/s - Number of internal driver errors per second bulk-avg - Average number of packets processed for each event \fP .fi .RE .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP The original xdp-monitor tool was written by Jesper Dangaard Brouer. It was then rewritten to support more features by Kumar Kartikeya Dwivedi. This man page was written by Kumar Kartikeya Dwivedi. xdp-tools-1.5.4/xdp-monitor/.gitignore0000644000175100001660000000001415003640462017260 0ustar runnerdockerxdp-monitor xdp-tools-1.5.4/xdp-monitor/Makefile0000644000175100001660000000053715003640462016742 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) XDP_TARGETS := xdp_monitor.bpf BPF_SKEL_TARGETS := $(XDP_TARGETS) # Don't install skeleton object files XDP_OBJ_INSTALL := TOOL_NAME := xdp-monitor MAN_PAGE := xdp-monitor.8 TEST_FILE := tests/test-xdp-monitor.sh USER_TARGETS := xdp-monitor LIB_DIR = ../lib include $(LIB_DIR)/common.mk xdp-tools-1.5.4/LICENSES/0000755000175100001660000000000015003640462014222 5ustar runnerdockerxdp-tools-1.5.4/LICENSES/LGPL-2.10000644000175100001660000006542515003640462015215 0ustar runnerdockerValid-License-Identifier: LGPL-2.1 Valid-License-Identifier: LGPL-2.1+ SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html Usage-Guide: To use this license in source code, put one of the following SPDX tag/value pairs into a comment according to the placement guidelines in the licensing rules documentation. For 'GNU Lesser General Public License (LGPL) version 2.1 only' use: SPDX-License-Identifier: LGPL-2.1 For 'GNU Lesser General Public License (LGPL) version 2.1 or any later version' use: SPDX-License-Identifier: LGPL-2.1+ License-Text: GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. 
We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). 
Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) 
Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. 
Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. one line to give the library's name and an idea of what it does. Copyright (C) year name of author This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. signature of Ty Coon, 1 April 1990 Ty Coon, President of Vice That's all there is to it! xdp-tools-1.5.4/LICENSES/GPL-2.00000644000175100001660000004457215003640462015100 0ustar runnerdockerValid-License-Identifier: GPL-2.0 Valid-License-Identifier: GPL-2.0-only Valid-License-Identifier: GPL-2.0+ Valid-License-Identifier: GPL-2.0-or-later SPDX-URL: https://spdx.org/licenses/GPL-2.0.html Usage-Guide: To use this license in source code, put one of the following SPDX tag/value pairs into a comment according to the placement guidelines in the licensing rules documentation. For 'GNU General Public License (GPL) version 2 only' use: SPDX-License-Identifier: GPL-2.0 or SPDX-License-Identifier: GPL-2.0-only For 'GNU General Public License (GPL) version 2 or any later version' use: SPDX-License-Identifier: GPL-2.0+ or SPDX-License-Identifier: GPL-2.0-or-later License-Text: GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. xdp-tools-1.5.4/LICENSES/BSD-2-Clause0000644000175100001660000000315115003640462016126 0ustar runnerdockerValid-License-Identifier: BSD-2-Clause SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html Usage-Guide: To use the BSD 2-clause "Simplified" License put the following SPDX tag/value pair into a comment according to the placement guidelines in the licensing rules documentation: SPDX-License-Identifier: BSD-2-Clause License-Text: Copyright (c) . All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xdp-tools-1.5.4/.gitignore0000644000175100001660000000107415003640462015007 0ustar runnerdocker# Prerequisites *.d # Object files *.o *.ko *.obj *.elf # Linker output *.ilk *.exp *.ll # Precompiled Headers *.gch *.pch # Libraries *.lib *.a *.la *.lo # Shared objects (inc. Windows DLLs) *.dll *.so *.so.* *.dylib # Executables *.exe *.out *.app *.i*86 *.x86_64 *.hex # Debug files *.dSYM/ *.su *.idb *.pdb # Kernel Module Compile Results *.mod* *.cmd .tmp_versions/ modules.order Module.symvers Mkfile.old dkms.conf config.mk xdp-dispatcher.c *.man *.rpm /xdp-tools-*.tar.gz .ccls-cache .clangd .cache compile_commands.json # BPF skeleton files *.skel.h xdp-tools-1.5.4/xdp-trafficgen/0000755000175100001660000000000015003640462015716 5ustar runnerdockerxdp-tools-1.5.4/xdp-trafficgen/README.org0000644000175100001660000001321515003640462017366 0ustar runnerdocker#+EXPORT_FILE_NAME: xdp-trafficgen #+TITLE: xdp-trafficgen #+OPTIONS: ^:nil #+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"An XDP-based traffic generator" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. There's also a Makefile rule to export it. * XDP-trafficgen - an XDP-based traffic generator XDP-trafficgen is a packet generator utilising the XDP kernel subsystem to generate packets and transmit them through a network interface. Packets are dynamically generated and transmitted in the kernel, allowing for high performance (millions of packets per second per core). XDP-trafficgen supports generating UDP traffic with fixed or dynamic destination ports, and also has basic support for generating dummy TCP traffic on a single flow. ** Running xdp-trafficgen The syntax for running xdp-trafficgen is: #+begin_src sh Usage: xdp-trafficgen COMMAND [options] COMMAND can be one of: udp - run in UDP mode tcp - run in TCP mode #+end_src Each command and its options are explained below. Or use =xdp-trafficgen COMMAND --help= to see the options for each command. * The UDP command The UDP command generates UDP traffic to a given destination IP and either a fixed destination port, or a range of port numbers. Only IPv6 traffic is supported, and the generated packets will have their IP hop limit set to 1, so they can't be routed. The syntax for the =udp= command is: =xdp-trafficgen udp [options] <ifname>= Where =<ifname>= is the name of the destination interface that packets will be transmitted on. Note that the network driver of this network interface must support being the target of XDP redirects (it must implement the =ndo_xdp_xmit= driver operation). The supported options are: ** -m, --dst-mac Set the destination MAC address of generated packets. The default is to generate packets with an all-zero destination MAC. ** -M, --src-mac Set the source MAC address of the generated packets. The default is to use the MAC address of the interface packets are transmitted on. ** -a, --dst-addr Destination IP address of generated packets. The default is the link-local =fe80::2= address. ** -A, --src-addr Source IP address of generated packets. The default is the link-local =fe80::1= address. ** -p, --dst-port Destination UDP port of generated packets, or the first port in the range if running with =--dyn-ports= set. Defaults to 1. ** -P, --src-port Source UDP port of generated packets. Defaults to 1. 
** -d, --dyn-ports Enable dynamic port mode where the destination port is varied over a range of =<num ports>= starting from the =--dst-port=. ** -n, --num-packets Number of packets to send before exiting. If not supplied, =xdp-trafficgen= will keep sending packets until interrupted. ** -s, --pkt-size Size of each UDP packet being sent, including the Ethernet header. The minimum size, which is also the default, is 64 bytes. ** -t, --threads Number of simultaneous threads to transmit from. Each thread will be pinned to a separate CPU core if possible. Defaults to 1. ** -I, --interval Output transmission statistics with this interval (in seconds). ** -v, --verbose Enable verbose logging (-vv: more verbose). ** --version Display version information and exit. ** -h, --help Display a summary of the available options * The TCP command The TCP command generates dummy TCP traffic in a single TCP flow. This relies on first installing an ingress XDP program on the interface used to transmit on. Then, a regular TCP socket connection is established from userspace, and once the handshake is completed, the XDP program will take over and start generating traffic on that flow tuple. The ingress XDP program will intercept ACK packets from the receiver, and keep track of the receive window. The traffic generator has no congestion control, and only very basic retransmit tracking: in essence, any duplicate ACKs from the receiver will cause the sender to reset its send sequence number to the last ACKed value and restart from there. The same thing happens if no progress on the window is made within two seconds. This means that the traffic generator can generate a large amount of dummy traffic, but if there's packet loss a lot of this can be retransmissions. The syntax for the =tcp= command is: =xdp-trafficgen tcp [options] -i <ifname> <hostname>= Where =<ifname>= is the name of the destination interface that packets will be transmitted on and =<hostname>= is the peer hostname or IP address to connect to (only IPv6 is supported). Note that the network driver of this network interface must support being the target of XDP redirects (it must implement the =ndo_xdp_xmit= driver operation). The supported options are: ** -p, --dst-port Connect to destination <port>. Default 10000. ** -m, --mode Load ingress XDP program in <mode>; default native (valid values: native,skb,hw) ** -n, --num-packets Number of packets to send before exiting. If not supplied, =xdp-trafficgen= will keep sending packets until interrupted. ** -I, --interval Output transmission statistics with this interval (in seconds). ** -v, --verbose Enable verbose logging (-vv: more verbose). ** --version Display version information and exit. ** -h, --help Display a summary of the available options * BUGS Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues * AUTHOR xdp-trafficgen and this man page were written by Toke Høiland-Jørgensen. xdp-tools-1.5.4/xdp-trafficgen/tests/0000755000175100001660000000000015003640462017060 5ustar runnerdockerxdp-tools-1.5.4/xdp-trafficgen/tests/test-xdp-trafficgen.sh0000644000175100001660000000150615003640462023274 0ustar runnerdockerXDP_LOADER=${XDP_LOADER:-./xdp-loader} XDP_TRAFFICGEN=${XDP_TRAFFICGEN:-./xdp-trafficgen} ALL_TESTS="test_udp test_tcp" PIDS="" skip_if_missing_kernel_support() { $XDP_TRAFFICGEN probe ret=$? 
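# Exit status 161 is -EOPNOTSUPP (-95) from 'xdp-trafficgen probe' wrapped to
# an unsigned 8-bit shell exit code, i.e. the kernel lacks live packet mode
# for BPF_PROG_RUN, so the test is skipped rather than failed.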
if [ "$ret" -eq "161" ]; then exit $SKIPPED_TEST elif [ "$ret" -ne "0" ]; then exit 1 fi } test_udp() { skip_if_missing_kernel_support export XDP_SAMPLE_IMMEDIATE_EXIT=1 check_run $XDP_TRAFFICGEN udp $NS -n 1 } test_tcp() { skip_if_missing_kernel_support export XDP_SAMPLE_IMMEDIATE_EXIT=1 PID=$(start_background_ns_devnull "socat -6 TCP-LISTEN:10000,reuseaddr,fork -") $XDP_TRAFFICGEN tcp -i $NS $INSIDE_IP6 -n 1 res=$? stop_background $PID return $res } cleanup_tests() { $XDP_LOADER unload $NS --all >/dev/null 2>&1 $XDP_LOADER clean >/dev/null 2>&1 } xdp-tools-1.5.4/xdp-trafficgen/xdp-trafficgen.c0000644000175100001660000006043615003640462020774 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "params.h" #include "logging.h" #include "util.h" #include "xdp_sample.h" #include "xdp-trafficgen.h" #include "xdp_trafficgen.skel.h" #define PROG_NAME "xdp-trafficgen" #ifndef BPF_F_TEST_XDP_LIVE_FRAMES #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) #endif #define IFINDEX_LO 1 static int mask = SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_DROP_OK; DEFINE_SAMPLE_INIT(xdp_trafficgen); static bool status_exited = false; struct udp_packet { struct ethhdr eth; struct ipv6hdr iph; struct udphdr udp; __u8 payload[64 - sizeof(struct udphdr) - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)]; } __attribute__((__packed__)); static struct udp_packet pkt_udp = { .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), .iph.version = 6, .iph.nexthdr = IPPROTO_UDP, .iph.payload_len = bpf_htons(sizeof(struct udp_packet) - offsetof(struct udp_packet, udp)), .iph.hop_limit = 1, .iph.saddr.s6_addr16 = {bpf_htons(0xfe80), 0, 0, 0, 0, 0, 0, bpf_htons(1)}, .iph.daddr.s6_addr16 = {bpf_htons(0xfe80), 0, 0, 0, 0, 0, 0, bpf_htons(2)}, .udp.source = bpf_htons(1), .udp.dest = bpf_htons(1), .udp.len = bpf_htons(sizeof(struct udp_packet) - offsetof(struct udp_packet, udp)), }; struct thread_config { void *pkt; size_t pkt_size; __u32 cpu_core_id; __u32 num_pkts; __u32 batch_size; struct xdp_program *prog; }; static int run_prog(const struct thread_config *cfg, bool *status_var) { #ifdef HAVE_LIBBPF_BPF_PROG_TEST_RUN_OPTS struct xdp_md ctx_in = { .data_end = cfg->pkt_size, }; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = cfg->pkt, .data_size_in = cfg->pkt_size, .ctx_in = &ctx_in, .ctx_size_in = sizeof(ctx_in), .repeat = cfg->num_pkts ?: 1 << 20, .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .batch_size = cfg->batch_size, ); __u64 iterations = 0; cpu_set_t cpu_cores; int err; CPU_ZERO(&cpu_cores); CPU_SET(cfg->cpu_core_id, &cpu_cores); pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_cores); do { err = xdp_program__test_run(cfg->prog, &opts, 0); if (err) return -errno; iterations += opts.repeat; } while (!*status_var && (!cfg->num_pkts || cfg->num_pkts > iterations)); return 0; #else __unused const void *c = cfg, *s = status_var; return -EOPNOTSUPP; #endif } static void *run_traffic(void *arg) { const struct thread_config *cfg = arg; int err; err = run_prog(cfg, &status_exited); if (err) pr_warn("Couldn't run trafficgen program: %s\n", strerror(-err)); kill(getpid(), SIGINT); return NULL; } static int probe_kernel_support(void) { DECLARE_LIBXDP_OPTS(xdp_program_opts, opts); struct xdp_trafficgen *skel; struct xdp_program *prog; __u8 
data[ETH_HLEN] = {}; bool status = 0; int err; skel = xdp_trafficgen__open(); if (!skel) { err = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-err)); return err; } err = sample_init_pre_load(skel, "lo"); if (err < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-err)); goto out; } opts.obj = skel->obj; opts.prog_name = "xdp_drop"; prog = xdp_program__create(&opts); if (!prog) { err = -errno; pr_warn("Couldn't load XDP program: %s\n", strerror(-err)); goto out; } const struct thread_config cfg = { .pkt = data, .pkt_size = sizeof(data), .num_pkts = 1, .batch_size = 1, .prog = prog }; err = run_prog(&cfg, &status); if (err == -EOPNOTSUPP) { pr_warn("BPF_PROG_RUN with batch size support is missing from libbpf.\n"); } else if (err == -EINVAL) { err = -EOPNOTSUPP; pr_warn("Kernel doesn't support live packet mode for XDP BPF_PROG_RUN.\n"); } else if (err) { pr_warn("Error probing kernel support: %s\n", strerror(-err)); } xdp_program__close(prog); out: xdp_trafficgen__destroy(skel); return err; } static int create_runners(pthread_t **runner_threads, struct thread_config **thread_configs, int num_threads, struct thread_config *tcfg, struct xdp_program *prog) { struct thread_config *t; pthread_t *threads; int i, err; threads = calloc(num_threads, sizeof(pthread_t)); if (!threads) { pr_warn("Couldn't allocate memory\n"); return -ENOMEM; } t = calloc(num_threads, sizeof(struct thread_config)); if (!t) { pr_warn("Couldn't allocate memory\n"); free(threads); return -ENOMEM; } for (i = 0; i < num_threads; i++) { memcpy(&t[i], tcfg, sizeof(*tcfg)); tcfg->cpu_core_id++; t[i].prog = xdp_program__clone(prog, 0); err = libxdp_get_error(t[i].prog); if (err) { pr_warn("Failed to clone xdp_program: %s\n", strerror(-err)); t[i].prog = NULL; goto err; } err = pthread_create(&threads[i], NULL, run_traffic, &t[i]); if (err < 0) { pr_warn("Failed to create traffic thread: %s\n", strerror(-err)); goto err; } } *runner_threads = threads; *thread_configs = t; return 0; err: for (i = 0; i < num_threads; i++) { pthread_cancel(threads[i]); xdp_program__close(t[i].prog); } free(t); free(threads); return err; } static __be16 calc_udp_cksum(const struct udp_packet *pkt) { __u32 chksum = pkt->iph.nexthdr + bpf_ntohs(pkt->iph.payload_len); int i; for (i = 0; i < 8; i++) { chksum += bpf_ntohs(pkt->iph.saddr.s6_addr16[i]); chksum += bpf_ntohs(pkt->iph.daddr.s6_addr16[i]); } chksum += bpf_ntohs(pkt->udp.source); chksum += bpf_ntohs(pkt->udp.dest); chksum += bpf_ntohs(pkt->udp.len); while (chksum >> 16) chksum = (chksum & 0xFFFF) + (chksum >> 16); return bpf_htons(~chksum); } static const struct udpopt { __u32 num_pkts; struct iface iface; struct mac_addr dst_mac; struct mac_addr src_mac; struct ip_addr dst_ip; struct ip_addr src_ip; __u16 dst_port; __u16 src_port; __u16 dyn_ports; __u16 threads; __u16 interval; __u16 pkt_size; } defaults_udp = { .interval = 1, .threads = 1, .pkt_size = 64, }; static struct udp_packet *prepare_udp_pkt(const struct udpopt *cfg) { struct mac_addr src_mac = cfg->src_mac; struct udp_packet *pkt = NULL; __u16 payload_len; int err; if (macaddr_is_null(&src_mac)) { err = get_mac_addr(cfg->iface.ifindex, &src_mac); if (err) goto err; } if (cfg->pkt_size < sizeof(*pkt)) { pr_warn("Minimum packet size is %zu bytes\n", sizeof(*pkt)); goto err; } pkt = calloc(1, cfg->pkt_size); if (!pkt) goto err; memcpy(pkt, &pkt_udp, sizeof(*pkt)); payload_len = cfg->pkt_size - offsetof(struct udp_packet, udp); pkt->iph.payload_len = bpf_htons(payload_len); pkt->udp.len = bpf_htons(payload_len); 
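/* The rest of this function overrides the source/destination MACs, IPv6
 * addresses and UDP ports with any values given on the command line, then
 * fills in the UDP checksum. Because calloc() left the payload all-zero,
 * calc_udp_cksum() only needs to sum the IPv6 pseudo-header and UDP header
 * fields. */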
memcpy(pkt->eth.h_source, &src_mac, sizeof(src_mac)); if (!macaddr_is_null(&cfg->dst_mac)) memcpy(pkt->eth.h_dest, &cfg->dst_mac, sizeof(cfg->dst_mac)); if (!ipaddr_is_null(&cfg->src_ip)) { if (cfg->src_ip.af != AF_INET6) { pr_warn("Only IPv6 is supported\n"); goto err; } pkt->iph.saddr = cfg->src_ip.addr.addr6; } if (!ipaddr_is_null(&cfg->dst_ip)) { if (cfg->dst_ip.af != AF_INET6) { pr_warn("Only IPv6 is supported\n"); goto err; } pkt->iph.daddr = cfg->dst_ip.addr.addr6; } if (cfg->src_port) pkt->udp.source = bpf_htons(cfg->src_port); if (cfg->dst_port) pkt->udp.dest = bpf_htons(cfg->dst_port); pkt->udp.check = calc_udp_cksum(pkt); return pkt; err: free(pkt); return NULL; } static struct prog_option udp_options[] = { DEFINE_OPTION("dst-mac", OPT_MACADDR, struct udpopt, dst_mac, .short_opt = 'm', .metavar = "", .help = "Destination MAC address of generated packets"), DEFINE_OPTION("src-mac", OPT_MACADDR, struct udpopt, src_mac, .short_opt = 'M', .metavar = "", .help = "Source MAC address of generated packets"), DEFINE_OPTION("dst-addr", OPT_IPADDR, struct udpopt, dst_ip, .short_opt = 'a', .metavar = "", .help = "Destination IP address of generated packets"), DEFINE_OPTION("src-addr", OPT_IPADDR, struct udpopt, src_ip, .short_opt = 'A', .metavar = "", .help = "Source IP address of generated packets"), DEFINE_OPTION("dst-port", OPT_U16, struct udpopt, dst_port, .short_opt = 'p', .metavar = "", .help = "Destination port of generated packets"), DEFINE_OPTION("src-port", OPT_U16, struct udpopt, src_port, .short_opt = 'P', .metavar = "", .help = "Source port of generated packets"), DEFINE_OPTION("dyn-ports", OPT_U16, struct udpopt, dyn_ports, .short_opt = 'd', .metavar = "", .help = "Dynamically vary destination port over a range of "), DEFINE_OPTION("num-packets", OPT_U32, struct udpopt, num_pkts, .short_opt = 'n', .metavar = "", .help = "Number of packets to send"), DEFINE_OPTION("pkt-size", OPT_U16, struct udpopt, pkt_size, .short_opt = 's', .metavar = "", .help = "Packet size. 
Default 64."), DEFINE_OPTION("threads", OPT_U16, struct udpopt, threads, .short_opt = 't', .metavar = "", .help = "Number of simultaneous threads to transmit from"), DEFINE_OPTION("interval", OPT_U16, struct udpopt, interval, .short_opt = 'I', .metavar = "", .help = "Output statistics with this interval"), DEFINE_OPTION("interface", OPT_IFNAME, struct udpopt, iface, .positional = true, .metavar = "", .required = true, .help = "Load on device "), END_OPTIONS }; int do_udp(const void *opt, __unused const char *pin_root_path) { const struct udpopt *cfg = opt; DECLARE_LIBXDP_OPTS(xdp_program_opts, opts); struct thread_config *t = NULL, tcfg = { .pkt_size = cfg->pkt_size, .num_pkts = cfg->num_pkts, }; struct trafficgen_state bpf_state = {}; struct xdp_trafficgen *skel = NULL; struct udp_packet *payload = NULL; pthread_t *runner_threads = NULL; struct xdp_program *prog = NULL; int err = 0, i; char buf[100]; __u32 key = 0; err = probe_kernel_support(); if (err) return err; payload = prepare_udp_pkt(cfg); if (!payload) { err = -ENOMEM; goto out; } tcfg.pkt = payload; skel = xdp_trafficgen__open(); if (!skel) { err = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-err)); goto out; } err = sample_init_pre_load(skel, cfg->iface.ifname); if (err < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-err)); goto out; } skel->rodata->config.port_start = cfg->dst_port; skel->rodata->config.port_range = cfg->dyn_ports; skel->rodata->config.ifindex_out = cfg->iface.ifindex; bpf_state.next_port = cfg->dst_port; if (cfg->dyn_ports) opts.prog_name = "xdp_redirect_update_port"; else opts.prog_name = "xdp_redirect_notouch"; opts.obj = skel->obj; prog = xdp_program__create(&opts); if (!prog) { err = -errno; libxdp_strerror(err, buf, sizeof(buf)); pr_warn("Couldn't open BPF file: %s\n", buf); goto out; } err = xdp_trafficgen__load(skel); if (err) goto out; err = bpf_map_update_elem(bpf_map__fd(skel->maps.state_map), &key, &bpf_state, BPF_EXIST); if (err) { err = -errno; pr_warn("Couldn't set initial state map value: %s\n", strerror(-err)); goto out; } err = sample_init(skel, mask, IFINDEX_LO, cfg->iface.ifindex); if (err < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-err)); goto out; } err = create_runners(&runner_threads, &t, cfg->threads, &tcfg, prog); if (err) goto out; pr_info("Transmitting on %s (ifindex %d)\n", cfg->iface.ifname, cfg->iface.ifindex); err = sample_run(cfg->interval, NULL, NULL); status_exited = true; for (i = 0; i < cfg->threads; i++) { pthread_join(runner_threads[i], NULL); xdp_program__close(t[i].prog); } out: xdp_program__close(prog); xdp_trafficgen__destroy(skel); free(runner_threads); free(payload); free(t); return err; } struct tcp_packet { struct ethhdr eth; struct ipv6hdr iph; struct tcphdr tcp; __u8 payload[1500 - sizeof(struct tcphdr) - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)]; } __attribute__((__packed__)); static __unused struct tcp_packet pkt_tcp = { .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), .iph.version = 6, .iph.nexthdr = IPPROTO_TCP, .iph.payload_len = bpf_htons(sizeof(struct tcp_packet) - offsetof(struct tcp_packet, tcp)), .iph.hop_limit = 64, .iph.saddr.s6_addr16 = {bpf_htons(0xfe80), 0, 0, 0, 0, 0, 0, bpf_htons(1)}, .iph.daddr.s6_addr16 = {bpf_htons(0xfe80), 0, 0, 0, 0, 0, 0, bpf_htons(2)}, .tcp.source = bpf_htons(1), .tcp.dest = bpf_htons(1), .tcp.window = bpf_htons(0x100), .tcp.doff = 5, .tcp.ack = 1, }; static void hexdump_data(void *data, int size) { unsigned char *ptr = data; int i; for (i = 0; i < size; i++) { if (i 
% 16 == 0) pr_debug("\n%06X: ", i); else if (i % 2 == 0) pr_debug(" "); pr_debug("%02X", *ptr++); } pr_debug("\n"); } static __be16 calc_tcp_cksum(const struct tcp_packet *pkt) { __u32 chksum = bpf_htons(pkt->iph.nexthdr) + pkt->iph.payload_len; int payload_len = sizeof(pkt->payload); struct tcphdr tcph_ = pkt->tcp; __u16 *ptr = (void *)&tcph_; int i; tcph_.check = 0; for (i = 0; i < 8; i++) { chksum += pkt->iph.saddr.s6_addr16[i]; chksum += pkt->iph.daddr.s6_addr16[i]; } for (i = 0; i < 10; i++) chksum += *(ptr++); ptr = (void *)&pkt->payload; for (i = 0; i < payload_len / 2; i++) chksum += *(ptr++); if (payload_len % 2) chksum += (*((__u8 *)ptr)) << 8; while (chksum >> 16) chksum = (chksum & 0xFFFF) + (chksum >> 16); return ~chksum; } static void prepare_tcp_pkt(const struct tcp_flowkey *fkey, const struct tcp_flowstate *fstate) { memcpy(pkt_tcp.eth.h_source, fstate->src_mac, ETH_ALEN); memcpy(pkt_tcp.eth.h_dest, fstate->dst_mac, ETH_ALEN); pkt_tcp.iph.saddr = fkey->src_ip; pkt_tcp.iph.daddr = fkey->dst_ip; pkt_tcp.tcp.source = fkey->src_port; pkt_tcp.tcp.dest = fkey->dst_port; pkt_tcp.tcp.seq = bpf_htonl(fstate->seq); pkt_tcp.tcp.ack_seq = bpf_htonl(fstate->rcv_seq); pkt_tcp.tcp.check = calc_tcp_cksum(&pkt_tcp); pr_debug("TCP packet:\n"); hexdump_data(&pkt_tcp, sizeof(pkt_tcp)); } struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {NULL, 0} }; static const struct tcpopt { __u32 num_pkts; struct iface iface; char *dst_addr; __u16 dst_port; __u16 interval; __u16 timeout; enum xdp_attach_mode mode; } defaults_tcp = { .interval = 1, .dst_port = 10000, .timeout = 2, .mode = XDP_MODE_NATIVE, }; static struct prog_option tcp_options[] = { DEFINE_OPTION("dst-port", OPT_U16, struct tcpopt, dst_port, .short_opt = 'p', .metavar = "", .help = "Connect to destination . 
Default 10000"), DEFINE_OPTION("num-packets", OPT_U32, struct tcpopt, num_pkts, .short_opt = 'n', .metavar = "", .help = "Number of packets to send"), DEFINE_OPTION("interval", OPT_U16, struct tcpopt, interval, .short_opt = 'I', .metavar = "", .help = "Output statistics with this interval"), DEFINE_OPTION("timeout", OPT_U16, struct tcpopt, timeout, .short_opt = 't', .metavar = "", .help = "TCP connect timeout (default 2 seconds)."), DEFINE_OPTION("interface", OPT_IFNAME, struct tcpopt, iface, .metavar = "", .required = true, .short_opt = 'i', .help = "Connect through device "), DEFINE_OPTION("mode", OPT_ENUM, struct tcpopt, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load ingress XDP program in ; default native"), DEFINE_OPTION("dst-addr", OPT_STRING, struct tcpopt, dst_addr, .positional = true, .required = true, .metavar = "", .help = "Destination host of generated stream"), END_OPTIONS }; int do_tcp(const void *opt, __unused const char *pin_root_path) { const struct tcpopt *cfg = opt; struct addrinfo *ai = NULL, hints = { .ai_family = AF_INET6, .ai_socktype = SOCK_STREAM, .ai_protocol = IPPROTO_TCP, }; struct ip_addr local_addr = { .af = AF_INET6 }, remote_addr = { .af = AF_INET6 }; struct bpf_map *state_map = NULL, *fstate_map; DECLARE_LIBXDP_OPTS(xdp_program_opts, opts, .prog_name = "xdp_handle_tcp_recv"); struct xdp_program *ifindex_prog = NULL, *test_prog = NULL; struct sockaddr_in6 local_saddr = {}, *addr6; struct thread_config *t = NULL, tcfg = { .pkt = &pkt_tcp, .pkt_size = sizeof(pkt_tcp), .num_pkts = cfg->num_pkts, }; struct trafficgen_state bpf_state = {}; struct xdp_trafficgen *skel = NULL; char buf_local[50], buf_remote[50]; pthread_t *runner_threads = NULL; socklen_t sockaddr_sz, tcpi_sz; __u16 local_port, remote_port; int sock = -1, err = -EINVAL; struct tcp_flowstate fstate; struct timeval timeout = { .tv_sec = cfg->timeout, }; struct tcp_info tcpi = {}; bool attached = false; __u16 num_threads = 1; __u32 key = 0; char port[6]; int i, sopt; err = probe_kernel_support(); if (err) return err; skel = xdp_trafficgen__open(); if (!skel) { err = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-err)); goto out; } err = sample_init_pre_load(skel, cfg->iface.ifname); if (err < 0) { pr_warn("Failed to sample_init_pre_load: %s\n", strerror(-err)); goto out; } opts.obj = skel->obj; skel->rodata->config.ifindex_out = cfg->iface.ifindex; snprintf(port, sizeof(port), "%d", cfg->dst_port); err = getaddrinfo(cfg->dst_addr, port, &hints, &ai); if (err) { pr_warn("Couldn't resolve hostname: %s\n", gai_strerror(err)); goto out; } addr6 = (struct sockaddr_in6* )ai->ai_addr; remote_addr.addr.addr6 = addr6->sin6_addr; remote_port = bpf_ntohs(addr6->sin6_port); bpf_state.flow_key.dst_port = addr6->sin6_port; bpf_state.flow_key.dst_ip = addr6->sin6_addr; print_addr(buf_remote, sizeof(buf_remote), &remote_addr); ifindex_prog = xdp_program__create(&opts); if (!ifindex_prog) { err = -errno; pr_warn("Couldn't open XDP program: %s\n", strerror(-err)); goto out; } opts.prog_name = "xdp_redirect_send_tcp"; test_prog = xdp_program__create(&opts); if (!test_prog) { err = -errno; pr_warn("Couldn't find test program: %s\n", strerror(-err)); goto out; } state_map = skel->maps.state_map; fstate_map = skel->maps.flow_state_map; if (!fstate_map) { pr_warn("Couldn't find BPF maps\n"); goto out; } err = xdp_program__attach(ifindex_prog, cfg->iface.ifindex, cfg->mode, 0); if (err) { err = -errno; pr_warn("Couldn't attach XDP program to iface '%s': %s\n", cfg->iface.ifname, 
strerror(-err)); goto out; } attached = true; err = bpf_map_update_elem(bpf_map__fd(state_map), &key, &bpf_state, BPF_EXIST); if (err) { err = -errno; pr_warn("Couldn't set initial state map value: %s\n", strerror(-err)); goto out; } err = sample_init(skel, mask, IFINDEX_LO, cfg->iface.ifindex); if (err < 0) { pr_warn("Failed to initialize sample: %s\n", strerror(-err)); goto out; } sock = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP); if (sock < 0) { err = -errno; pr_warn("Couldn't open TCP socket: %s\n", strerror(-err)); goto out; } err = setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX, &cfg->iface.ifindex, sizeof(cfg->iface.ifindex)); if (err) { err = -errno; pr_warn("Couldn't bind to device '%s': %s\n", cfg->iface.ifname, strerror(-err)); goto out; } sopt = fcntl(sock, F_GETFL, NULL); if (sopt < 0) { err = -errno; pr_warn("Couldn't get socket opts: %s\n", strerror(-err)); goto out; } err = fcntl(sock, F_SETFL, sopt | O_NONBLOCK); if (err) { err = -errno; pr_warn("Couldn't set socket non-blocking: %s\n", strerror(-err)); goto out; } err = connect(sock, ai->ai_addr, ai->ai_addrlen); if (err && errno == EINPROGRESS) { fd_set wait; FD_ZERO(&wait); FD_SET(sock, &wait); err = select(sock + 1, NULL, &wait, NULL, &timeout); if (!err) { err = -1; errno = ETIMEDOUT; } else if (err > 0) { err = 0; } } if (err) { err = -errno; pr_warn("Couldn't connect to destination: %s\n", strerror(-err)); goto out; } err = fcntl(sock, F_SETFL, sopt); if (err) { err = -errno; pr_warn("Couldn't reset socket opts: %s\n", strerror(-err)); goto out; } sockaddr_sz = sizeof(local_saddr); err = getsockname(sock, (struct sockaddr *)&local_saddr, &sockaddr_sz); if (err) { err = -errno; pr_warn("Couldn't get local address: %s\n", strerror(-err)); goto out; } local_addr.addr.addr6 = local_saddr.sin6_addr; local_port = bpf_htons(local_saddr.sin6_port); print_addr(buf_local, sizeof(buf_local), &local_addr); printf("Connected to %s port %d from %s port %d\n", buf_remote, remote_port, buf_local, local_port); bpf_state.flow_key.src_port = local_saddr.sin6_port; bpf_state.flow_key.src_ip = local_saddr.sin6_addr; tcpi_sz = sizeof(tcpi); err = getsockopt(sock, IPPROTO_TCP, TCP_INFO, &tcpi, &tcpi_sz); if (err) { err = -errno; pr_warn("Couldn't get TCP_INFO for socket: %s\n", strerror(-err)); goto out; } err = bpf_map_lookup_elem(bpf_map__fd(fstate_map), &bpf_state.flow_key, &fstate); if (err) { err = -errno; pr_warn("Couldn't find flow state in map: %s\n", strerror(-err)); goto out; } if (tcpi.tcpi_snd_wnd != fstate.window) { pr_warn("TCP_INFO and packet data disagree on window (%u != %u)\n", tcpi.tcpi_snd_wnd, fstate.window); } fstate.wscale = tcpi.tcpi_rcv_wscale; fstate.flow_state = FLOW_STATE_RUNNING; err = bpf_map_update_elem(bpf_map__fd(fstate_map), &bpf_state.flow_key, &fstate, BPF_EXIST); if (err) { err = -errno; pr_warn("Couldn't update flow state map: %s\n", strerror(-err)); goto out; } err = bpf_map_update_elem(bpf_map__fd(state_map), &key, &bpf_state, BPF_EXIST); if (err) { err = -errno; pr_warn("Couldn't update program state map: %s\n", strerror(-err)); goto out; } prepare_tcp_pkt(&bpf_state.flow_key, &fstate); err = create_runners(&runner_threads, &t, num_threads, &tcfg, test_prog); if (err) goto out; err = sample_run(cfg->interval, NULL, NULL); status_exited = true; for (i = 0; i < num_threads; i++) { pthread_join(runner_threads[i], NULL); xdp_program__close(t[i].prog); } /* send 3 RSTs with 200ms interval to kill the other side of the connection */ for (i = 0; i < 3; i++) { usleep(200000); pkt_tcp.tcp.rst = 1; 
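/* Truncate the packet to a bare TCP header and recompute the checksum now
 * that the RST flag is set, then transmit it by calling run_traffic()
 * directly instead of spawning a runner thread. */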
pkt_tcp.iph.payload_len = bpf_htons(sizeof(struct tcphdr)); pkt_tcp.tcp.check = calc_tcp_cksum(&pkt_tcp); tcfg.cpu_core_id = 0; tcfg.num_pkts = 1; tcfg.pkt_size = offsetof(struct tcp_packet, payload); tcfg.prog = test_prog; run_traffic(&tcfg); } out: if (ai) freeaddrinfo(ai); if (sock >= 0) close(sock); if (attached) xdp_program__detach(ifindex_prog, cfg->iface.ifindex, cfg->mode, 0); xdp_program__close(ifindex_prog); xdp_program__close(test_prog); xdp_trafficgen__destroy(skel); free(runner_threads); free(t); return err; } static const struct probeopt { } defaults_probe = {}; static struct prog_option probe_options[] = {}; int do_probe(__unused const void *cfg, __unused const char *pin_root_path) { int err = probe_kernel_support(); if (!err) pr_info("Kernel supports live packet mode for XDP BPF_PROG_RUN.\n"); return err; } int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: xdp-trafficgen COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " udp - run in UDP mode\n" " tcp - run in TCP mode\n" " help - show this help message\n" "\n" "Use 'xdp-trafficgen COMMAND --help' to see options for each command\n"); return -1; } static const struct prog_command cmds[] = { DEFINE_COMMAND(udp, "Run in UDP mode"), DEFINE_COMMAND(tcp, "Run in TCP mode"), DEFINE_COMMAND(probe, "Probe kernel support"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct udpopt udp; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, false); return do_help(NULL, NULL); } xdp-tools-1.5.4/xdp-trafficgen/xdp-trafficgen.h0000644000175100001660000000166215003640462020775 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef XDP_TRAFFICGEN_H #define XDP_TRAFFICGEN_H #include #include #include struct tcp_flowkey { struct in6_addr src_ip; struct in6_addr dst_ip; __u16 dst_port; __u16 src_port; }; #define FLOW_STATE_NEW 1 #define FLOW_STATE_RUNNING 2 #define FLOW_STATE_DONE 3 struct tcp_flowstate { struct bpf_spin_lock lock; __u8 dst_mac[ETH_ALEN]; __u8 src_mac[ETH_ALEN]; __u64 last_progress; __u64 retransmits; __u32 flow_state; __u32 seq; /* our last sent seqno */ __u32 ack_seq; /* last seqno that got acked */ __u32 rcv_seq; /* receiver's seqno (our ACK seq) */ __u32 dupack; __u32 last_print; __u32 highest_seq; __u16 window; __u8 wscale; }; struct trafficgen_config { int ifindex_out; __u16 port_start; __u16 port_range; }; struct trafficgen_state { struct tcp_flowkey flow_key; __u16 next_port; }; #endif xdp-tools-1.5.4/xdp-trafficgen/xdp-trafficgen.80000644000175100001660000001311715003640462020713 0ustar runnerdocker.TH "xdp-trafficgen" "8" "JANUARY 9, 2025" "V1.5.4" "An XDP-based traffic generator" .SH "NAME" XDP-trafficgen \- an XDP-based traffic generator .SH "SYNOPSIS" .PP XDP-trafficgen is a packet generator utilising the XDP kernel subsystem to generate packets and transmit them through a network interface. Packets are dynamically generated and transmitted in the kernel, allowing for high performance (millions of packets per second per core). .PP XDP-trafficgen supports generating UDP traffic with fixed or dynamic destination ports, and also has basic support for generating dummy TCP traffic on a single flow. 
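.PP
For example, a minimal UDP invocation might look like the following sketch (the interface name \fIeth0\fP is only a placeholder; any device whose driver implements \fIndo_xdp_xmit\fP will do):
.RS
.nf
\fC# xdp-trafficgen udp eth0 --dst-port 2000 --threads 2 -n 1000000 -I 1
\fP
.fi
.RE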
.SS "Running xdp-traffigen" .PP The syntax for running xdp-trafficgen is: .RS .nf \fCUsage: xdp-trafficgen COMMAND [options] COMMAND can be one of: udp - run in UDP mode tcp - run in TCP mode \fP .fi .RE .PP Each command, and its options are explained below. Or use \fIxdp\-trafficgen COMMAND \-\-help\fP to see the options for each command. .SH "The UDP command" .PP The UDP command generates UDP traffic to a given destination IP and either a fixed destination port, or a range of port numbers. Only IPv6 traffic is supported, and the generated packets will have their IP hop limit set to 1, so they can't be routed. .PP The syntax for the \fIudp\fP command is: .PP \fIxdp\-trafficgen udp [options] \fP .PP Where \fI\fP is the name of the destination interface that packets will be transmitted on. Note that the network driver of this network interface must support being the target of XDP redirects (it must implement the \fIndo_xdp_xmit\fP driver operation). .PP The supported options are: .SS "-m, --dst-mac " .PP Set the destination MAC address of generated packets. The default is to generate packets with an all-zero destination MAC. .SS "-M, --src-mac " .PP Set the source MAC address of the generated packets. The default is to use the MAC address of the interface packets are transmitted on. .SS "-a, --dst-addr " .PP Destination IP address of generated packets. The default is the link-local \fIfe80::2\fP address. .SS "-A, --src-addr " .PP Source IP address of generated packets. The default is the link-local \fIfe80::1\fP address. .SS "-p, --dst-port " .PP Destination UDP port of generated packets, or the first port in the range if running with \fI\-\-dyn\-ports\fP set. Defaults to 1. .SS "-P, --src-port " .PP Source UDP port of generated packets. Defaults to 1. .SS "-d, --dyn-ports " .PP Enable dynamic port mode where the destination port is varied over a range of \fI\fP starting from the \fI\-\-dst\-port\fP. .SS "-n, --num-packets " .PP Number of packets to send before exiting. If not supplied, \fIxdp\-trafficgen\fP will keep sending packets until interrupted. .SS "-s, --pkt-size " .PP Size of each UDP packet being sent, including the Ethernet header. The minimum size, which is also the default, is 64 bytes. .SS "-t, --threads " .PP Number of simultaneous threads to transmit from. Each thread will be pinned to a separate CPU core if possible. Defaults to 1. .SS "-I, --interval " .PP Output transmission statistics with this interval (in seconds). .SS "-v, --verbose" .PP Enable verbose logging (-vv: more verbose). .SS "--version" .PP Display version information and exit. .SS "-h, --help" .PP Display a summary of the available options .SH "The TCP command" .PP The TCP command generates dummy TCP traffic in a single TCP flow. This relies on first installing an ingress XDP program on the interface used to transmit on. Then, a regular TCP socket connection is established from userspace, and once the handshake is completed, the XDP program will take over and start generating traffic on that flow tuple. The ingress XDP program will intercept ACK packets from the receiver, and keep track of the receive window. .PP The traffic generator has no congestion control, and only very basic retransmit tracking: in essence, any duplicate ACKs from the receiver will cause the sender to reset its send sequence number to the last ACKed value and restart from there. The same thing happens if no progress on the window is made within two seconds. 
This means that the traffic generator can generate a large amount of dummy traffic, but if there's packet loss a lot of this can be retransmissions. .PP The syntax for the \fItcp\fP command is: .PP \fIxdp\-trafficgen tcp [options] \-i \fP .PP Where \fI\fP is the name of the destination interface that packets will be transmitted on and \fI\fP is the peer hostname or IP address to connect to (only IPv6 is supported). Note that the network driver of this network interface must support being the target of XDP redirects (it must implement the \fIndo_xdp_xmit\fP driver operation). .PP The supported options are: .SS "-p, --dst-port " .PP Connect to destination . Default 10000. .SS "-m, --mode " .PP Load ingress XDP program in ; default native (valid values: native, skb, hw). .SS "-n, --num-packets " .PP Number of packets to send before exiting. If not supplied, \fIxdp\-trafficgen\fP will keep sending packets until interrupted. .SS "-I, --interval " .PP Output transmission statistics with this interval (in seconds). .SS "-v, --verbose" .PP Enable verbose logging (-vv: more verbose). .SS "--version" .PP Display version information and exit. .SS "-h, --help" .PP Display a summary of the available options. .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP xdp-trafficgen and this man page were written by Toke Høiland-Jørgensen. xdp-tools-1.5.4/xdp-trafficgen/xdp_trafficgen.bpf.c0000644000175100001660000002075215003640462021621 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #define XDP_STATS_MAP_PINNING LIBBPF_PIN_NONE #include "xdp-trafficgen.h" #include #include #include #include #include #include #include #include #include #include #include #include #if defined(HAVE_LIBBPF_BPF_PROGRAM__FLAGS) && defined(DEBUG) /* We use the many-argument version of bpf_printk() for debugging, so only * enable it if we have the libbpf helper that selects the vprintf version. This * was introduced in libbpf 0.6.0, which is the same version as the * bpf_program__flags() method, so use that as an indicator since we don't * feature detect on the BPF helpers themselves.
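 * * (With TCP_DEBUG defined, the receive and transmit programs below emit * per-packet flow state via bpf_printk().)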
*/ #define TCP_DEBUG #endif char _license[] SEC("license") = "GPL"; struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); __type(key, __u32); __type(value, struct trafficgen_state); } state_map SEC(".maps"); const volatile struct trafficgen_config config; static void update_checksum(__u16 *sum, __u32 diff) { /* We use the RFC 1071 method for incremental checksum updates * because that can be used directly with the 32-bit sequence * number difference (relying on folding for large differences) */ __u32 cksum = diff + (__u16)~bpf_ntohs(*sum); while (cksum > 0xffff) cksum = (cksum & 0xffff) + (cksum >> 16); *sum = bpf_htons(~cksum); } static __u16 csum_fold_helper(__u32 csum) { csum = (csum & 0xffff) + (csum >> 16); return ~((csum & 0xffff) + (csum >> 16)); } SEC("xdp") int xdp_redirect_notouch(struct xdp_md *ctx) { __u32 key = bpf_get_smp_processor_id(); struct datarec *rec; rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) return XDP_ABORTED; NO_TEAR_INC(rec->xdp_redirect); return bpf_redirect(config.ifindex_out, 0); } SEC("xdp") int xdp_redirect_update_port(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct trafficgen_state *state; __u16 cur_port, port_diff; int action = XDP_ABORTED; struct datarec *rec; struct udphdr *hdr; __u32 key = 0; hdr = data + (sizeof(struct ethhdr) + sizeof(struct ipv6hdr)); if (hdr + 1 > data_end) goto out; state = bpf_map_lookup_elem(&state_map, &key); if (!state) goto out; key = bpf_get_smp_processor_id(); rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) goto out; cur_port = bpf_ntohs(hdr->dest); port_diff = state->next_port - cur_port; if (port_diff) { update_checksum(&hdr->check, port_diff); hdr->dest = bpf_htons(state->next_port); } if (state->next_port++ >= config.port_start + config.port_range - 1) state->next_port = config.port_start; action = bpf_redirect(config.ifindex_out, 0); NO_TEAR_INC(rec->processed); out: return action; } SEC("xdp") int xdp_drop(struct xdp_md *ctx) { return XDP_DROP; } struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 1); __type(key, struct tcp_flowkey); __type(value, struct tcp_flowstate); } flow_state_map SEC(".maps"); static int cmp_ipaddr(struct in6_addr *a_, struct in6_addr *b_) { __u8 *a = (void *)a_, *b = (void *)b_; int i; for (i = 0; i < sizeof(struct in6_addr); i++) { if (*a > *b) return -1; if (*a < *b) return 1; a++; b++; } return 0; } static inline __u8 before(__u32 seq1, __u32 seq2) { return (__s32)(seq1 - seq2) < 0; } /* Fixed 2 second timeout */ #define TCP_RTO 2000000000UL SEC("xdp") int xdp_handle_tcp_recv(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; struct tcp_flowstate *fstate, new_fstate = {}; void *data = (void *)(long)ctx->data; struct hdr_cursor nh = { .pos = data }; struct trafficgen_state *state; struct tcp_flowkey key = {}; int eth_type, ip_type, err; struct ipv6hdr *ipv6hdr; struct tcphdr *tcphdr; int action = XDP_PASS; struct ethhdr *eth; __u8 new_match; __u32 ack_seq; int i; eth_type = parse_ethhdr(&nh, data_end, &eth); if (eth_type != bpf_htons(ETH_P_IPV6)) goto out; ip_type = parse_ip6hdr(&nh, data_end, &ipv6hdr); if (ip_type != IPPROTO_TCP) goto out; if (parse_tcphdr(&nh, data_end, &tcphdr) < 0) goto out; state = bpf_map_lookup_elem(&state_map, &key); if (!state) goto out; /* swap dst and src for received packet */ key.dst_ip = ipv6hdr->saddr; key.dst_port = tcphdr->source; new_match = !cmp_ipaddr(&key.dst_ip, &state->flow_key.dst_ip) && key.dst_port == state->flow_key.dst_port;
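/* Until the local port is fixed, only the peer (destination) side of the * tuple is known, so new_match is used below to accept the first packet of * a not-yet-tracked flow and create its initial flow-state entry. */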
key.src_ip = ipv6hdr->daddr; key.src_port = tcphdr->dest; fstate = bpf_map_lookup_elem(&flow_state_map, &key); if (!fstate) { if (!new_match) goto out; new_fstate.flow_state = FLOW_STATE_NEW; new_fstate.seq = bpf_ntohl(tcphdr->ack_seq); for (i = 0; i < ETH_ALEN; i++) { new_fstate.dst_mac[i] = eth->h_source[i]; new_fstate.src_mac[i] = eth->h_dest[i]; } err = bpf_map_update_elem(&flow_state_map, &key, &new_fstate, BPF_NOEXIST); if (err) goto out; fstate = bpf_map_lookup_elem(&flow_state_map, &key); if (!fstate) goto out; } ack_seq = bpf_ntohl(tcphdr->ack_seq); #ifdef TCP_DEBUG bpf_printk("Got state seq %u ack_seq %u new %u seq %u new %u window %u\n", fstate->seq, fstate->ack_seq, ack_seq, fstate->rcv_seq, bpf_ntohl(tcphdr->seq), bpf_htons(tcphdr->window)); #endif bpf_spin_lock(&fstate->lock); if (fstate->ack_seq == ack_seq) fstate->dupack++; fstate->window = bpf_ntohs(tcphdr->window); fstate->ack_seq = ack_seq; fstate->rcv_seq = bpf_ntohl(tcphdr->seq); if (tcphdr->syn) fstate->rcv_seq++; if (tcphdr->fin || tcphdr->rst) fstate->flow_state = FLOW_STATE_DONE; /* If we've taken over the flow management (after the handshake), drop * the packet */ if (fstate->flow_state >= FLOW_STATE_RUNNING) action = XDP_DROP; bpf_spin_unlock(&fstate->lock); out: return action; } SEC("xdp") int xdp_redirect_send_tcp(struct xdp_md *ctx) { void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; __u32 new_seq, ack_seq, window; struct trafficgen_state *state; struct tcp_flowstate *fstate; int action = XDP_ABORTED; struct ipv6hdr *ipv6hdr; struct tcphdr *tcphdr; struct datarec *rec; __u8 resend = 0; #ifdef TCP_DEBUG __u8 print = 0; #endif __u16 pkt_len; __u32 key = 0; __u64 now; ipv6hdr = data + sizeof(struct ethhdr); tcphdr = data + (sizeof(struct ethhdr) + sizeof(struct ipv6hdr)); if (tcphdr + 1 > data_end || ipv6hdr + 1 > data_end) goto ret; pkt_len = bpf_ntohs(ipv6hdr->payload_len) - sizeof(*tcphdr); state = bpf_map_lookup_elem(&state_map, &key); if (!state) goto ret; key = bpf_get_smp_processor_id(); rec = bpf_map_lookup_elem(&rx_cnt, &key); if (!rec) goto ret; fstate = bpf_map_lookup_elem(&flow_state_map, (const void *)&state->flow_key); if (!fstate) goto out; now = bpf_ktime_get_coarse_ns(); bpf_spin_lock(&fstate->lock); if (fstate->flow_state != FLOW_STATE_RUNNING) { action = XDP_DROP; bpf_spin_unlock(&fstate->lock); goto out; } /* reset sequence on packet loss */ if (fstate->dupack || (fstate->last_progress && now - fstate->last_progress > TCP_RTO)) { fstate->seq = fstate->ack_seq; fstate->dupack = 0; } new_seq = fstate->seq; ack_seq = fstate->ack_seq; window = fstate->window << fstate->wscale; #ifdef TCP_DEBUG if (fstate->last_print != fstate->seq) { fstate->last_print = fstate->seq; print = 1; } #endif if (!before(new_seq + pkt_len, ack_seq + window)) { /* We caught up to the end of the RWIN, spin until ACKs come * back opening up the window */ action = XDP_DROP; bpf_spin_unlock(&fstate->lock); #ifdef TCP_DEBUG if (print) bpf_printk("Dropping because %u isn't before %u (ack_seq %u wnd %u)", new_seq + pkt_len, ack_seq + window, ack_seq, window); #endif goto out; } if (!before(new_seq, fstate->highest_seq)) { fstate->highest_seq = new_seq; } else { resend = 1; fstate->retransmits++; } fstate->seq = new_seq + pkt_len; fstate->last_progress = now; bpf_spin_unlock(&fstate->lock); new_seq = bpf_htonl(new_seq); if (new_seq != tcphdr->seq) { __u32 csum; csum = bpf_csum_diff(&tcphdr->seq, sizeof(__u32), &new_seq, sizeof(new_seq), ~tcphdr->check); tcphdr->seq = new_seq; tcphdr->check =
csum_fold_helper(csum); } action = bpf_redirect(config.ifindex_out, 0); out: /* record retransmissions as XDP_TX return codes until we get better stats */ if (resend) NO_TEAR_INC(rec->issue); if (action == XDP_REDIRECT) NO_TEAR_INC(rec->xdp_redirect); else NO_TEAR_INC(rec->dropped); ret: return action; } xdp-tools-1.5.4/xdp-trafficgen/.gitignore0000644000175100001660000000001715003640462017704 0ustar runnerdockerxdp-trafficgen xdp-tools-1.5.4/xdp-trafficgen/Makefile0000644000175100001660000000064315003640462017361 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) XDP_TARGETS := xdp_trafficgen.bpf BPF_SKEL_TARGETS := $(XDP_TARGETS) # Don't install skeleton object files XDP_OBJ_INSTALL := TOOL_NAME := xdp-trafficgen USER_TARGETS := xdp-trafficgen MAN_PAGE := xdp-trafficgen.8 EXTRA_DEPS := xdp-trafficgen.h USER_LIBS = -lpthread TEST_FILE := tests/test-xdp-trafficgen.sh LIB_DIR = ../lib include $(LIB_DIR)/common.mk xdp-tools-1.5.4/headers/0000755000175100001660000000000015003640462014430 5ustar runnerdockerxdp-tools-1.5.4/headers/xdp/0000755000175100001660000000000015003640462015223 5ustar runnerdockerxdp-tools-1.5.4/headers/xdp/xdp_stats_kern.h0000644000175100001660000000261315003640462020426 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /* Used *ONLY* by BPF-prog running kernel side. */ #ifndef __XDP_STATS_KERN_H #define __XDP_STATS_KERN_H /* Data record type 'struct datarec' is defined in common/xdp_stats_kern_user.h, * programs using this header must first include that file. */ #ifndef __XDP_STATS_KERN_USER_H #warning "You forgot to #include <../common/xdp_stats_kern_user.h>" #include <../common/xdp_stats_kern_user.h> #endif #ifndef XDP_STATS_MAP_PINNING #define XDP_STATS_MAP_PINNING LIBBPF_PIN_BY_NAME #endif /* Keeps stats per (enum) xdp_action */ struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, XDP_ACTION_MAX); __type(key, __u32); __type(value, struct xdp_stats_record); __uint(pinning, LIBBPF_PIN_BY_NAME); } XDP_STATS_MAP_NAME SEC(".maps"); static __always_inline __u32 xdp_stats_record_action(struct xdp_md *ctx, __u32 action) { if (action >= XDP_ACTION_MAX) return XDP_ABORTED; /* Lookup in kernel BPF-side return pointer to actual data record */ struct xdp_stats_record *rec = bpf_map_lookup_elem(&xdp_stats_map, &action); if (!rec) return XDP_ABORTED; /* BPF_MAP_TYPE_PERCPU_ARRAY returns a data record specific to the current * CPU, and XDP hooks run under softirq, which makes it safe to update * without atomic operations. */ rec->rx_packets++; rec->rx_bytes += (ctx->data_end - ctx->data); return action; } #endif /* __XDP_STATS_KERN_H */ xdp-tools-1.5.4/headers/xdp/parsing_helpers.h0000644000175100001660000001365315003640462020567 0ustar runnerdocker/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-clause) */ /* * This file contains parsing functions that can be used in XDP programs. The * functions are marked as __always_inline, and fully defined in this header * file to be included in the BPF program. * * Each helper parses a packet header, including doing bounds checking, and * returns the type of its contents if successful, and -1 otherwise. * * For Ethernet and IP headers, the content type is the type of the payload * (h_proto for Ethernet, nexthdr for IPv6), for ICMP it is the ICMP type field. * All return values are in host byte order, except parse_ethhdr(), which * returns the EtherType in network byte order (as noted at its definition * below).
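 * * A minimal usage sketch, mirroring how xdp_trafficgen.bpf.c in this tree * drives these helpers (data and data_end come from the xdp_md context): * *	struct hdr_cursor nh = { .pos = data }; *	struct ipv6hdr *ipv6hdr; *	struct ethhdr *eth; * *	if (parse_ethhdr(&nh, data_end, &eth) != bpf_htons(ETH_P_IPV6)) *		return XDP_PASS; *	if (parse_ip6hdr(&nh, data_end, &ipv6hdr) != IPPROTO_TCP) *		return XDP_PASS; *	... nh.pos now points at the TCP header ...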
*/ #ifndef __PARSING_HELPERS_H #define __PARSING_HELPERS_H #include #include #include #include #include #include #include #include #include #include #include /* Header cursor to keep track of current parsing position */ struct hdr_cursor { void *pos; }; /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID * @h_vlan_encapsulated_proto: packet type ID or len */ struct vlan_hdr { __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; }; /* * Struct icmphdr_common represents the common part of the icmphdr and icmp6hdr * structures. */ struct icmphdr_common { __u8 type; __u8 code; __sum16 cksum; }; /* Allow users of header file to redefine VLAN max depth */ #ifndef VLAN_MAX_DEPTH #define VLAN_MAX_DEPTH 4 #endif /* Longest chain of IPv6 extension headers to resolve */ #ifndef IPV6_EXT_MAX_CHAIN #define IPV6_EXT_MAX_CHAIN 6 #endif static __always_inline int proto_is_vlan(__u16 h_proto) { return !!(h_proto == bpf_htons(ETH_P_8021Q) || h_proto == bpf_htons(ETH_P_8021AD)); } /* Notice: parse_ethhdr() will skip VLAN tags, by advancing nh->pos, and return the * next header EtherType, BUT the ethhdr pointer supplied still points to the * Ethernet header. Thus, the caller can look at eth->h_proto to see if this was a * VLAN tagged packet. */ static __always_inline int parse_ethhdr(struct hdr_cursor *nh, void *data_end, struct ethhdr **ethhdr) { struct ethhdr *eth = nh->pos; struct vlan_hdr *vlh; __u16 h_proto; int i; if (eth + 1 > data_end) return -1; nh->pos = eth + 1; *ethhdr = eth; vlh = nh->pos; h_proto = eth->h_proto; /* Use loop unrolling to avoid the verifier restriction on loops; * support up to VLAN_MAX_DEPTH layers of VLAN encapsulation. */ #pragma unroll for (i = 0; i < VLAN_MAX_DEPTH; i++) { if (!proto_is_vlan(h_proto)) break; if (vlh + 1 > data_end) break; h_proto = vlh->h_vlan_encapsulated_proto; vlh++; } nh->pos = vlh; return h_proto; /* network-byte-order */ } static __always_inline int skip_ip6hdrext(struct hdr_cursor *nh, void *data_end, __u8 next_hdr_type) { for (int i = 0; i < IPV6_EXT_MAX_CHAIN; ++i) { struct ipv6_opt_hdr *hdr = nh->pos; if (hdr + 1 > data_end) return -1; switch (next_hdr_type) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: case IPPROTO_MH: nh->pos = (char *)hdr + (hdr->hdrlen + 1) * 8; next_hdr_type = hdr->nexthdr; break; case IPPROTO_AH: nh->pos = (char *)hdr + (hdr->hdrlen + 2) * 4; next_hdr_type = hdr->nexthdr; break; case IPPROTO_FRAGMENT: nh->pos = (char *)hdr + 8; next_hdr_type = hdr->nexthdr; break; default: /* Found a header that is not an IPv6 extension header */ return next_hdr_type; } } return -1; } static __always_inline int parse_ip6hdr(struct hdr_cursor *nh, void *data_end, struct ipv6hdr **ip6hdr) { struct ipv6hdr *ip6h = nh->pos; /* Pointer-arithmetic bounds check; pointer +1 points to after end of * thing being pointed to. We will be using this style in the remainder * of this file.
*/ if (ip6h + 1 > data_end) return -1; nh->pos = ip6h + 1; *ip6hdr = ip6h; return skip_ip6hdrext(nh, data_end, ip6h->nexthdr); } static __always_inline int parse_iphdr(struct hdr_cursor *nh, void *data_end, struct iphdr **iphdr) { struct iphdr *iph = nh->pos; int hdrsize; if (iph + 1 > data_end) return -1; hdrsize = iph->ihl * 4; /* Variable-length IPv4 header, need to use byte-based arithmetic */ if (nh->pos + hdrsize > data_end) return -1; nh->pos += hdrsize; *iphdr = iph; return iph->protocol; } static __always_inline int parse_icmp6hdr(struct hdr_cursor *nh, void *data_end, struct icmp6hdr **icmp6hdr) { struct icmp6hdr *icmp6h = nh->pos; if (icmp6h + 1 > data_end) return -1; nh->pos = icmp6h + 1; *icmp6hdr = icmp6h; return icmp6h->icmp6_type; } static __always_inline int parse_icmphdr(struct hdr_cursor *nh, void *data_end, struct icmphdr **icmphdr) { struct icmphdr *icmph = nh->pos; if (icmph + 1 > data_end) return -1; nh->pos = icmph + 1; *icmphdr = icmph; return icmph->type; } static __always_inline int parse_icmphdr_common(struct hdr_cursor *nh, void *data_end, struct icmphdr_common **icmphdr) { struct icmphdr_common *h = nh->pos; if (h + 1 > data_end) return -1; nh->pos = h + 1; *icmphdr = h; return h->type; } /* * parse_udphdr: parse the udp header and return the length of the udp payload */ static __always_inline int parse_udphdr(struct hdr_cursor *nh, void *data_end, struct udphdr **udphdr) { int len; struct udphdr *h = nh->pos; if (h + 1 > data_end) return -1; nh->pos = h + 1; *udphdr = h; len = bpf_ntohs(h->len) - sizeof(struct udphdr); if (len < 0) return -1; return len; } /* * parse_tcphdr: parse and return the length of the tcp header */ static __always_inline int parse_tcphdr(struct hdr_cursor *nh, void *data_end, struct tcphdr **tcphdr) { int len; struct tcphdr *h = nh->pos; if (h + 1 > data_end) return -1; len = h->doff * 4; if ((void *) h + len > data_end) return -1; nh->pos = h + 1; *tcphdr = h; return len; } #endif /* __PARSING_HELPERS_H */ xdp-tools-1.5.4/headers/xdp/prog_dispatcher.h0000644000175100001660000000221015003640462020546 0ustar runnerdocker/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-clause) */ #ifndef __PROG_DISPATCHER_H #define __PROG_DISPATCHER_H #include #define XDP_METADATA_SECTION "xdp_metadata" #define XDP_DISPATCHER_VERSION 2 /* magic byte is 'X' + 'D' + 'P' (88+68+80=236) */ #define XDP_DISPATCHER_MAGIC 236 /* default retval for dispatcher corresponds to the highest bit in the * chain_call_actions bitmap; we use this to make sure the dispatcher always * continues the call chain if a function does not have an freplace program * attached.
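 * * For example, a slot whose program should chain-call on XDP_PASS has * (1 << XDP_PASS) set in its chain_call_actions entry (this is * XDP_DEFAULT_CHAIN_CALL_ACTIONS from xdp_helpers.h); the dispatcher relies * on bit 31 (XDP_DISPATCHER_RETVAL) being set in the same bitmap so that an * empty slot's stub return value keeps the chain going.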
*/ #define XDP_DISPATCHER_RETVAL 31 #ifndef MAX_DISPATCHER_ACTIONS #define MAX_DISPATCHER_ACTIONS 10 #endif struct xdp_dispatcher_config { __u8 magic; /* Set to XDP_DISPATCHER_MAGIC */ __u8 dispatcher_version; /* Set to XDP_DISPATCHER_VERSION */ __u8 num_progs_enabled; /* Number of active program slots */ __u8 is_xdp_frags; /* Whether this dispatcher is loaded with XDP frags support */ __u32 chain_call_actions[MAX_DISPATCHER_ACTIONS]; __u32 run_prios[MAX_DISPATCHER_ACTIONS]; __u32 program_flags[MAX_DISPATCHER_ACTIONS]; }; #endif xdp-tools-1.5.4/headers/xdp/xdp_sample.bpf.h0000644000175100001660000000674415003640462020311 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 #ifndef _XDP_SAMPLE_BPF_H #define _XDP_SAMPLE_BPF_H #include #include #include #include #include #include "xdp_sample_shared.h" #define ETH_ALEN 6 #define ETH_P_802_3_MIN 0x0600 #define ETH_P_8021Q 0x8100 #define ETH_P_8021AD 0x88A8 #define ETH_P_IP 0x0800 #define ETH_P_IPV6 0x86DD #define ETH_P_ARP 0x0806 #define IPPROTO_ICMPV6 58 #define EINVAL 22 #define ENETDOWN 100 #define EMSGSIZE 90 #define EOPNOTSUPP 95 #define ENOSPC 28 typedef struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(map_flags, BPF_F_MMAPABLE); __type(key, unsigned int); __type(value, struct datarec); } array_map; extern array_map rx_cnt; extern const volatile int nr_cpus; enum { XDP_REDIRECT_SUCCESS = 0, XDP_REDIRECT_ERROR = 1 }; static __always_inline void swap_src_dst_mac(void *data) { unsigned short *p = data; unsigned short dst[3]; dst[0] = p[0]; dst[1] = p[1]; dst[2] = p[2]; p[0] = p[3]; p[1] = p[4]; p[2] = p[5]; p[3] = dst[0]; p[4] = dst[1]; p[5] = dst[2]; } /* * Note: including linux/compiler.h or linux/kernel.h for the macros below * conflicts with vmlinux.h include in BPF files, so we define them here. * * Following functions are taken from kernel sources and * break aliasing rules in their original form. * * While kernel is compiled with -fno-strict-aliasing, * perf uses -Wstrict-aliasing=3 which makes build fail * under gcc 4.4. * * Using extra __may_alias__ type to allow aliasing * in this case. 
*/ typedef __u8 __attribute__((__may_alias__)) __u8_alias_t; typedef __u16 __attribute__((__may_alias__)) __u16_alias_t; typedef __u32 __attribute__((__may_alias__)) __u32_alias_t; typedef __u64 __attribute__((__may_alias__)) __u64_alias_t; static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { switch (size) { case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break; case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break; case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break; case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break; default: asm volatile ("" : : : "memory"); __builtin_memcpy((void *)res, (const void *)p, size); asm volatile ("" : : : "memory"); } } static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break; case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break; case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break; case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break; default: asm volatile ("" : : : "memory"); __builtin_memcpy((void *)p, (const void *)res, size); asm volatile ("" : : : "memory"); } } #define READ_ONCE(x) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ { .__c = { 0 } }; \ __read_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ { .__val = (val) }; \ __write_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) /* Add a value using relaxed read and relaxed write. Less expensive than * fetch_add when there is no write concurrency. */ #define NO_TEAR_ADD(x, val) WRITE_ONCE((x), READ_ONCE(x) + (val)) #define NO_TEAR_INC(x) NO_TEAR_ADD((x), 1) #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif xdp-tools-1.5.4/headers/xdp/xdp_helpers.h0000644000175100001660000000045415003640462017714 0ustar runnerdocker/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-clause) */ #ifndef __XDP_HELPERS_H #define __XDP_HELPERS_H #define _CONCAT(x,y) x ## y #define XDP_RUN_CONFIG(f) _CONCAT(_,f) SEC(".xdp_run_config") #define XDP_DEFAULT_RUN_PRIO 50 #define XDP_DEFAULT_CHAIN_CALL_ACTIONS (1 << XDP_PASS) #endif xdp-tools-1.5.4/headers/xdp/xsk.h /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* * AF_XDP user-space access library. */ /* So as not to clash with these functions when they were part of libbpf */ #ifndef __LIBBPF_XSK_H #define __LIBBPF_XSK_H #include #include #include #include #ifdef __cplusplus extern "C" { #endif #ifdef __GNUC_STDC_INLINE__ #define XDP_ALWAYS_INLINE inline __attribute__((__always_inline__)) #elif __GNUC_GNU_INLINE__ #define XDP_ALWAYS_INLINE static inline __attribute__((__always_inline__)) #else #define XDP_ALWAYS_INLINE static inline #endif /* Do not access these members directly. Use the functions below. */ #define DEFINE_XSK_RING(name) \ struct name { \ __u32 cached_prod; \ __u32 cached_cons; \ __u32 mask; \ __u32 size; \ __u32 *producer; \ __u32 *consumer; \ void *ring; \ __u32 *flags; \ } DEFINE_XSK_RING(xsk_ring_prod); DEFINE_XSK_RING(xsk_ring_cons); /* For a detailed explanation on the memory barriers associated with the * ring, please take a look at net/xdp/xsk_queue.h in the Linux kernel source tree.
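 * * A sketch of typical consumer-side usage (the 'rx' ring and 'umem_area' * names are illustrative; error handling omitted): * *	__u32 idx, i; *	__u32 n = xsk_ring_cons__peek(&rx, 64, &idx); * *	for (i = 0; i < n; i++) { *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i); *		void *pkt = xsk_umem__get_data(umem_area, desc->addr); *		... process desc->len bytes at pkt ... *	} *	if (n) *		xsk_ring_cons__release(&rx, n);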
*/ struct xsk_umem; struct xsk_socket; XDP_ALWAYS_INLINE __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, __u32 idx) { __u64 *addrs = (__u64 *)fill->ring; return &addrs[idx & fill->mask]; } XDP_ALWAYS_INLINE const __u64 * xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx) { const __u64 *addrs = (const __u64 *)comp->ring; return &addrs[idx & comp->mask]; } XDP_ALWAYS_INLINE struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, __u32 idx) { struct xdp_desc *descs = (struct xdp_desc *)tx->ring; return &descs[idx & tx->mask]; } XDP_ALWAYS_INLINE const struct xdp_desc * xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx) { const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring; return &descs[idx & rx->mask]; } XDP_ALWAYS_INLINE int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r) { return *r->flags & XDP_RING_NEED_WAKEUP; } XDP_ALWAYS_INLINE __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb) { __u32 free_entries = r->cached_cons - r->cached_prod; if (free_entries >= nb) return free_entries; /* Refresh the local tail pointer. * cached_cons is r->size bigger than the real consumer pointer so * that this addition can be avoided in the more frequently * executed code that computes free_entries in the beginning of * this function. Without this optimization it would have been * free_entries = r->cached_cons - r->cached_prod + r->size */ r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE); r->cached_cons += r->size; return r->cached_cons - r->cached_prod; } XDP_ALWAYS_INLINE __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb) { __u32 entries = r->cached_prod - r->cached_cons; if (entries == 0) { r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE); entries = r->cached_prod - r->cached_cons; } return (entries > nb) ? nb : entries; } XDP_ALWAYS_INLINE __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx) { if (xsk_prod_nb_free(prod, nb) < nb) return 0; *idx = prod->cached_prod; prod->cached_prod += nb; return nb; } XDP_ALWAYS_INLINE void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb) { /* Make sure everything has been written to the ring before indicating * this to the kernel by writing the producer pointer. */ __atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE); } XDP_ALWAYS_INLINE __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx) { __u32 entries = xsk_cons_nb_avail(cons, nb); if (entries > 0) { *idx = cons->cached_cons; cons->cached_cons += entries; } return entries; } XDP_ALWAYS_INLINE void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb) { cons->cached_cons -= nb; } XDP_ALWAYS_INLINE void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb) { /* Make sure data has been read before indicating we are done * with the entries by updating the consumer pointer.
*/ __atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE); } XDP_ALWAYS_INLINE void *xsk_umem__get_data(void *umem_area, __u64 addr) { return &((char *)umem_area)[addr]; } XDP_ALWAYS_INLINE __u64 xsk_umem__extract_addr(__u64 addr) { return addr & XSK_UNALIGNED_BUF_ADDR_MASK; } XDP_ALWAYS_INLINE __u64 xsk_umem__extract_offset(__u64 addr) { return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; } XDP_ALWAYS_INLINE __u64 xsk_umem__add_offset_to_addr(__u64 addr) { return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr); } int xsk_umem__fd(const struct xsk_umem *umem); int xsk_socket__fd(const struct xsk_socket *xsk); #define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 #define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 #define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */ #define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) #define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 #define XSK_UMEM__DEFAULT_FLAGS 0 #define XSK_UMEM__DEFAULT_TX_METADATA_LEN 0 struct xsk_umem_config { __u32 fill_size; __u32 comp_size; __u32 frame_size; __u32 frame_headroom; __u32 flags; }; /* The following fields are optional: * * @fd, @size, @fill_size, @comp_size, @frame_size, @frame_headroom, * @flags, @tx_metadata_len * If @fd is unset, a new sockfd will be created. * If @size is unset, @umem_area must be page-aligned. * If the remaining fields are unset, they will be set to * default value (see `xsk_set_umem_config()`). * * Except for the fields mentioned above, no field can be set. */ struct xsk_umem_opts { size_t sz; int fd; __u64 size; __u32 fill_size; __u32 comp_size; __u32 frame_size; __u32 frame_headroom; __u32 flags; __u32 tx_metadata_len; size_t :0; }; #define xsk_umem_opts__last_field tx_metadata_len int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd); int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd); /* Flags for the libbpf_flags field. * We still call this field libbpf_flags for compatibility reasons. */ #define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) #define XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD (1 << 0) struct xsk_socket_config { __u32 rx_size; __u32 tx_size; union { __u32 libbpf_flags; __u32 libxdp_flags; }; __u32 xdp_flags; __u16 bind_flags; }; /* * The following fields should not be NULL at the same time: * * @rx, @tx * At least one traffic direction should be assigned for an xsk. * * The following fields are optional: * * @fill, @comp, @rx_size, @tx_size, @libxdp_flags, @xdp_flags, * @bind_flags * If @fill and @comp are both unset, they will be set to umem's * fill_save and comp_save respectively. Note that it is invalid * to set only one of them. * If the remaining fields are unset, they will be set to * default value (see `xsk_set_xdp_socket_config()`). * * Except for the fields mentioned above, no field can be set. */ struct xsk_socket_opts { size_t sz; struct xsk_ring_cons *rx; struct xsk_ring_prod *tx; struct xsk_ring_prod *fill; struct xsk_ring_cons *comp; __u32 rx_size; __u32 tx_size; __u32 libxdp_flags; __u32 xdp_flags; __u16 bind_flags; size_t :0; }; #define xsk_socket_opts__last_field bind_flags /* Set config to NULL to get the default configuration. 
*/ int xsk_umem__create(struct xsk_umem **umem, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); int xsk_umem__create_with_fd(struct xsk_umem **umem, int fd, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *config); /* Newer version to create umem by opts, recommended to use. */ struct xsk_umem *xsk_umem__create_opts(void *umem_area, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, struct xsk_umem_opts *opts); int xsk_socket__create(struct xsk_socket **xsk, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *config); int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_socket_config *config); /* Newer version to create xsk by opts, recommended to use. */ struct xsk_socket *xsk_socket__create_opts(const char *ifname, __u32 queue_id, struct xsk_umem *umem, struct xsk_socket_opts *opts); /* Returns 0 for success and -EBUSY if the umem is still in use. */ int xsk_umem__delete(struct xsk_umem *umem); void xsk_socket__delete(struct xsk_socket *xsk); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBBPF_XSK_H */ /* For new functions post libbpf */ #ifndef __LIBXDP_XSK_H #define __LIBXDP_XSK_H #ifdef __cplusplus extern "C" { #endif #ifdef __cplusplus } /* extern "C" */ #endif #endif /* __LIBXDP_XSK_H */ xdp-tools-1.5.4/headers/xdp/xdp_sample_shared.h0000644000175100001660000000047415003640462021063 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0-only #ifndef _XDP_SAMPLE_SHARED_H #define _XDP_SAMPLE_SHARED_H #include struct datarec { size_t processed; size_t dropped; size_t issue; union { size_t xdp_pass; size_t info; }; size_t xdp_drop; size_t xdp_redirect; } __attribute__((aligned(64))); #endif xdp-tools-1.5.4/headers/xdp/xdp_stats_kern_user.h0000644000175100001660000000105115003640462021457 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /* Used by BPF-prog kernel side BPF-progs and userspace programs, * for sharing xdp_stats common struct and DEFINEs. 
*/ #ifndef __XDP_STATS_KERN_USER_H #define __XDP_STATS_KERN_USER_H /* This is the data record stored in the map */ struct xdp_stats_record { union { __u64 packets; __u64 rx_packets; }; union { __u64 bytes; __u64 rx_bytes; }; }; #ifndef XDP_ACTION_MAX #define XDP_ACTION_MAX (XDP_REDIRECT + 1) #endif #define XDP_STATS_MAP_NAME xdp_stats_map #endif /* __XDP_STATS_KERN_USER_H */ xdp-tools-1.5.4/headers/xdp/libxdp.h0000644000175100001660000001350015003640462016655 0ustar runnerdocker// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* * XDP management utility functions * * Copyright (C) 2020 Toke Høiland-Jørgensen */ #ifndef __LIBXDP_LIBXDP_H #define __LIBXDP_LIBXDP_H #include #include #include #include #include "xdp_helpers.h" #ifdef __cplusplus extern "C" { #endif #define XDP_BPFFS_ENVVAR "LIBXDP_BPFFS" #define XDP_BPFFS_MOUNT_ENVVAR "LIBXDP_BPFFS_AUTOMOUNT" #define XDP_OBJECT_ENVVAR "LIBXDP_OBJECT_PATH" enum xdp_attach_mode { XDP_MODE_UNSPEC = 0, XDP_MODE_NATIVE, XDP_MODE_SKB, XDP_MODE_HW }; /* This is compatible with libbpf logging levels */ enum libxdp_print_level { LIBXDP_WARN, LIBXDP_INFO, LIBXDP_DEBUG, }; typedef int (*libxdp_print_fn_t)(enum libxdp_print_level level, const char *, va_list ap); libxdp_print_fn_t libxdp_set_print(libxdp_print_fn_t fn); struct xdp_program; struct xdp_multiprog; long libxdp_get_error(const void *ptr); int libxdp_strerror(int err, char *buf, size_t size); int libxdp_clean_references(int ifindex); struct xdp_program *xdp_program__from_bpf_obj(struct bpf_object *obj, const char *section_name); struct xdp_program *xdp_program__find_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__open_file(const char *filename, const char *section_name, struct bpf_object_open_opts *opts); struct xdp_program *xdp_program__from_fd(int fd); struct xdp_program *xdp_program__from_id(__u32 prog_id); struct xdp_program *xdp_program__from_pin(const char *pin_path); struct xdp_program *xdp_program__clone(struct xdp_program *xdp_prog, unsigned int flags); void xdp_program__close(struct xdp_program *xdp_prog); int xdp_program__test_run(struct xdp_program *xdp_prog, struct bpf_test_run_opts *opts, unsigned int flags); enum xdp_attach_mode xdp_program__is_attached(const struct xdp_program *xdp_prog, int ifindex); const char *xdp_program__name(const struct xdp_program *xdp_prog); const unsigned char *xdp_program__tag(const struct xdp_program *xdp_prog); struct bpf_object *xdp_program__bpf_obj(struct xdp_program *xdp_prog); const struct btf *xdp_program__btf(struct xdp_program *xdp_prog); uint32_t xdp_program__id(const struct xdp_program *xdp_prog); int xdp_program__fd(const struct xdp_program *xdp_prog); unsigned int xdp_program__run_prio(const struct xdp_program *xdp_prog); int xdp_program__set_run_prio(struct xdp_program *xdp_prog, unsigned int run_prio); bool xdp_program__chain_call_enabled(const struct xdp_program *xdp_prog, enum xdp_action action); int xdp_program__set_chain_call_enabled(struct xdp_program *prog, unsigned int action, bool enabled); int xdp_program__print_chain_call_actions(const struct xdp_program *prog, char *buf, size_t buf_len); bool xdp_program__xdp_frags_support(const struct xdp_program *prog); int xdp_program__set_xdp_frags_support(struct xdp_program *prog, bool frags); int xdp_program__pin(struct xdp_program *xdp_prog, const char *pin_path); int xdp_program__attach(struct xdp_program *xdp_prog, int ifindex, enum xdp_attach_mode mode, unsigned int flags); int 
xdp_program__attach_multi(struct xdp_program **progs, size_t num_progs, int ifindex, enum xdp_attach_mode mode, unsigned int flags); int xdp_program__detach(struct xdp_program *xdp_prog, int ifindex, enum xdp_attach_mode mode, unsigned int flags); int xdp_program__detach_multi(struct xdp_program **progs, size_t num_progs, int ifindex, enum xdp_attach_mode mode, unsigned int flags); struct xdp_multiprog *xdp_multiprog__get_from_ifindex(int ifindex); struct xdp_program *xdp_multiprog__next_prog(const struct xdp_program *prog, const struct xdp_multiprog *mp); void xdp_multiprog__close(struct xdp_multiprog *mp); int xdp_multiprog__detach(struct xdp_multiprog *mp); enum xdp_attach_mode xdp_multiprog__attach_mode(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__main_prog(const struct xdp_multiprog *mp); struct xdp_program *xdp_multiprog__hw_prog(const struct xdp_multiprog *mp); bool xdp_multiprog__is_legacy(const struct xdp_multiprog *mp); int xdp_multiprog__program_count(const struct xdp_multiprog *mp); bool xdp_multiprog__xdp_frags_support(const struct xdp_multiprog *mp); /* Only the following member combinations can be set at once: * * @obj, @prog_name * Create using BPF program with name @prog_name in BPF object @obj * * @prog_name is optional. In the absence of @prog_name, the first * program of the BPF object is picked. * * @find_filename, @prog_name, @opts * Create using BPF program with name @prog_name in BPF object located in * LIBXDP_OBJECT_PATH with filename @find_filename, using * bpf_object_open_opts @opts. * * @prog_name and @opts are optional. In the absence of @prog_name, the * first program of the BPF object is picked. * * @open_filename, @prog_name, @opts * Create using BPF program with name @prog_name in BPF object located at * path @open_filename, using bpf_object_open_opts @opts. * * @prog_name and @opts are optional. In the absence of @prog_name, the * first program of the BPF object is picked. * * @id * Load from BPF program with ID @id * * @fd * Load from BPF program with fd @fd * * When one of these combinations is set, all other members of the opts struct * must be zeroed out. */ struct xdp_program_opts { size_t sz; struct bpf_object *obj; struct bpf_object_open_opts *opts; const char *prog_name; const char *find_filename; const char *open_filename; const char *pin_path; __u32 id; int fd; size_t :0; }; #define xdp_program_opts__last_field fd #define DECLARE_LIBXDP_OPTS DECLARE_LIBBPF_OPTS struct xdp_program *xdp_program__create(struct xdp_program_opts *opts); #ifdef __cplusplus } /* extern "C" */ #endif #endif xdp-tools-1.5.4/headers/xdp/xdp_sample_common.bpf.h0000644000175100001660000001753215003640462021656 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /* GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */ #ifndef _XDP_SAMPLE_COMMON_BPF_H #define _XDP_SAMPLE_COMMON_BPF_H #include "xdp_sample.bpf.h" #include #include #include #include #include #include array_map rx_cnt SEC(".maps"); array_map redir_err_cnt SEC(".maps"); array_map cpumap_enqueue_cnt SEC(".maps"); array_map cpumap_kthread_cnt SEC(".maps"); array_map exception_cnt SEC(".maps"); array_map devmap_xmit_cnt SEC(".maps"); array_map rxq_cnt SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_PERCPU_HASH); __uint(max_entries, 32 * 32); __type(key, __u64); __type(value, struct datarec); } devmap_xmit_cnt_multi SEC(".maps"); const volatile int nr_cpus = 0; /* These can be set before loading so that redundant comparisons can be DCE'd by * the verifier, and only actual matches are tried after loading the tp_btf program.
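 * The IN_SET() macro below treats an empty match array as "match everything", * so leaving these arrays unset disables the filtering entirely.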
* This allows the sample to filter tracepoint stats based on net_device. */ const volatile int from_match[32] = {}; const volatile int to_match[32] = {}; int cpumap_map_id = 0; /* Find if b is part of set a, but if a is an empty set then evaluate to true */ #define IN_SET(a, b) \ ({ \ bool __res = !(a)[0]; \ for (int i = 0; i < ARRAY_SIZE(a) && (a)[i]; i++) { \ __res = (a)[i] == (b); \ if (__res) \ break; \ } \ __res; \ }) static __always_inline __u32 xdp_get_err_key(int err) { switch (err) { case 0: return 0; case -EINVAL: return 2; case -ENETDOWN: return 3; case -EMSGSIZE: return 4; case -EOPNOTSUPP: return 5; case -ENOSPC: return 6; default: return 1; } } static __always_inline int xdp_redirect_collect_stat(int from, int err) { __u32 cpu = bpf_get_smp_processor_id(); __u32 key = XDP_REDIRECT_ERROR; struct datarec *rec; __u32 idx; if (!IN_SET(from_match, from)) return 0; key = xdp_get_err_key(err); idx = key * nr_cpus + cpu; rec = bpf_map_lookup_elem(&redir_err_cnt, &idx); if (!rec) return 0; if (key) NO_TEAR_INC(rec->dropped); else NO_TEAR_INC(rec->processed); return 0; /* Indicate event was filtered (no further processing) */ /* * Returning 1 here would allow e.g. a perf-record tracepoint * to see and record these events, but it doesn't work well * in practice as stopping perf-record also unloads this * bpf_prog. Plus, there is additional overhead of doing so. */ } SEC("tp_btf/xdp_redirect_err") int BPF_PROG(tp_xdp_redirect_err, const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, const struct bpf_map *map, __u32 index) { return xdp_redirect_collect_stat(dev->ifindex, err); } SEC("tp_btf/xdp_redirect_map_err") int BPF_PROG(tp_xdp_redirect_map_err, const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, const struct bpf_map *map, __u32 index) { return xdp_redirect_collect_stat(dev->ifindex, err); } SEC("tp_btf/xdp_redirect") int BPF_PROG(tp_xdp_redirect, const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, const struct bpf_map *map, __u32 index) { return xdp_redirect_collect_stat(dev->ifindex, err); } SEC("tp_btf/xdp_redirect_map") int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, const struct bpf_map *map, __u32 index) { return xdp_redirect_collect_stat(dev->ifindex, err); } SEC("tp_btf/xdp_cpumap_enqueue") int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed, unsigned int drops, int to_cpu) { __u32 cpu = bpf_get_smp_processor_id(); struct datarec *rec; __u32 idx; if (cpumap_map_id && cpumap_map_id != map_id) return 0; idx = to_cpu * nr_cpus + cpu; rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &idx); if (!rec) return 0; NO_TEAR_ADD(rec->processed, processed); NO_TEAR_ADD(rec->dropped, drops); /* Record bulk events, then userspace can calc average bulk size */ if (processed > 0) NO_TEAR_INC(rec->issue); /* Inception: It's possible to detect overload situations, via * this tracepoint. This can be used for creating a feedback * loop to XDP, which can take appropriate actions to mitigate * this overload situation.
*/ return 0; } SEC("tp_btf/xdp_cpumap_kthread") int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed, unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats) { struct datarec *rec; __u32 cpu; if (cpumap_map_id && cpumap_map_id != map_id) return 0; cpu = bpf_get_smp_processor_id(); rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu); if (!rec) return 0; NO_TEAR_ADD(rec->processed, processed); NO_TEAR_ADD(rec->dropped, drops); NO_TEAR_ADD(rec->xdp_pass, xdp_stats->pass); NO_TEAR_ADD(rec->xdp_drop, xdp_stats->drop); NO_TEAR_ADD(rec->xdp_redirect, xdp_stats->redirect); /* Count times kthread yielded CPU via schedule call */ if (sched) NO_TEAR_INC(rec->issue); return 0; } SEC("tp_btf/xdp_cpumap_kthread") int BPF_PROG(tp_xdp_cpumap_compat, int map_id, unsigned int processed, unsigned int drops, int sched) { struct datarec *rec; __u32 cpu; if (cpumap_map_id && cpumap_map_id != map_id) return 0; cpu = bpf_get_smp_processor_id(); rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu); if (!rec) return 0; NO_TEAR_ADD(rec->processed, processed); NO_TEAR_ADD(rec->dropped, drops); /* Count times kthread yielded CPU via schedule call */ if (sched) NO_TEAR_INC(rec->issue); return 0; } SEC("tp_btf/xdp_exception") int BPF_PROG(tp_xdp_exception, const struct net_device *dev, const struct bpf_prog *xdp, __u32 act) { __u32 cpu = bpf_get_smp_processor_id(); struct datarec *rec; __u32 key = act, idx; if (!IN_SET(from_match, dev->ifindex)) return 0; if (!IN_SET(to_match, dev->ifindex)) return 0; if (key > XDP_REDIRECT) key = XDP_REDIRECT + 1; idx = key * nr_cpus + cpu; rec = bpf_map_lookup_elem(&exception_cnt, &idx); if (!rec) return 0; NO_TEAR_INC(rec->dropped); return 0; } SEC("tp_btf/xdp_devmap_xmit") int BPF_PROG(tp_xdp_devmap_xmit, const struct net_device *from_dev, const struct net_device *to_dev, int sent, int drops, int err) { struct datarec *rec; int idx_in, idx_out; __u32 cpu; idx_in = from_dev->ifindex; idx_out = to_dev->ifindex; if (!IN_SET(from_match, idx_in)) return 0; if (!IN_SET(to_match, idx_out)) return 0; cpu = bpf_get_smp_processor_id(); rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &cpu); if (!rec) return 0; NO_TEAR_ADD(rec->processed, sent); NO_TEAR_ADD(rec->dropped, drops); /* Record bulk events, then userspace can calc average bulk size */ NO_TEAR_INC(rec->info); /* Record error cases, where no frames were sent */ /* Catch the API error case where the driver's ndo_xdp_xmit sent more than count */ if (err || drops < 0) NO_TEAR_INC(rec->issue); return 0; } SEC("tp_btf/xdp_devmap_xmit") int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device *from_dev, const struct net_device *to_dev, int sent, int drops, int err) { struct datarec empty = {}; struct datarec *rec; int idx_in, idx_out; __u64 idx; idx_in = from_dev->ifindex; idx_out = to_dev->ifindex; idx = idx_in; idx = idx << 32 | idx_out; if (!IN_SET(from_match, idx_in)) return 0; if (!IN_SET(to_match, idx_out)) return 0; bpf_map_update_elem(&devmap_xmit_cnt_multi, &idx, &empty, BPF_NOEXIST); rec = bpf_map_lookup_elem(&devmap_xmit_cnt_multi, &idx); if (!rec) return 0; NO_TEAR_ADD(rec->processed, sent); NO_TEAR_ADD(rec->dropped, drops); NO_TEAR_INC(rec->info); if (err || drops < 0) NO_TEAR_INC(rec->issue); return 0; } #endif xdp-tools-1.5.4/headers/bpf/0000755000175100001660000000000015003640462015177 5ustar runnerdockerxdp-tools-1.5.4/headers/bpf/vmlinux.h0000644000175100001660000000070615003640462017055 0ustar runnerdocker#ifndef __VMLINUX_H__ #define __VMLINUX_H__ #ifndef BPF_NO_PRESERVE_ACCESS_INDEX #pragma clang attribute push
(__attribute__((preserve_access_index)), apply_to = record) #endif struct net_device { int ifindex; }; struct xdp_cpumap_stats { unsigned int redirect; unsigned int pass; unsigned int drop; }; struct bpf_prog { }; struct bpf_map { }; #ifndef BPF_NO_PRESERVE_ACCESS_INDEX #pragma clang attribute pop #endif #endif /* __VMLINUX_H__ */ xdp-tools-1.5.4/headers/bpf/bpf_trace_helpers.h0000644000175100001660000001270115003640462021020 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __BPF_TRACE_HELPERS_H #define __BPF_TRACE_HELPERS_H #include #define ___bpf_concat(a, b) a ## b #define ___bpf_apply(fn, n) ___bpf_concat(fn, n) #define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N #define ___bpf_narg(...) \ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #define ___bpf_empty(...) \ ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) #define ___bpf_ctx_cast0() ctx #define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0] #define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1] #define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2] #define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3] #define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4] #define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5] #define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6] #define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7] #define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8] #define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9] #define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10] #define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11] #define ___bpf_ctx_cast(args...) \ ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args) /* * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and * similar kinds of BPF programs, that accept input arguments as a single * pointer to untyped u64 array, where each u64 can actually be a typed * pointer or integer of different size. Instead of requiring user to write * manual casts and work with array elements by index, BPF_PROG macro * allows user to declare a list of named and typed input arguments in the * same syntax as for normal C function. All the casting is hidden and * performed transparently, while user code can just assume working with * function arguments of specified type and name. * * Original raw context argument is preserved as well as 'ctx' argument. * This is useful when using BPF helpers that expect original context * as one of the parameters (e.g., for bpf_perf_event_output()). */ #define BPF_PROG(name, args...) \ name(unsigned long long *ctx); \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx, ##args); \ typeof(name(0)) name(unsigned long long *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_ctx_cast(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) \ ____##name(unsigned long long *ctx, ##args) struct pt_regs; #define ___bpf_kprobe_args0() ctx #define ___bpf_kprobe_args1(x) \ ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx) #define ___bpf_kprobe_args2(x, args...) \ ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx) #define ___bpf_kprobe_args3(x, args...)
\ ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx) #define ___bpf_kprobe_args4(x, args...) \ ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx) #define ___bpf_kprobe_args5(x, args...) \ ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx) #define ___bpf_kprobe_args(args...) \ ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) /* * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific * low-level way of getting kprobe input arguments from struct pt_regs, and * provides a familiar typed and named function arguments syntax and * semantics of accessing kprobe input parameters. * * Original struct pt_regs* context is preserved as 'ctx' argument. This might * be necessary when using BPF helpers like bpf_perf_event_output(). */ #define BPF_KPROBE(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\ typeof(name(0)) name(struct pt_regs *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_kprobe_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #define ___bpf_kretprobe_args0() ctx #define ___bpf_kretprobe_argsN(x, args...) \ ___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx) #define ___bpf_kretprobe_args(args...) \ ___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args) /* * BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all * input kprobe arguments, one last extra argument has to be specified, which * captures kprobe return value. */ #define BPF_KRETPROBE(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\ typeof(name(0)) name(struct pt_regs *ctx) \ { \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ return ____##name(___bpf_kretprobe_args(args)); \ _Pragma("GCC diagnostic pop") \ } \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #endif xdp-tools-1.5.4/headers/linux/0000755000175100001660000000000015003640462015567 5ustar runnerdockerxdp-tools-1.5.4/headers/linux/if_link.h0000644000175100001660000007457315003640462017367 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_IF_LINK_H #define _UAPI_LINUX_IF_LINK_H #include #include /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; __u32 tx_packets; __u32 rx_bytes; __u32 tx_bytes; __u32 rx_errors; __u32 tx_errors; __u32 rx_dropped; __u32 tx_dropped; __u32 multicast; __u32 collisions; /* detailed rx_errors: */ __u32 rx_length_errors; __u32 rx_over_errors; __u32 rx_crc_errors; __u32 rx_frame_errors; __u32 rx_fifo_errors; __u32 rx_missed_errors; /* detailed tx_errors */ __u32 tx_aborted_errors; __u32 tx_carrier_errors; __u32 tx_fifo_errors; __u32 tx_heartbeat_errors; __u32 tx_window_errors; /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; __u32 rx_nohandler; }; /** * struct rtnl_link_stats64 - The main device statistics structure. * * @rx_packets: Number of good packets received by the interface. * For hardware interfaces counts all good packets received from the device * by the host, including packets which host had to drop at various stages * of processing (even in the driver).
* * @tx_packets: Number of packets successfully transmitted. * For hardware interfaces counts packets which host was able to successfully * hand over to the device, which does not necessarily mean that packets * had been successfully transmitted out of the device, only that device * acknowledged it copied them out of host memory. * * @rx_bytes: Number of good received bytes, corresponding to @rx_packets. * * For IEEE 802.3 devices should count the length of Ethernet Frames * excluding the FCS. * * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets. * * For IEEE 802.3 devices should count the length of Ethernet Frames * excluding the FCS. * * @rx_errors: Total number of bad packets received on this network device. * This counter must include events counted by @rx_length_errors, * @rx_crc_errors, @rx_frame_errors and other errors not otherwise * counted. * * @tx_errors: Total number of transmit problems. * This counter must include events counted by @tx_aborted_errors, * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors, * @tx_window_errors and other errors not otherwise counted. * * @rx_dropped: Number of packets received but not processed, * e.g. due to lack of resources or unsupported protocol. * For hardware interfaces this counter may include packets discarded * due to L2 address filtering but should not include packets dropped * by the device due to buffer exhaustion which are counted separately in * @rx_missed_errors (since procfs folds those two counters together). * * @tx_dropped: Number of packets dropped on their way to transmission, * e.g. due to lack of resources. * * @multicast: Multicast packets received. * For hardware interfaces this statistic is commonly calculated * at the device level (unlike @rx_packets) and therefore may include * packets which did not reach the host. * * For IEEE 802.3 devices this counter may be equivalent to: * * - 30.3.1.1.21 aMulticastFramesReceivedOK * * @collisions: Number of collisions during packet transmissions. * * @rx_length_errors: Number of packets dropped due to invalid length. * Part of aggregate "frame" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter should be equivalent to a sum * of the following attributes: * * - 30.3.1.1.23 aInRangeLengthErrors * - 30.3.1.1.24 aOutOfRangeLengthField * - 30.3.1.1.25 aFrameTooLongErrors * * @rx_over_errors: Receiver FIFO overflow event counter. * * Historically the count of overflow events. Such events may be * reported in the receive descriptors or via interrupts, and may * not correspond one-to-one with dropped packets. * * The recommended interpretation for high speed interfaces is - * number of packets dropped because they did not fit into buffers * provided by the host, e.g. packets larger than MTU or next buffer * in the ring was not available for a scatter transfer. * * Part of aggregate "frame" errors in `/proc/net/dev`. * * This statistic was historically used interchangeably with * @rx_fifo_errors. * * This statistic corresponds to hardware events and is not commonly used * on software devices. * * @rx_crc_errors: Number of packets received with a CRC error. * Part of aggregate "frame" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.6 aFrameCheckSequenceErrors * * @rx_frame_errors: Receiver frame alignment errors. * Part of aggregate "frame" errors in `/proc/net/dev`.
* * For IEEE 802.3 devices this counter should be equivalent to: * * - 30.3.1.1.7 aAlignmentErrors * * @rx_fifo_errors: Receiver FIFO error counter. * * Historically the count of overflow events. Those events may be * reported in the receive descriptors or via interrupts, and may * not correspond one-to-one with dropped packets. * * This statistics was used interchangeably with @rx_over_errors. * Not recommended for use in drivers for high speed interfaces. * * This statistic is used on software devices, e.g. to count software * packet queue overflow (can) or sequencing errors (GRE). * * @rx_missed_errors: Count of packets missed by the host. * Folded into the "drop" counter in `/proc/net/dev`. * * Counts number of packets dropped by the device due to lack * of buffer space. This usually indicates that the host interface * is slower than the network interface, or host is not keeping up * with the receive packet rate. * * This statistic corresponds to hardware events and is not used * on software devices. * * @tx_aborted_errors: * Part of aggregate "carrier" errors in `/proc/net/dev`. * For IEEE 802.3 devices capable of half-duplex operation this counter * must be equivalent to: * * - 30.3.1.1.11 aFramesAbortedDueToXSColls * * High speed interfaces may use this counter as a general device * discard counter. * * @tx_carrier_errors: Number of frame transmission errors due to loss * of carrier during transmission. * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.13 aCarrierSenseErrors * * @tx_fifo_errors: Number of frame transmission errors due to device * FIFO underrun / underflow. This condition occurs when the device * begins transmission of a frame but is unable to deliver the * entire frame to the transmitter in time for transmission. * Part of aggregate "carrier" errors in `/proc/net/dev`. * * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for * old half-duplex Ethernet. * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices possibly equivalent to: * * - 30.3.2.1.4 aSQETestErrors * * @tx_window_errors: Number of frame transmission errors due * to late collisions (for Ethernet - after the first 64B of transmission). * Part of aggregate "carrier" errors in `/proc/net/dev`. * * For IEEE 802.3 devices this counter must be equivalent to: * * - 30.3.1.1.10 aLateCollisions * * @rx_compressed: Number of correctly received compressed packets. * This counters is only meaningful for interfaces which support * packet compression (e.g. CSLIP, PPP). * * @tx_compressed: Number of transmitted compressed packets. * This counters is only meaningful for interfaces which support * packet compression (e.g. CSLIP, PPP). * * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). 
*/ struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; /* detailed tx_errors */ __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; }; /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; __u64 mem_end; __u64 base_addr; __u16 irq; __u8 dma; __u8 port; }; /* * IFLA_AF_SPEC * Contains nested attributes for address family specific attributes. * Each address family may create a attribute with the address family * number as type and create its own attribute structure in it. * * Example: * [IFLA_AF_SPEC] = { * [AF_INET] = { * [IFLA_INET_CONF] = ..., * }, * [AF_INET6] = { * [IFLA_INET6_FLAGS] = ..., * [IFLA_INET6_CONF] = ..., * } * } */ enum { IFLA_UNSPEC, IFLA_ADDRESS, IFLA_BROADCAST, IFLA_IFNAME, IFLA_MTU, IFLA_LINK, IFLA_QDISC, IFLA_STATS, IFLA_COST, #define IFLA_COST IFLA_COST IFLA_PRIORITY, #define IFLA_PRIORITY IFLA_PRIORITY IFLA_MASTER, #define IFLA_MASTER IFLA_MASTER IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */ #define IFLA_WIRELESS IFLA_WIRELESS IFLA_PROTINFO, /* Protocol specific information for a link */ #define IFLA_PROTINFO IFLA_PROTINFO IFLA_TXQLEN, #define IFLA_TXQLEN IFLA_TXQLEN IFLA_MAP, #define IFLA_MAP IFLA_MAP IFLA_WEIGHT, #define IFLA_WEIGHT IFLA_WEIGHT IFLA_OPERSTATE, IFLA_LINKMODE, IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, IFLA_IFALIAS, IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ IFLA_VFINFO_LIST, IFLA_STATS64, IFLA_VF_PORTS, IFLA_PORT_SELF, IFLA_AF_SPEC, IFLA_GROUP, /* Group the device belongs to */ IFLA_NET_NS_FD, IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */ #define IFLA_PROMISCUITY IFLA_PROMISCUITY IFLA_NUM_TX_QUEUES, IFLA_NUM_RX_QUEUES, IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, IFLA_PHYS_PORT_NAME, IFLA_PROTO_DOWN, IFLA_GSO_MAX_SEGS, IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, IFLA_MIN_MTU, IFLA_MAX_MTU, IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, IFLA_PROTO_DOWN_REASON, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) enum { IFLA_PROTO_DOWN_REASON_UNSPEC, IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ __IFLA_PROTO_DOWN_REASON_CNT, IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 }; /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) #endif enum { IFLA_INET_UNSPEC, IFLA_INET_CONF, __IFLA_INET_MAX, }; #define IFLA_INET_MAX (__IFLA_INET_MAX - 1) /* ifi_flags. IFF_* flags. The only change is: IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are more not changeable by user. They describe link media characteristics and set by device driver. 
Comments: - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid - If neither of these three flags are set; the interface is NBMA. - IFF_MULTICAST does not mean anything special: multicasts can be used on all not-NBMA links. IFF_MULTICAST means that this media uses special encapsulation for multicast frames. Apparently, all IFF_POINTOPOINT and IFF_BROADCAST devices are able to use multicasts too. */ /* IFLA_LINK. For usual devices it is equal ifi_index. If it is a "virtual interface" (f.e. tunnel), ifi_link can point to real physical interface (f.e. for bandwidth calculations), or maybe 0, what means, that real media is unknown (usual for IPIP tunnels, when route to endpoint is allowed to change) */ /* Subtype attributes for IFLA_PROTINFO */ enum { IFLA_INET6_UNSPEC, IFLA_INET6_FLAGS, /* link flags */ IFLA_INET6_CONF, /* sysctl parameters */ IFLA_INET6_STATS, /* statistics */ IFLA_INET6_MCAST, /* MC things. What of them? */ IFLA_INET6_CACHEINFO, /* time values and max reasm size */ IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ __IFLA_INET6_MAX }; #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1) enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ enum { IFLA_BR_UNSPEC, IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, IFLA_BR_AGEING_TIME, IFLA_BR_STP_STATE, IFLA_BR_PRIORITY, IFLA_BR_VLAN_FILTERING, IFLA_BR_VLAN_PROTOCOL, IFLA_BR_GROUP_FWD_MASK, IFLA_BR_ROOT_ID, IFLA_BR_BRIDGE_ID, IFLA_BR_ROOT_PORT, IFLA_BR_ROOT_PATH_COST, IFLA_BR_TOPOLOGY_CHANGE, IFLA_BR_TOPOLOGY_CHANGE_DETECTED, IFLA_BR_HELLO_TIMER, IFLA_BR_TCN_TIMER, IFLA_BR_TOPOLOGY_CHANGE_TIMER, IFLA_BR_GC_TIMER, IFLA_BR_GROUP_ADDR, IFLA_BR_FDB_FLUSH, IFLA_BR_MCAST_ROUTER, IFLA_BR_MCAST_SNOOPING, IFLA_BR_MCAST_QUERY_USE_IFADDR, IFLA_BR_MCAST_QUERIER, IFLA_BR_MCAST_HASH_ELASTICITY, IFLA_BR_MCAST_HASH_MAX, IFLA_BR_MCAST_LAST_MEMBER_CNT, IFLA_BR_MCAST_STARTUP_QUERY_CNT, IFLA_BR_MCAST_LAST_MEMBER_INTVL, IFLA_BR_MCAST_MEMBERSHIP_INTVL, IFLA_BR_MCAST_QUERIER_INTVL, IFLA_BR_MCAST_QUERY_INTVL, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, IFLA_BR_NF_CALL_IPTABLES, IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_PAD, IFLA_BR_VLAN_STATS_ENABLED, IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, __IFLA_BR_MAX, }; #define IFLA_BR_MAX (__IFLA_BR_MAX - 1) struct ifla_bridge_id { __u8 prio[2]; __u8 addr[6]; /* ETH_ALEN */ }; enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, }; enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ IFLA_BRPORT_PRIORITY, /* " priority */ IFLA_BRPORT_COST, /* " cost */ IFLA_BRPORT_MODE, /* mode (hairpin) */ IFLA_BRPORT_GUARD, /* bpdu guard */ IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ IFLA_BRPORT_ROOT_ID, /* designated root */ IFLA_BRPORT_BRIDGE_ID, /* designated bridge */ IFLA_BRPORT_DESIGNATED_PORT, IFLA_BRPORT_DESIGNATED_COST, IFLA_BRPORT_ID, IFLA_BRPORT_NO, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, IFLA_BRPORT_CONFIG_PENDING, 
IFLA_BRPORT_MESSAGE_AGE_TIMER, IFLA_BRPORT_FORWARD_DELAY_TIMER, IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_FLUSH, IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) struct ifla_cacheinfo { __u32 max_reasm_len; __u32 tstamp; /* ipv6InterfaceTable updated timestamp */ __u32 reachable_time; __u32 retrans_time; }; enum { IFLA_INFO_UNSPEC, IFLA_INFO_KIND, IFLA_INFO_DATA, IFLA_INFO_XSTATS, IFLA_INFO_SLAVE_KIND, IFLA_INFO_SLAVE_DATA, __IFLA_INFO_MAX, }; #define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1) /* VLAN section */ enum { IFLA_VLAN_UNSPEC, IFLA_VLAN_ID, IFLA_VLAN_FLAGS, IFLA_VLAN_EGRESS_QOS, IFLA_VLAN_INGRESS_QOS, IFLA_VLAN_PROTOCOL, __IFLA_VLAN_MAX, }; #define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1) struct ifla_vlan_flags { __u32 flags; __u32 mask; }; enum { IFLA_VLAN_QOS_UNSPEC, IFLA_VLAN_QOS_MAPPING, __IFLA_VLAN_QOS_MAX }; #define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1) struct ifla_vlan_qos_mapping { __u32 from; __u32 to; }; /* MACVLAN section */ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, IFLA_MACVLAN_MACADDR_MODE, IFLA_MACVLAN_MACADDR, IFLA_MACVLAN_MACADDR_DATA, IFLA_MACVLAN_MACADDR_COUNT, IFLA_MACVLAN_BC_QUEUE_LEN, IFLA_MACVLAN_BC_QUEUE_LEN_USED, __IFLA_MACVLAN_MAX, }; #define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1) enum macvlan_mode { MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ }; enum macvlan_macaddr_mode { MACVLAN_MACADDR_ADD, MACVLAN_MACADDR_DEL, MACVLAN_MACADDR_FLUSH, MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 /* VRF section */ enum { IFLA_VRF_UNSPEC, IFLA_VRF_TABLE, __IFLA_VRF_MAX }; #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) enum { IFLA_VRF_PORT_UNSPEC, IFLA_VRF_PORT_TABLE, __IFLA_VRF_PORT_MAX }; #define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) /* MACSEC section */ enum { IFLA_MACSEC_UNSPEC, IFLA_MACSEC_SCI, IFLA_MACSEC_PORT, IFLA_MACSEC_ICV_LEN, IFLA_MACSEC_CIPHER_SUITE, IFLA_MACSEC_WINDOW, IFLA_MACSEC_ENCODING_SA, IFLA_MACSEC_ENCRYPT, IFLA_MACSEC_PROTECT, IFLA_MACSEC_INC_SCI, IFLA_MACSEC_ES, IFLA_MACSEC_SCB, IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, IFLA_MACSEC_OFFLOAD, __IFLA_MACSEC_MAX, }; #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) /* XFRM section */ enum { IFLA_XFRM_UNSPEC, IFLA_XFRM_LINK, IFLA_XFRM_IF_ID, __IFLA_XFRM_MAX }; #define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, MACSEC_VALIDATE_STRICT = 2, __MACSEC_VALIDATE_END, MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; enum macsec_offload { MACSEC_OFFLOAD_OFF = 0, MACSEC_OFFLOAD_PHY = 1, MACSEC_OFFLOAD_MAC = 2, __MACSEC_OFFLOAD_END, MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, }; /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, IFLA_IPVLAN_MODE, IFLA_IPVLAN_FLAGS, __IFLA_IPVLAN_MAX }; #define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) enum ipvlan_mode { IPVLAN_MODE_L2 = 0, IPVLAN_MODE_L3, IPVLAN_MODE_L3S, IPVLAN_MODE_MAX }; 
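The IFLA_LINKINFO / IFLA_INFO_KIND / IFLA_INFO_DATA nesting above is what "ip link add ... type vlan id 100" ultimately serializes. A hedged sketch using iproute2-style helpers; addattr_nest(), addattr_l() and the req buffer are assumptions borrowed from iproute2's libnetlink, not part of this header:

// req is an RTM_NEWLINK request with netlink header req.n.
struct rtattr *linkinfo, *data;
__u16 vlan_id = 100;

linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, "vlan", 4);
data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
addattr_l(&req.n, sizeof(req), IFLA_VLAN_ID, &vlan_id, sizeof(vlan_id));
addattr_nest_end(&req.n, data);
addattr_nest_end(&req.n, linkinfo);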
#define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, /* group or remote address */ IFLA_VXLAN_LINK, IFLA_VXLAN_LOCAL, IFLA_VXLAN_TTL, IFLA_VXLAN_TOS, IFLA_VXLAN_LEARNING, IFLA_VXLAN_AGEING, IFLA_VXLAN_LIMIT, IFLA_VXLAN_PORT_RANGE, /* source port */ IFLA_VXLAN_PROXY, IFLA_VXLAN_RSC, IFLA_VXLAN_L2MISS, IFLA_VXLAN_L3MISS, IFLA_VXLAN_PORT, /* destination port */ IFLA_VXLAN_GROUP6, IFLA_VXLAN_LOCAL6, IFLA_VXLAN_UDP_CSUM, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, IFLA_VXLAN_REMCSUM_TX, IFLA_VXLAN_REMCSUM_RX, IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) struct ifla_vxlan_port_range { __be16 low; __be16 high; }; enum ifla_vxlan_df { VXLAN_DF_UNSET = 0, VXLAN_DF_SET, VXLAN_DF_INHERIT, __VXLAN_DF_END, VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, IFLA_GENEVE_TTL, IFLA_GENEVE_TOS, IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, IFLA_GENEVE_REMOTE6, IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) enum ifla_geneve_df { GENEVE_DF_UNSET = 0, GENEVE_DF_SET, GENEVE_DF_INHERIT, __GENEVE_DF_END, GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; /* Bareudp section */ enum { IFLA_BAREUDP_UNSPEC, IFLA_BAREUDP_PORT, IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, __IFLA_BAREUDP_MAX }; #define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1) /* PPP section */ enum { IFLA_PPP_UNSPEC, IFLA_PPP_DEV_FD, __IFLA_PPP_MAX }; #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) /* GTP section */ enum ifla_gtp_role { GTP_ROLE_GGSN = 0, GTP_ROLE_SGSN, }; enum { IFLA_GTP_UNSPEC, IFLA_GTP_FD0, IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) /* Bonding section */ enum { IFLA_BOND_UNSPEC, IFLA_BOND_MODE, IFLA_BOND_ACTIVE_SLAVE, IFLA_BOND_MIIMON, IFLA_BOND_UPDELAY, IFLA_BOND_DOWNDELAY, IFLA_BOND_USE_CARRIER, IFLA_BOND_ARP_INTERVAL, IFLA_BOND_ARP_IP_TARGET, IFLA_BOND_ARP_VALIDATE, IFLA_BOND_ARP_ALL_TARGETS, IFLA_BOND_PRIMARY, IFLA_BOND_PRIMARY_RESELECT, IFLA_BOND_FAIL_OVER_MAC, IFLA_BOND_XMIT_HASH_POLICY, IFLA_BOND_RESEND_IGMP, IFLA_BOND_NUM_PEER_NOTIF, IFLA_BOND_ALL_SLAVES_ACTIVE, IFLA_BOND_MIN_LINKS, IFLA_BOND_LP_INTERVAL, IFLA_BOND_PACKETS_PER_SLAVE, IFLA_BOND_AD_LACP_RATE, IFLA_BOND_AD_SELECT, IFLA_BOND_AD_INFO, IFLA_BOND_AD_ACTOR_SYS_PRIO, IFLA_BOND_AD_USER_PORT_KEY, IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, IFLA_BOND_PEER_NOTIF_DELAY, __IFLA_BOND_MAX, }; #define IFLA_BOND_MAX (__IFLA_BOND_MAX - 1) enum { IFLA_BOND_AD_INFO_UNSPEC, IFLA_BOND_AD_INFO_AGGREGATOR, IFLA_BOND_AD_INFO_NUM_PORTS, IFLA_BOND_AD_INFO_ACTOR_KEY, IFLA_BOND_AD_INFO_PARTNER_KEY, IFLA_BOND_AD_INFO_PARTNER_MAC, __IFLA_BOND_AD_INFO_MAX, }; #define IFLA_BOND_AD_INFO_MAX (__IFLA_BOND_AD_INFO_MAX - 1) enum { IFLA_BOND_SLAVE_UNSPEC, IFLA_BOND_SLAVE_STATE, IFLA_BOND_SLAVE_MII_STATUS, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, IFLA_BOND_SLAVE_PERM_HWADDR, IFLA_BOND_SLAVE_QUEUE_ID, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, __IFLA_BOND_SLAVE_MAX, }; #define IFLA_BOND_SLAVE_MAX 
(__IFLA_BOND_SLAVE_MAX - 1) /* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, IFLA_VF_INFO, __IFLA_VF_INFO_MAX, }; #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) enum { IFLA_VF_UNSPEC, IFLA_VF_MAC, /* Hardware queue specific attributes */ IFLA_VF_VLAN, /* VLAN ID and QoS */ IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query * on/off switch */ IFLA_VF_STATS, /* network device statistics */ IFLA_VF_TRUST, /* Trust VF */ IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ IFLA_VF_BROADCAST, /* VF broadcast */ __IFLA_VF_MAX, }; #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) struct ifla_vf_mac { __u32 vf; __u8 mac[32]; /* MAX_ADDR_LEN */ }; struct ifla_vf_broadcast { __u8 broadcast[32]; }; struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; }; enum { IFLA_VF_VLAN_INFO_UNSPEC, IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */ __IFLA_VF_VLAN_INFO_MAX, }; #define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1) #define MAX_VLAN_LIST_LEN 1 struct ifla_vf_vlan_info { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ __u32 qos; __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ }; struct ifla_vf_tx_rate { __u32 vf; __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ }; struct ifla_vf_rate { __u32 vf; __u32 min_tx_rate; /* Min Bandwidth in Mbps */ __u32 max_tx_rate; /* Max Bandwidth in Mbps */ }; struct ifla_vf_spoofchk { __u32 vf; __u32 setting; }; struct ifla_vf_guid { __u32 vf; __u64 guid; }; enum { IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ IFLA_VF_LINK_STATE_ENABLE, /* link always up */ IFLA_VF_LINK_STATE_DISABLE, /* link always down */ __IFLA_VF_LINK_STATE_MAX, }; struct ifla_vf_link_state { __u32 vf; __u32 link_state; }; struct ifla_vf_rss_query_en { __u32 vf; __u32 setting; }; enum { IFLA_VF_STATS_RX_PACKETS, IFLA_VF_STATS_TX_PACKETS, IFLA_VF_STATS_RX_BYTES, IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, IFLA_VF_STATS_RX_DROPPED, IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) struct ifla_vf_trust { __u32 vf; __u32 setting; }; /* VF ports management section * * Nested layout of set/get msg is: * * [IFLA_NUM_VF] * [IFLA_VF_PORTS] * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * [IFLA_VF_PORT] * [IFLA_PORT_*], ... * ... * [IFLA_PORT_SELF] * [IFLA_PORT_*], ... 
*/ enum { IFLA_VF_PORT_UNSPEC, IFLA_VF_PORT, /* nest */ __IFLA_VF_PORT_MAX, }; #define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1) enum { IFLA_PORT_UNSPEC, IFLA_PORT_VF, /* __u32 */ IFLA_PORT_PROFILE, /* string */ IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */ IFLA_PORT_INSTANCE_UUID, /* binary UUID */ IFLA_PORT_HOST_UUID, /* binary UUID */ IFLA_PORT_REQUEST, /* __u8 */ IFLA_PORT_RESPONSE, /* __u16, output only */ __IFLA_PORT_MAX, }; #define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1) #define PORT_PROFILE_MAX 40 #define PORT_UUID_MAX 16 #define PORT_SELF_VF -1 enum { PORT_REQUEST_PREASSOCIATE = 0, PORT_REQUEST_PREASSOCIATE_RR, PORT_REQUEST_ASSOCIATE, PORT_REQUEST_DISASSOCIATE, }; enum { PORT_VDP_RESPONSE_SUCCESS = 0, PORT_VDP_RESPONSE_INVALID_FORMAT, PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, PORT_VDP_RESPONSE_UNUSED_VTID, PORT_VDP_RESPONSE_VTID_VIOLATION, PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, PORT_VDP_RESPONSE_OUT_OF_SYNC, /* 0x08-0xFF reserved for future VDP use */ PORT_PROFILE_RESPONSE_SUCCESS = 0x100, PORT_PROFILE_RESPONSE_INPROGRESS, PORT_PROFILE_RESPONSE_INVALID, PORT_PROFILE_RESPONSE_BADSTATE, PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, PORT_PROFILE_RESPONSE_ERROR, }; struct ifla_port_vsi { __u8 vsi_mgr_id; __u8 vsi_type_id[3]; __u8 vsi_type_version; __u8 pad[3]; }; /* IPoIB section */ enum { IFLA_IPOIB_UNSPEC, IFLA_IPOIB_PKEY, IFLA_IPOIB_MODE, IFLA_IPOIB_UMCAST, __IFLA_IPOIB_MAX }; enum { IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */ IPOIB_MODE_CONNECTED = 1, /* using connected QPs */ }; #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) /* HSR/PRP section, both uses same interface */ /* Different redundancy protocols for hsr device */ enum { HSR_PROTOCOL_HSR, HSR_PROTOCOL_PRP, HSR_PROTOCOL_MAX, }; enum { IFLA_HSR_UNSPEC, IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2, IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ IFLA_HSR_PROTOCOL, /* Indicate different protocol than * HSR. For example PRP. */ __IFLA_HSR_MAX, }; #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1) /* STATS section */ struct if_stats_msg { __u8 family; __u8 pad1; __u16 pad2; __u32 ifindex; __u32 filter_mask; }; /* A stats attribute can be netdev specific or a global stat. 
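A sketch of the RTM_GETSTATS request that struct if_stats_msg above is built for; the IFLA_STATS_FILTER_BIT() helper it uses is defined just below, and the netlink socket fd is assumed to be open already:

#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

static int request_link_stats(int nl_fd, __u32 ifindex)
{
	struct {
		struct nlmsghdr nlh;
		struct if_stats_msg ifsm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg));
	req.nlh.nlmsg_type = RTM_GETSTATS;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.ifsm.ifindex = ifindex;
	// Select only the 64-bit counter block in the reply.
	req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);
	return send(nl_fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}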
* For netdev stats, lets use the prefix IFLA_STATS_LINK_* */ enum { IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ IFLA_STATS_LINK_64, IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; #define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1) #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] * -> [rtnl link type specific attributes] */ enum { LINK_XSTATS_TYPE_UNSPEC, LINK_XSTATS_TYPE_BRIDGE, LINK_XSTATS_TYPE_BOND, __LINK_XSTATS_TYPE_MAX }; #define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) /* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */ enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) #define XDP_FLAGS_REPLACE (1U << 4) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ XDP_FLAGS_MODES | XDP_FLAGS_REPLACE) /* These are stored into IFLA_XDP_ATTACHED on dump. */ enum { XDP_ATTACHED_NONE = 0, XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, XDP_ATTACHED_MULTI, }; enum { IFLA_XDP_UNSPEC, IFLA_XDP_FD, IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID, IFLA_XDP_EXPECTED_FD, __IFLA_XDP_MAX, }; #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) enum { IFLA_EVENT_NONE, IFLA_EVENT_REBOOT, /* internal reset / reboot */ IFLA_EVENT_FEATURES, /* change in offload features */ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. 
arp/ndisc */ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; /* tun section */ enum { IFLA_TUN_UNSPEC, IFLA_TUN_OWNER, IFLA_TUN_GROUP, IFLA_TUN_TYPE, IFLA_TUN_PI, IFLA_TUN_VNET_HDR, IFLA_TUN_PERSIST, IFLA_TUN_MULTI_QUEUE, IFLA_TUN_NUM_QUEUES, IFLA_TUN_NUM_DISABLED_QUEUES, __IFLA_TUN_MAX, }; #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) /* rmnet section */ #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) enum { IFLA_RMNET_UNSPEC, IFLA_RMNET_MUX_ID, IFLA_RMNET_FLAGS, __IFLA_RMNET_MAX, }; #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) struct ifla_rmnet_flags { __u32 flags; __u32 mask; }; #endif /* _UAPI_LINUX_IF_LINK_H */ xdp-tools-1.5.4/headers/linux/list.h0000644000175100001660000000474015003640462016720 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_LIST_H #define __LINUX_LIST_H struct list_head { struct list_head *next, *prev; }; #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) #define POISON_POINTER_DELTA 0 #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; prev->next = next; } /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ static inline void __list_del_entry(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } static inline int list_empty(const struct list_head *head) { return head->next == head; } #define list_entry(ptr, type, member) \ container_of(ptr, type, member) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ &pos->member != (head); \ pos = list_next_entry(pos, member)) #endif xdp-tools-1.5.4/headers/linux/compiler-gcc.h0000644000175100001660000000221315003640462020302 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _TOOLS_LINUX_COMPILER_H_ #error "Please don't include directly, include instead." #endif /* * Common definitions for all gcc versions go here. 
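The trimmed list API above (list.h) is easiest to read from a usage sketch. Note that list_entry() relies on a container_of() definition provided elsewhere in this tree:

#include <stdio.h>

struct item {
	int value;
	struct list_head node;
};

static LIST_HEAD(items);

static void list_demo(void)
{
	static struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *it;

	list_add(&a.node, &items);	// items: a
	list_add(&b.node, &items);	// items: b, a -- LIFO order, per
					// the "good for stacks" comment
	list_for_each_entry(it, &items, node)
		printf("%d\n", it->value);
	list_del(&b.node);		// poisons b.node's pointers
}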
*/ #ifndef GCC_VERSION #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #endif #if GCC_VERSION >= 70000 && !defined(__CHECKER__) # define __fallthrough __attribute__ ((fallthrough)) #endif #if __has_attribute(__error__) # define __compiletime_error(message) __attribute__((error(message))) #endif /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #ifndef __pure #define __pure __attribute__((pure)) #endif #define noinline __attribute__((noinline)) #ifndef __packed #define __packed __attribute__((packed)) #endif #ifndef __noreturn #define __noreturn __attribute__((noreturn)) #endif #ifndef __aligned #define __aligned(x) __attribute__((aligned(x))) #endif #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) xdp-tools-1.5.4/headers/linux/if.h0000644000175100001660000000045515003640462016342 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* Truncated header from the kernel sources that just defines the name sizes * below; used by icmp.h */ #ifndef _LINUX_IF_H #define _LINUX_IF_H #define IFNAMSIZ 16 #define IFALIASZ 256 #define ALTIFNAMSIZ 128 #endif /* _LINUX_IF_H */ xdp-tools-1.5.4/headers/linux/if_xdp.h0000644000175100001660000000573315003640462017221 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * if_xdp: XDP socket user-space interface * Copyright(c) 2018 Intel Corporation. * * Author(s): Björn Töpel * Magnus Karlsson */ #ifndef _LINUX_IF_XDP_H #define _LINUX_IF_XDP_H #include /* Options for the sxdp_flags field */ #define XDP_SHARED_UMEM (1 << 0) #define XDP_COPY (1 << 1) /* Force copy-mode */ #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ /* If this option is set, the driver might go sleep and in that case * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be * set. If it is set, the application need to explicitly wake up the * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are * running the driver and the application on the same core, you should * use this option so that the kernel will yield to the user space * application. 
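The wakeup contract described above, sketched with libxdp's xsk helpers: xsk_ring_prod__needs_wakeup() tests the XDP_RING_NEED_WAKEUP ring flag, and the socket flag itself (XDP_USE_NEED_WAKEUP) is defined just below. The tx_ring/xsk_fd/sent names are assumptions:

// After producing descriptors, kick the kernel only when the driver
// has asked for it; otherwise the syscall is pure overhead.
xsk_ring_prod__submit(&tx_ring, sent);
if (xsk_ring_prod__needs_wakeup(&tx_ring))
	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);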
*/ #define XDP_USE_NEED_WAKEUP (1 << 3) /* Flags for xsk_umem_config flags */ #define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; __u32 sxdp_ifindex; __u32 sxdp_queue_id; __u32 sxdp_shared_umem_fd; }; /* XDP_RING flags */ #define XDP_RING_NEED_WAKEUP (1 << 0) struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; __u64 flags; }; struct xdp_mmap_offsets { struct xdp_ring_offset rx; struct xdp_ring_offset tx; struct xdp_ring_offset fr; /* Fill */ struct xdp_ring_offset cr; /* Completion */ }; /* XDP socket options */ #define XDP_MMAP_OFFSETS 1 #define XDP_RX_RING 2 #define XDP_TX_RING 3 #define XDP_UMEM_REG 4 #define XDP_UMEM_FILL_RING 5 #define XDP_UMEM_COMPLETION_RING 6 #define XDP_STATISTICS 7 #define XDP_OPTIONS 8 struct xdp_umem_reg { __u64 addr; /* Start of packet data area */ __u64 len; /* Length of packet data area */ __u32 chunk_size; __u32 headroom; __u32 flags; __u32 tx_metadata_len; }; struct xdp_statistics { __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 rx_ring_full; /* Dropped due to rx ring being full */ __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { __u32 flags; }; /* Flags for the flags field of struct xdp_options */ #define XDP_OPTIONS_ZEROCOPY (1 << 0) /* Pgoff for mmaping the rings */ #define XDP_PGOFF_RX_RING 0 #define XDP_PGOFF_TX_RING 0x80000000 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL /* Masks for unaligned chunks mode */ #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 #define XSK_UNALIGNED_BUF_ADDR_MASK \ ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; __u32 len; __u32 options; }; /* UMEM descriptor is __u64 */ #endif /* _LINUX_IF_XDP_H */ xdp-tools-1.5.4/headers/linux/netfilter.h0000644000175100001660000000375715003640462017750 0ustar runnerdocker#ifndef _LINUX_NETFILTER_H #define _LINUX_NETFILTER_H #include #include #include #include #include "hlist.h" struct flow_ports { __be16 source, dest; }; enum ip_conntrack_dir { IP_CT_DIR_ORIGINAL, IP_CT_DIR_REPLY, IP_CT_DIR_MAX }; enum flow_offload_tuple_dir { FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL, FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY, FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX, }; enum flow_offload_type { NF_FLOW_OFFLOAD_UNSPEC, NF_FLOW_OFFLOAD_ROUTE, }; enum nf_flow_flags { NF_FLOW_SNAT, NF_FLOW_DNAT, NF_FLOW_TEARDOWN, NF_FLOW_HW, NF_FLOW_HW_DYING, NF_FLOW_HW_DEAD, NF_FLOW_HW_PENDING, NF_FLOW_HW_BIDIRECTIONAL, NF_FLOW_HW_ESTABLISHED, }; enum flow_offload_xmit_type { FLOW_OFFLOAD_XMIT_UNSPEC, FLOW_OFFLOAD_XMIT_NEIGH, FLOW_OFFLOAD_XMIT_XFRM, FLOW_OFFLOAD_XMIT_DIRECT, FLOW_OFFLOAD_XMIT_TC, }; #define NF_FLOW_TABLE_ENCAP_MAX 2 struct flow_offload_tuple { union { struct in_addr src_v4; struct in6_addr src_v6; }; union { struct in_addr dst_v4; struct in6_addr dst_v6; }; struct { __be16 src_port; __be16 dst_port; }; int iifidx; __u8 l3proto; __u8 l4proto; struct { __u16 id; __be16 proto; } encap[NF_FLOW_TABLE_ENCAP_MAX]; /* All members above are keys for lookups, see flow_offload_hash(). 
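Creating and binding an AF_XDP socket with the structures defined above, without a helper library. A bare sketch; in a real program the UMEM (XDP_UMEM_REG) and at least one ring must be configured via setsockopt() before bind() will succeed:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int xsk_open_and_bind(__u32 ifindex, __u32 queue_id)
{
	struct sockaddr_xdp sxdp;
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;
	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_USE_NEED_WAKEUP;
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp))) {
		close(fd);
		return -1;
	}
	return fd;
}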
*/ struct { } __hash; __u8 dir:2, xmit_type:3, encap_num:2, in_vlan_ingress:2; __u16 mtu; union { struct { struct dst_entry *dst_cache; __u32 dst_cookie; }; struct { __u32 ifidx; __u32 hw_ifidx; __u8 h_source[ETH_ALEN]; __u8 h_dest[ETH_ALEN]; } out; struct { __u32 iifidx; } tc; }; }; struct flow_offload_tuple_rhash { struct rhash_head node; struct flow_offload_tuple tuple; }; struct flow_offload { struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; struct nf_conn *ct; unsigned long flags; __u16 type; __u32 timeout; }; #endif /* _LINUX_NETFILTER_H */ xdp-tools-1.5.4/headers/linux/err.h0000644000175100001660000000114215003640462016526 0ustar runnerdocker/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ #ifndef __LINUX_ERR_H #define __LINUX_ERR_H #include #include #include #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO) static inline void * ERR_PTR(long error_) { return (void *) error_; } static inline long PTR_ERR(const void *ptr) { return (long) ptr; } static inline bool IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline bool IS_ERR_OR_NULL(const void *ptr) { return (!ptr) || IS_ERR_VALUE((unsigned long)ptr); } #endif xdp-tools-1.5.4/headers/linux/btf.h0000644000175100001660000001274615003640462016525 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2018 Facebook */ #ifndef _UAPI__LINUX_BTF_H__ #define _UAPI__LINUX_BTF_H__ #include #define BTF_MAGIC 0xeB9F #define BTF_VERSION 1 struct btf_header { __u16 magic; __u8 version; __u8 flags; __u32 hdr_len; /* All offsets are in bytes relative to the end of this header */ __u32 type_off; /* offset of type section */ __u32 type_len; /* length of type section */ __u32 str_off; /* offset of string section */ __u32 str_len; /* length of string section */ }; /* Max # of type identifier */ #define BTF_MAX_TYPE 0x000fffff /* Max offset into the string section */ #define BTF_MAX_NAME_OFFSET 0x00ffffff /* Max # of struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff struct btf_type { __u32 name_off; /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused * bits 24-28: kind (e.g. int, ptr, array...etc) * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union, enum, fwd and enum64 */ __u32 info; /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64. * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG. * "type" is a type_id referring to another type. 
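Decoding the packed info word described above, using the size/type union that closes the struct just below together with the BTF_INFO_* accessors and kind constants that follow it:

#include <stdio.h>

static void btf_type_describe(const struct btf_type *t)
{
	__u32 kind = BTF_INFO_KIND(t->info);
	__u32 vlen = BTF_INFO_VLEN(t->info);

	if (kind == BTF_KIND_STRUCT)
		printf("struct: %u members, %u bytes\n", vlen, t->size);
	else if (kind == BTF_KIND_PTR)
		printf("pointer to type id %u\n", t->type);
}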
*/ union { __u32 size; __u32 type; }; }; #define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f) #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) enum { BTF_KIND_UNKN = 0, /* Unknown */ BTF_KIND_INT = 1, /* Integer */ BTF_KIND_PTR = 2, /* Pointer */ BTF_KIND_ARRAY = 3, /* Array */ BTF_KIND_STRUCT = 4, /* Struct */ BTF_KIND_UNION = 5, /* Union */ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */ BTF_KIND_FWD = 7, /* Forward */ BTF_KIND_TYPEDEF = 8, /* Typedef */ BTF_KIND_VOLATILE = 9, /* Volatile */ BTF_KIND_CONST = 10, /* Const */ BTF_KIND_RESTRICT = 11, /* Restrict */ BTF_KIND_FUNC = 12, /* Function */ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ BTF_KIND_VAR = 14, /* Variable */ BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ BTF_KIND_DECL_TAG = 17, /* Decl Tag */ BTF_KIND_TYPE_TAG = 18, /* Type Tag */ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, }; /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. */ /* BTF_KIND_INT is followed by a u32 and the following * is the 32 bits arrangement: */ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) /* Attributes stored in the BTF_INT_ENCODING */ #define BTF_INT_SIGNED (1 << 0) #define BTF_INT_CHAR (1 << 1) #define BTF_INT_BOOL (1 << 2) /* BTF_KIND_ENUM is followed by multiple "struct btf_enum". * The exact number of btf_enum is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_enum { __u32 name_off; __s32 val; }; /* BTF_KIND_ARRAY is followed by one "struct btf_array" */ struct btf_array { __u32 type; __u32 index_type; __u32 nelems; }; /* BTF_KIND_STRUCT and BTF_KIND_UNION are followed * by multiple "struct btf_member". The exact number * of btf_member is stored in the vlen (of the info in * "struct btf_type"). */ struct btf_member { __u32 name_off; __u32 type; /* If the type info kind_flag is set, the btf_member offset * contains both member bitfield size and bit offset. The * bitfield size is set for bitfield members. If the type * info kind_flag is not set, the offset contains only bit * offset. */ __u32 offset; }; /* If the struct/union type info kind_flag is set, the * following two macros are used to access bitfield_size * and bit_offset from btf_member.offset. */ #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) /* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". * The exact number of btf_param is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_param { __u32 name_off; __u32 type; }; enum { BTF_VAR_STATIC = 0, BTF_VAR_GLOBAL_ALLOCATED = 1, BTF_VAR_GLOBAL_EXTERN = 2, }; enum btf_func_linkage { BTF_FUNC_STATIC = 0, BTF_FUNC_GLOBAL = 1, BTF_FUNC_EXTERN = 2, }; /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe * additional information related to the variable such as its linkage. */ struct btf_var { __u32 linkage; }; /* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo" * to describe all BTF_KIND_VAR types it contains along with it's * in-section offset as well as size. */ struct btf_var_secinfo { __u32 type; __u32 offset; __u32 size; }; /* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe * additional information related to the tag applied location. 
* If component_idx == -1, the tag is applied to a struct, union, * variable or function. Otherwise, it is applied to a struct/union * member or a func argument, and component_idx indicates which member * or argument (0 ... vlen-1). */ struct btf_decl_tag { __s32 component_idx; }; /* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64". * The exact number of btf_enum64 is stored in the vlen (of the * info in "struct btf_type"). */ struct btf_enum64 { __u32 name_off; __u32 val_lo32; __u32 val_hi32; }; #endif /* _UAPI__LINUX_BTF_H__ */ xdp-tools-1.5.4/headers/linux/compiler_types.h0000644000175100001660000000202315003640462020773 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H #define __LINUX_COMPILER_TYPES_H /* Builtins */ /* * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. * In the meantime, to support gcc < 10, we implement __has_builtin * by hand. */ #ifndef __has_builtin #define __has_builtin(x) (0) #endif #ifdef __CHECKER__ /* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) #else /* __CHECKER__ */ /* context/locking */ # define __must_hold(x) # define __acquires(x) # define __releases(x) # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) #endif /* __CHECKER__ */ /* Compiler specific macros. */ #ifdef __GNUC__ #include #endif #endif /* __LINUX_COMPILER_TYPES_H */ xdp-tools-1.5.4/headers/linux/compiler.h0000644000175100001660000001314415003640462017555 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _TOOLS_LINUX_COMPILER_H_ #define _TOOLS_LINUX_COMPILER_H_ #include #ifndef __compiletime_error # define __compiletime_error(message) #endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ extern void prefix ## suffix(void) __compiletime_error(msg); \ if (!(condition)) \ prefix ## suffix(); \ } while (0) #else # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) #endif #define _compiletime_assert(condition, msg, prefix, suffix) \ __compiletime_assert(condition, msg, prefix, suffix) /** * compiletime_assert - break build and emit msg if condition is false * @condition: a compile-time constant condition to check * @msg: a message to emit if condition is false * * In tradition of POSIX assert, this macro will break the build if the * supplied condition is *false*, emitting the supplied error message if the * compiler has support to do so. */ #define compiletime_assert(condition, msg) \ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) /* Optimization barrier */ /* The "volatile" is due to gcc bugs */ #define barrier() __asm__ __volatile__("": : :"memory") #ifndef __always_inline # define __always_inline inline __attribute__((always_inline)) #endif #ifndef noinline #define noinline #endif /* Are two types/vars the same type (ignoring qualifiers)? 
*/ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) #endif #ifdef __ANDROID__ /* * Big hammer to get rid of tons of: * "warning: always_inline function might not be inlinable" * * At least on android-ndk-r12/platforms/android-24/arch-arm */ #undef __always_inline #define __always_inline inline #endif #define __user #define __rcu #define __read_mostly #ifndef __attribute_const__ # define __attribute_const__ #endif #ifndef __maybe_unused # define __maybe_unused __attribute__((unused)) #endif #ifndef __used # define __used __attribute__((__unused__)) #endif #ifndef __packed # define __packed __attribute__((__packed__)) #endif #ifndef __force # define __force #endif #ifndef __weak # define __weak __attribute__((weak)) #endif #ifndef likely # define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely # define unlikely(x) __builtin_expect(!!(x), 0) #endif #ifndef __init # define __init #endif #ifndef noinline # define noinline #endif #include /* * Following functions are taken from kernel sources and * break aliasing rules in their original form. * * While kernel is compiled with -fno-strict-aliasing, * perf uses -Wstrict-aliasing=3 which makes build fail * under gcc 4.4. * * Using extra __may_alias__ type to allow aliasing * in this case. */ typedef __u8 __attribute__((__may_alias__)) __u8_alias_t; typedef __u16 __attribute__((__may_alias__)) __u16_alias_t; typedef __u32 __attribute__((__may_alias__)) __u32_alias_t; typedef __u64 __attribute__((__may_alias__)) __u64_alias_t; static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { switch (size) { case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break; case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break; case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break; case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break; default: barrier(); __builtin_memcpy((void *)res, (const void *)p, size); barrier(); } } static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break; case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break; case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break; case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break; default: barrier(); __builtin_memcpy((void *)p, (const void *)res, size); barrier(); } } /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some * particular ordering. One way to make the compiler aware of ordering is to * put the two invocations of READ_ONCE or WRITE_ONCE in different C * statements. * * These two macros will also work on aggregate data types like structs or * unions. If the size of the accessed data type exceeds the word size of * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will * fall back to memcpy and print a compile-time warning. * * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, * and (2) Ensuring that the compiler does not fold, spindle, or otherwise * mutilate accesses that either do not require ordering or that interact * with an explicit memory barrier or atomic instruction that provides the * required ordering. 
*/ #define READ_ONCE(x) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ { .__c = { 0 } }; \ __read_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ { .__val = (val) }; \ __write_once_size(&(x), __u.__c, sizeof(x)); \ __u.__val; \ }) #ifndef __fallthrough # define __fallthrough #endif /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ #define ___PASTE(a, b) a##b #define __PASTE(a, b) ___PASTE(a, b) #endif /* _TOOLS_LINUX_COMPILER_H */ xdp-tools-1.5.4/headers/linux/bpf.h0000644000175100001660000076160615003640462016527 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #ifndef _UAPI__LINUX_BPF_H__ #define _UAPI__LINUX_BPF_H__ #include #include /* Extended instruction set based on top of classic BPF */ /* instruction classes */ #define BPF_JMP32 0x06 /* jmp mode in word width */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ #define BPF_XADD 0xc0 /* exclusive add - legacy name */ /* alu/jmp fields */ #define BPF_MOV 0xb0 /* mov reg to reg */ #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ /* change endianness of a register */ #define BPF_END 0xd0 /* flags for endianness conversion: */ #define BPF_TO_LE 0x00 /* convert to little-endian */ #define BPF_TO_BE 0x08 /* convert to big-endian */ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE /* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ /* atomic op type fields (stored in immediate) */ #define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ #define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ #define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ /* Register numbers */ enum { BPF_REG_0 = 0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9, BPF_REG_10, __MAX_BPF_REG, }; /* BPF has 10 general purpose 64-bit registers and stack frame. */ #define MAX_BPF_REG __MAX_BPF_REG struct bpf_insn { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ __u8 data[0]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { __u64 cgroup_inode_id; /* cgroup inode id */ __u32 attach_type; /* program attach type (enum bpf_attach_type) */ }; union bpf_iter_link_info { struct { __u32 map_fd; } map; }; /* BPF syscall commands, see bpf(2) man-page for more details. 
*/ /** * DOC: eBPF Syscall Preamble * * The operation to be performed by the **bpf**\ () system call is determined * by the *cmd* argument. Each operation takes an accompanying argument, * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see * below). The size argument is the size of the union pointed to by *attr*. */ /** * DOC: eBPF Syscall Commands * * BPF_MAP_CREATE * Description * Create a map and return a file descriptor that refers to the * map. The close-on-exec file descriptor flag (see **fcntl**\ (2)) * is automatically enabled for the new file descriptor. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_MAP_CREATE** will delete the map (but see NOTES). * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_MAP_LOOKUP_ELEM * Description * Look up an element with a given *key* in the map referred to * by the file descriptor *map_fd*. * * The *flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_UPDATE_ELEM * Description * Create or update an element (key/value pair) in a specified map. * * The *flags* argument should be specified as one of the * following: * * **BPF_ANY** * Create a new element or update an existing element. * **BPF_NOEXIST** * Create a new element only if it did not exist. * **BPF_EXIST** * Update an existing element. * **BPF_F_LOCK** * Update a spin_lock-ed map element. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, * **E2BIG**, **EEXIST**, or **ENOENT**. * * **E2BIG** * The number of elements in the map reached the * *max_entries* limit specified at map creation time. * **EEXIST** * If *flags* specifies **BPF_NOEXIST** and the element * with *key* already exists in the map. * **ENOENT** * If *flags* specifies **BPF_EXIST** and the element with * *key* does not exist in the map. * * BPF_MAP_DELETE_ELEM * Description * Look up and delete an element by key in a specified map. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_GET_NEXT_KEY * Description * Look up an element by key in a specified map and return the key * of the next element. Can be used to iterate over all elements * in the map. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * The following cases can be used to iterate over all elements of * the map: * * * If *key* is not found, the operation returns zero and sets * the *next_key* pointer to the key of the first element. * * If *key* is found, the operation returns zero and sets the * *next_key* pointer to the key of the next element. * * If *key* is the last element, returns -1 and *errno* is set * to **ENOENT**. * * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or * **EINVAL** on error. * * BPF_PROG_LOAD * Description * Verify and load an eBPF program, returning a new file * descriptor associated with the program. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES). 
* * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is * automatically enabled for the new file descriptor. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_OBJ_PIN * Description * Pin an eBPF program or map referred by the specified *bpf_fd* * to the provided *pathname* on the filesystem. * * The *pathname* argument must not contain a dot ("."). * * On success, *pathname* retains a reference to the eBPF object, * preventing deallocation of the object when the original * *bpf_fd* is closed. This allow the eBPF object to live beyond * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent * process. * * Applying **unlink**\ (2) or similar calls to the *pathname* * unpins the object from the filesystem, removing the reference. * If no other file descriptors or filesystem nodes refer to the * same object, it will be deallocated (see NOTES). * * The filesystem type for the parent directory of *pathname* must * be **BPF_FS_MAGIC**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_OBJ_GET * Description * Open a file descriptor for the eBPF object pinned to the * specified *pathname*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_PROG_ATTACH * Description * Attach an eBPF program to a *target_fd* at the specified * *attach_type* hook. * * The *attach_type* specifies the eBPF attachment point to * attach the program to, and must be one of *bpf_attach_type* * (see below). * * The *attach_bpf_fd* must be a valid file descriptor for a * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap * or sock_ops type corresponding to the specified *attach_type*. * * The *target_fd* must be a valid file descriptor for a kernel * object which depends on the attach type of *attach_bpf_fd*: * * **BPF_PROG_TYPE_CGROUP_DEVICE**, * **BPF_PROG_TYPE_CGROUP_SKB**, * **BPF_PROG_TYPE_CGROUP_SOCK**, * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, * **BPF_PROG_TYPE_CGROUP_SYSCTL**, * **BPF_PROG_TYPE_SOCK_OPS** * * Control Group v2 hierarchy with the eBPF controller * enabled. Requires the kernel to be compiled with * **CONFIG_CGROUP_BPF**. * * **BPF_PROG_TYPE_FLOW_DISSECTOR** * * Network namespace (eg /proc/self/ns/net). * * **BPF_PROG_TYPE_LIRC_MODE2** * * LIRC device path (eg /dev/lircN). Requires the kernel * to be compiled with **CONFIG_BPF_LIRC_MODE2**. * * **BPF_PROG_TYPE_SK_SKB**, * **BPF_PROG_TYPE_SK_MSG** * * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**). * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_DETACH * Description * Detach the eBPF program associated with the *target_fd* at the * hook specified by *attach_type*. The program must have been * previously attached using **BPF_PROG_ATTACH**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_TEST_RUN * Description * Run the eBPF program associated with the *prog_fd* a *repeat* * number of times against a provided program context *ctx_in* and * data *data_in*, and return the modified program context * *ctx_out*, *data_out* (for example, packet data), result of the * execution *retval*, and *duration* of the test run. 
* * The sizes of the buffers provided as input and output * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must * be provided in the corresponding variables *ctx_size_in*, * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any * of these parameters are not provided (ie set to NULL), the * corresponding size field must be zero. * * Some program types have particular requirements: * * **BPF_PROG_TYPE_SK_LOOKUP** * *data_in* and *data_out* must be NULL. * * **BPF_PROG_TYPE_RAW_TRACEPOINT**, * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** * * *ctx_out*, *data_in* and *data_out* must be NULL. * *repeat* must be zero. * * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * **ENOSPC** * Either *data_size_out* or *ctx_size_out* is too small. * **ENOTSUPP** * This command is not supported by the program type of * the program referred to by *prog_fd*. * * BPF_PROG_GET_NEXT_ID * Description * Fetch the next eBPF program currently loaded into the kernel. * * Looks for the eBPF program with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF programs * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_MAP_GET_NEXT_ID * Description * Fetch the next eBPF map currently loaded into the kernel. * * Looks for the eBPF map with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF maps * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_PROG_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF program corresponding to * *prog_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_MAP_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF map corresponding to * *map_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_OBJ_GET_INFO_BY_FD * Description * Obtain information about the eBPF object corresponding to * *bpf_fd*. * * Populates up to *info_len* bytes of *info*, which will be in * one of the following formats depending on the eBPF object type * of *bpf_fd*: * * * **struct bpf_prog_info** * * **struct bpf_map_info** * * **struct bpf_btf_info** * * **struct bpf_link_info** * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_QUERY * Description * Obtain information about eBPF programs associated with the * specified *attach_type* hook. * * The *target_fd* must be a valid file descriptor for a kernel * object which depends on the attach type of *attach_bpf_fd*: * * **BPF_PROG_TYPE_CGROUP_DEVICE**, * **BPF_PROG_TYPE_CGROUP_SKB**, * **BPF_PROG_TYPE_CGROUP_SOCK**, * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, * **BPF_PROG_TYPE_CGROUP_SYSCTL**, * **BPF_PROG_TYPE_SOCK_OPS** * * Control Group v2 hierarchy with the eBPF controller * enabled. Requires the kernel to be compiled with * **CONFIG_CGROUP_BPF**. * * **BPF_PROG_TYPE_FLOW_DISSECTOR** * * Network namespace (eg /proc/self/ns/net). 
* * **BPF_PROG_TYPE_LIRC_MODE2** * * LIRC device path (eg /dev/lircN). Requires the kernel * to be compiled with **CONFIG_BPF_LIRC_MODE2**. * * **BPF_PROG_QUERY** always fetches the number of programs * attached and the *attach_flags* which were used to attach those * programs. Additionally, if *prog_ids* is nonzero and the number * of attached programs is less than *prog_cnt*, populates * *prog_ids* with the eBPF program ids of the programs attached * at *target_fd*. * * The following flags may alter the result: * * **BPF_F_QUERY_EFFECTIVE** * Only return information regarding programs which are * currently effective at the specified *target_fd*. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_RAW_TRACEPOINT_OPEN * Description * Attach an eBPF program to a tracepoint *name* to access kernel * internal arguments of the tracepoint in their raw form. * * The *prog_fd* must be a valid file descriptor associated with * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**. * * No ABI guarantees are made about the content of tracepoint * arguments exposed to the corresponding eBPF program. * * Applying **close**\ (2) to the file descriptor returned by * **BPF_RAW_TRACEPOINT_OPEN** will detach the program from the * tracepoint (but see NOTES). * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_BTF_LOAD * Description * Verify and load BPF Type Format (BTF) metadata into the kernel, * returning a new file descriptor associated with the metadata. * BTF is described in more detail at * https://www.kernel.org/doc/html/latest/bpf/btf.html. * * The *btf* parameter must point to valid memory providing * *btf_size* bytes of BTF binary metadata. * * The returned file descriptor can be passed to other **bpf**\ () * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to * associate the BTF with those objects. * * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional * parameters to specify a *btf_log_buf*, *btf_log_size* and * *btf_log_level* which allow the kernel to return freeform log * output regarding the BTF verification process. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_BTF_GET_FD_BY_ID * Description * Open a file descriptor for the BPF Type Format (BTF) * corresponding to *btf_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_TASK_FD_QUERY * Description * Obtain information about eBPF programs associated with the * target process identified by *pid* and *fd*. * * If the *pid* and *fd* are associated with a tracepoint, kprobe * or uprobe perf event, then the *prog_id* and *fd_type* will * be populated with the eBPF program id and file descriptor type * of type **bpf_task_fd_type**. If associated with a kprobe or * uprobe, the *probe_offset* and *probe_addr* will also be * populated. Optionally, if *buf* is provided, then up to * *buf_len* bytes of *buf* will be populated with the name of * the tracepoint, kprobe or uprobe. * * The resulting *prog_id* may be introspected in deeper detail * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately.
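 *
 *	As an illustration of how these commands reach the kernel, here
 *	is a minimal sketch of issuing **BPF_OBJ_PIN** through the raw
 *	**syscall**\ (2) interface (the wrapper below is illustrative,
 *	not part of this header; most applications use a library such
 *	as libbpf instead):
 *
 *	::
 *
 *		#include <linux/bpf.h>
 *		#include <string.h>
 *		#include <sys/syscall.h>
 *		#include <unistd.h>
 *
 *		static int bpf_obj_pin(int bpf_fd, const char *pathname)
 *		{
 *			union bpf_attr attr;
 *
 *			/* unused fields of the union must be zeroed */
 *			memset(&attr, 0, sizeof(attr));
 *			attr.pathname = (__u64)(unsigned long)pathname;
 *			attr.bpf_fd = bpf_fd;
 *
 *			return syscall(__NR_bpf, BPF_OBJ_PIN, &attr,
 *				       sizeof(attr));
 *		}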
* * BPF_MAP_LOOKUP_AND_DELETE_ELEM * Description * Look up an element with the given *key* in the map referred to * by the file descriptor *fd*, and if found, delete the element. * * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map * types, the *flags* argument needs to be set to 0, but for other * map types, it may be specified as: * * **BPF_F_LOCK** * Look up and delete the value of a spin-locked map * without returning the lock. This must be specified if * the elements contain a spinlock. * * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types * implement this command as a "pop" operation, deleting the top * element rather than one corresponding to *key*. * The *key* and *key_len* parameters should be zeroed when * issuing this operation for these map types. * * This command is only valid for the following map types: * * **BPF_MAP_TYPE_QUEUE** * * **BPF_MAP_TYPE_STACK** * * **BPF_MAP_TYPE_HASH** * * **BPF_MAP_TYPE_PERCPU_HASH** * * **BPF_MAP_TYPE_LRU_HASH** * * **BPF_MAP_TYPE_LRU_PERCPU_HASH** * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_FREEZE * Description * Freeze the permissions of the specified map. * * Write permissions may be frozen by passing zero *flags*. * Upon success, no future syscall invocations may alter the * map state of *map_fd*. Write operations from eBPF programs * are still possible for a frozen map. * * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_BTF_GET_NEXT_ID * Description * Fetch the next BPF Type Format (BTF) object currently loaded * into the kernel. * * Looks for the BTF object with an id greater than *start_id* * and updates *next_id* on success. If no other BTF objects * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_MAP_LOOKUP_BATCH * Description * Iterate and fetch multiple elements in a map. * * Two opaque values are used to manage batch operations, * *in_batch* and *out_batch*. Initially, *in_batch* must be set * to NULL to begin the batched operation. After each subsequent * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant * *out_batch* as the *in_batch* for the next operation to * continue iteration from the current point. * * The *keys* and *values* are output parameters which must point * to memory large enough to hold *count* items based on the key * and value size of the map *map_fd*. The *keys* buffer must be * of *key_size* * *count*. The *values* buffer must be of * *value_size* * *count*. * * The *elem_flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * On success, *count* elements from the map are copied into the * user buffer, with the keys copied into *keys* and the values * copied into the corresponding indices in *values*. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. 
* * May set *errno* to **ENOSPC** to indicate that *keys* or * *values* is too small to dump an entire bucket during * iteration of a hash-based map type. * * BPF_MAP_LOOKUP_AND_DELETE_BATCH * Description * Iterate and delete all elements in a map. * * This operation has the same behavior as * **BPF_MAP_LOOKUP_BATCH** with two exceptions: * * * Every element that is successfully returned is also deleted * from the map. This is at least *count* elements. Note that * *count* is both an input and an output parameter. * * Upon returning with *errno* set to **EFAULT**, up to * *count* elements may be deleted without returning the keys * and values of the deleted elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_MAP_UPDATE_BATCH * Description * Update multiple elements in a map by *key*. * * The *keys* and *values* are input parameters which must point * to memory large enough to hold *count* items based on the key * and value size of the map *map_fd*. The *keys* buffer must be * of *key_size* * *count*. The *values* buffer must be of * *value_size* * *count*. * * Each element specified in *keys* is sequentially updated to the * value in the corresponding index in *values*. The *in_batch* * and *out_batch* parameters are ignored and should be zeroed. * * The *elem_flags* argument should be specified as one of the * following: * * **BPF_ANY** * Create new elements or update existing elements. * **BPF_NOEXIST** * Create new elements only if they do not exist. * **BPF_EXIST** * Update existing elements. * **BPF_F_LOCK** * Update spin_lock-ed map elements. This must be * specified if the map value contains a spinlock. * * On success, *count* elements from the map are updated. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or * **E2BIG**. **E2BIG** indicates that the number of elements in * the map reached the *max_entries* limit specified at map * creation time. * * May set *errno* to one of the following error codes under * specific circumstances: * * **EEXIST** * If *flags* specifies **BPF_NOEXIST** and the element * with *key* already exists in the map. * **ENOENT** * If *flags* specifies **BPF_EXIST** and the element with * *key* does not exist in the map. * * BPF_MAP_DELETE_BATCH * Description * Delete multiple elements in a map by *key*. * * The *keys* parameter is an input parameter which must point * to memory large enough to hold *count* items based on the key * size of the map *map_fd*, that is, *key_size* * *count*. * * Each element specified in *keys* is sequentially deleted. The * *in_batch*, *out_batch*, and *values* parameters are ignored * and should be zeroed. * * The *elem_flags* argument may be specified as one of the * following: * * **BPF_F_LOCK** * Look up the value of a spin-locked map without * returning the lock. This must be specified if the * elements contain a spinlock. * * On success, *count* elements from the map are deleted. * * If an error is returned and *errno* is not **EFAULT**, *count* * is set to the number of successfully processed elements. If * *errno* is **EFAULT**, up to *count* elements may have been * deleted. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately.
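 *
 *	A minimal sketch of driving the batch commands above through
 *	libbpf's **bpf_map_lookup_batch**\ () wrapper (the key/value
 *	types, the map fd and the chunk size are assumptions for the
 *	example):
 *
 *	::
 *
 *		__u32 out_batch, count, keys[64];
 *		__u64 values[64];
 *		void *in = NULL;	/* NULL starts the iteration */
 *		int err;
 *
 *		do {
 *			count = 64;	/* capacity in, filled count out */
 *			err = bpf_map_lookup_batch(map_fd, in, &out_batch,
 *						   keys, values, &count,
 *						   NULL);
 *			/* 'count' entries are valid here, even when err
 *			 * reports -ENOENT to signal the end of the map. */
 *			in = &out_batch;
 *		} while (!err);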
* * BPF_LINK_CREATE * Description * Attach an eBPF program to a *target_fd* at the specified * *attach_type* hook and return a file descriptor handle for * managing the link. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_UPDATE * Description * Update the eBPF program in the specified *link_fd* to * *new_prog_fd*. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_LINK_GET_FD_BY_ID * Description * Open a file descriptor for the eBPF Link corresponding to * *link_id*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_GET_NEXT_ID * Description * Fetch the next eBPF link currently loaded into the kernel. * * Looks for the eBPF link with an id greater than *start_id* * and updates *next_id* on success. If no other eBPF links * remain with ids higher than *start_id*, returns -1 and sets * *errno* to **ENOENT**. * * Return * Returns zero on success. On error, or when no id remains, -1 * is returned and *errno* is set appropriately. * * BPF_ENABLE_STATS * Description * Enable eBPF runtime statistics gathering. * * Runtime statistics gathering for the eBPF runtime is disabled * by default to minimize the corresponding performance overhead. * This command enables statistics globally. * * Multiple programs may independently enable statistics. * After gathering the desired statistics, eBPF runtime statistics * may be disabled again by calling **close**\ (2) for the file * descriptor returned by this function. Statistics will only be * disabled system-wide when all outstanding file descriptors * returned by prior calls for this subcommand are closed. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_ITER_CREATE * Description * Create an iterator on top of the specified *link_fd* (as * previously created using **BPF_LINK_CREATE**) and return a * file descriptor that can be used to trigger the iteration. * * If the resulting file descriptor is pinned to the filesystem * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls * for that path will trigger the iterator to read kernel state * using the eBPF program attached to *link_fd*. * * Return * A new file descriptor (a nonnegative integer), or -1 if an * error occurred (in which case, *errno* is set appropriately). * * BPF_LINK_DETACH * Description * Forcefully detach the specified *link_fd* from its * corresponding attachment point. * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * BPF_PROG_BIND_MAP * Description * Bind a map to the lifetime of an eBPF program. * * The map identified by *map_fd* is bound to the program * identified by *prog_fd* and only released when *prog_fd* is * released. This may be used in cases where metadata should be * associated with a program which otherwise does not contain any * references to the map (for example, embedded in the eBPF * program instructions). * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. * * NOTES * eBPF objects (maps and programs) can be shared between processes. * * * After **fork**\ (2), the child inherits file descriptors * referring to the same eBPF objects. 
* * File descriptors referring to eBPF objects can be transferred over * **unix**\ (7) domain sockets. * * File descriptors referring to eBPF objects can be duplicated in the * usual way, using **dup**\ (2) and similar calls. * * File descriptors referring to eBPF objects can be pinned to the * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2). * * An eBPF object is deallocated only after all file descriptors referring * to the object have been closed and no references remain pinned to the * filesystem or attached (for example, bound to a program or device). */ enum bpf_cmd { BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM, BPF_MAP_GET_NEXT_KEY, BPF_PROG_LOAD, BPF_OBJ_PIN, BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, BPF_PROG_QUERY, BPF_RAW_TRACEPOINT_OPEN, BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, BPF_BTF_GET_NEXT_ID, BPF_MAP_LOOKUP_BATCH, BPF_MAP_LOOKUP_AND_DELETE_BATCH, BPF_MAP_UPDATE_BATCH, BPF_MAP_DELETE_BATCH, BPF_LINK_CREATE, BPF_LINK_UPDATE, BPF_LINK_GET_FD_BY_ID, BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, BPF_LINK_DETACH, BPF_PROG_BIND_MAP, }; enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY, BPF_MAP_TYPE_STACK_TRACE, BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, BPF_MAP_TYPE_SOCKMAP, BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_DEVMAP_HASH, BPF_MAP_TYPE_STRUCT_OPS, BPF_MAP_TYPE_RINGBUF, BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, BPF_MAP_TYPE_BLOOM_FILTER, }; /* Note that tracing related programs such as * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} * are not subject to a stable API since kernel internal data * structures can change from release to release and may * therefore break existing tracing BPF programs. Tracing BPF * programs correspond to /a/ specific kernel which is to be * analyzed, and not /a/ specific kernel /and/ all future ones. 
*/ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_KPROBE, BPF_PROG_TYPE_SCHED_CLS, BPF_PROG_TYPE_SCHED_ACT, BPF_PROG_TYPE_TRACEPOINT, BPF_PROG_TYPE_XDP, BPF_PROG_TYPE_PERF_EVENT, BPF_PROG_TYPE_CGROUP_SKB, BPF_PROG_TYPE_CGROUP_SOCK, BPF_PROG_TYPE_LWT_IN, BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, BPF_PROG_TYPE_SK_SKB, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_RAW_TRACEPOINT, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_PROG_TYPE_TRACING, BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */ }; enum bpf_attach_type { BPF_CGROUP_INET_INGRESS, BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, BPF_SK_SKB_STREAM_PARSER, BPF_SK_SKB_STREAM_VERDICT, BPF_CGROUP_DEVICE, BPF_SK_MSG_VERDICT, BPF_CGROUP_INET4_BIND, BPF_CGROUP_INET6_BIND, BPF_CGROUP_INET4_CONNECT, BPF_CGROUP_INET6_CONNECT, BPF_CGROUP_INET4_POST_BIND, BPF_CGROUP_INET6_POST_BIND, BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, BPF_FLOW_DISSECTOR, BPF_CGROUP_SYSCTL, BPF_CGROUP_UDP4_RECVMSG, BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, BPF_TRACE_RAW_TP, BPF_TRACE_FENTRY, BPF_TRACE_FEXIT, BPF_MODIFY_RETURN, BPF_LSM_MAC, BPF_TRACE_ITER, BPF_CGROUP_INET4_GETPEERNAME, BPF_CGROUP_INET6_GETPEERNAME, BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, BPF_XDP, BPF_SK_SKB_VERDICT, BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, BPF_LSM_CGROUP, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE enum bpf_link_type { BPF_LINK_TYPE_UNSPEC = 0, BPF_LINK_TYPE_RAW_TRACEPOINT = 1, BPF_LINK_TYPE_TRACING = 2, BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, MAX_BPF_LINK_TYPE, }; /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE (default): No further bpf programs allowed in the subtree. * * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, * the program in this cgroup yields to sub-cgroup program. * * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, * that cgroup program gets run in addition to the program in this cgroup. * * Only one program is allowed to be attached to a cgroup with * NONE or BPF_F_ALLOW_OVERRIDE flag. * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will * release the old program and attach the new one. Attach flags have to match. * * Multiple programs are allowed to be attached to a cgroup with * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order * (those that were attached first, run first). * The programs of the sub-cgroup are executed first, then programs of * this cgroup and then programs of the parent cgroup. * When a child program makes a decision (like picking TCP CA or sock bind), * the parent program has a chance to override it. * * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of * programs for a cgroup.
Though it's possible to replace an old program at * any position by also specifying the BPF_F_REPLACE flag and the position * itself in the replace_bpf_fd attribute. The old program at this position * will be released. * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. * Ex1: * cgrp1 (MULTI progs A, B) -> * cgrp2 (OVERRIDE prog C) -> * cgrp3 (MULTI prog D) -> * cgrp4 (OVERRIDE prog E) -> * cgrp5 (NONE prog F) * the event in cgrp5 triggers execution of F,D,A,B in that order. * if prog F is detached, the execution is E,D,A,B * if prog F and D are detached, the execution is E,A,B * if prog F, E and D are detached, the execution is C,A,B * * All eligible programs are executed regardless of return code from * earlier programs. */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) #define BPF_F_REPLACE (1U << 2) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, * and NET_IP_ALIGN defined to 2. */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will allow any alignment whatsoever. On platforms * with strict alignment requirements for loads and stores (such * as sparc and mips) the verifier validates that all loads and * stores provably follow this requirement. This flag turns that * checking and enforcement off. * * It is mostly used for testing when we want to validate the * context and memory access aspects of the verifier, but because * of an unaligned access the alignment check would trigger before * the one we are interested in. */ #define BPF_F_ANY_ALIGNMENT (1U << 1) /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purposes. * The verifier does sub-register def/use analysis and identifies instructions * whose def only matters for the low 32 bits, while the high 32 bits are never * referenced later through implicit zero extension. Therefore the verifier * notifies JIT back-ends that it is safe to ignore clearing the high 32 bits * for these instructions. This saves some back-ends a lot of code-gen. However * such optimization is not necessary on some arches, for example x86_64, arm64 * etc, whose JIT back-ends hence haven't used the verifier's analysis result. * But we really want to have a way to verify the correctness of the described * optimization on x86_64 on which testsuites are frequently exercised. * * So, this flag is introduced. Once it is set, the verifier will randomize * the high 32 bits for those instructions that have been identified as safe * to skip clearing. Then, if the verifier is not doing correct analysis, such * randomization will regress tests to expose bugs. */ #define BPF_F_TEST_RND_HI32 (1U << 2) /* The verifier internal test flag. Behavior is undefined */ #define BPF_F_TEST_STATE_FREQ (1U << 3) /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will * restrict map and helper usage for such programs. Sleepable BPF programs can * only be attached to hooks where kernel execution context allows sleeping. * Such programs are allowed to use helpers that may sleep like * bpf_copy_from_user(). */ #define BPF_F_SLEEPABLE (1U << 4) /* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program * fully supports xdp frags.
*/ #define BPF_F_XDP_HAS_FRAGS (1U << 5) /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create a return probe. */ #define BPF_F_KPROBE_MULTI_RETURN (1U << 0) /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX] * insn[0].imm: map fd or fd_idx * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 #define BPF_PSEUDO_MAP_IDX 5 /* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE * insn[0].imm: map fd or fd_idx * insn[1].imm: offset into value * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of map[0]+offset * verifier type: PTR_TO_MAP_VALUE */ #define BPF_PSEUDO_MAP_VALUE 2 #define BPF_PSEUDO_MAP_IDX_VALUE 6 /* insn[0].src_reg: BPF_PSEUDO_BTF_ID * insn[0].imm: kernel btf id of VAR * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of the kernel variable * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var * is struct/union. */ #define BPF_PSEUDO_BTF_ID 3 /* insn[0].src_reg: BPF_PSEUDO_FUNC * insn[0].imm: insn offset to the func * insn[1].imm: 0 * insn[0].off: 0 * insn[1].off: 0 * ldimm64 rewrite: address of the function * verifier type: PTR_TO_FUNC. */ #define BPF_PSEUDO_FUNC 4 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function */ #define BPF_PSEUDO_CALL 1 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL, * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel */ #define BPF_PSEUDO_KFUNC_CALL 2 /* flags for BPF_MAP_UPDATE_ELEM command */ enum { BPF_ANY = 0, /* create new element or update existing */ BPF_NOEXIST = 1, /* create new element if it didn't exist */ BPF_EXIST = 2, /* update existing element */ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ }; /* flags for BPF_MAP_CREATE command */ enum { BPF_F_NO_PREALLOC = (1U << 0), /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ BPF_F_NO_COMMON_LRU = (1U << 1), /* Specify numa node during map creation */ BPF_F_NUMA_NODE = (1U << 2), /* Flags for accessing BPF object from syscall side. */ BPF_F_RDONLY = (1U << 3), BPF_F_WRONLY = (1U << 4), /* Flag for stack_map, store build_id+offset instead of pointer */ BPF_F_STACK_BUILD_ID = (1U << 5), /* Zero-initialize hash function seed. This should only be used for testing. */ BPF_F_ZERO_SEED = (1U << 6), /* Flags for accessing BPF object from program side. */ BPF_F_RDONLY_PROG = (1U << 7), BPF_F_WRONLY_PROG = (1U << 8), /* Clone map from listener for newly accepted socket */ BPF_F_CLONE = (1U << 9), /* Enable memory-mapping BPF map */ BPF_F_MMAPABLE = (1U << 10), /* Share perf_event among processes */ BPF_F_PRESERVE_ELEMS = (1U << 11), /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), }; /* Flags for BPF_PROG_QUERY. */ /* Query effective (directly attached + inherited from ancestor cgroups) * programs that will be executed for events within a cgroup. * attach_flags with this flag are returned only for directly attached programs.
*/ #define BPF_F_QUERY_EFFECTIVE (1U << 0) /* Flags for BPF_PROG_TEST_RUN */ /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { /* enables run_time_ns and run_cnt */ BPF_STATS_RUN_TIME = 0, }; enum bpf_stack_build_id_status { /* user space needs an empty entry to identify the end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, /* with valid build_id and offset */ BPF_STACK_BUILD_ID_VALID = 1, /* couldn't get build_id, fallback to ip */ BPF_STACK_BUILD_ID_IP = 2, }; #define BPF_BUILD_ID_SIZE 20 struct bpf_stack_build_id { __s32 status; unsigned char build_id[BPF_BUILD_ID_SIZE]; union { __u64 offset; __u64 ip; }; }; #define BPF_OBJ_NAME_LEN 16U union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */ __u32 inner_map_fd; /* fd pointing to the inner map */ __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */ char map_name[BPF_OBJ_NAME_LEN]; __u32 map_ifindex; /* ifindex of netdev to create on */ __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- * struct stored as the * map value */ /* Any per-map-type extra fields * * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the * number of hash functions (if 0, the bloom filter will default * to using 5 hash functions). */ __u64 map_extra; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; __aligned_u64 key; union { __aligned_u64 value; __aligned_u64 next_key; }; __u64 flags; }; struct { /* struct used by BPF_MAP_*_BATCH commands */ __aligned_u64 in_batch; /* start batch, * NULL to start from beginning */ __aligned_u64 out_batch; /* output: next start batch */ __aligned_u64 keys; __aligned_u64 values; __u32 count; /* input/output: * input: # of key/value * elements * output: # of filled elements */ __u32 map_fd; __u64 elem_flags; __u64 flags; } batch; struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; __aligned_u64 insns; __aligned_u64 license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; __u32 prog_ifindex; /* ifindex of netdev to prep for */ /* For some prog types expected attach type must be known at * load time to verify attach type specific parts of prog * (context accesses, allowed helpers, etc).
*/ __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ __aligned_u64 func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ union { /* valid prog_fd to attach to bpf prog */ __u32 attach_prog_fd; /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; __u32 core_relo_cnt; /* number of bpf_core_relo */ __aligned_u64 fd_array; /* array of FDs */ __aligned_u64 core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ __aligned_u64 pathname; __u32 bpf_fd; __u32 file_flags; }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ __u32 target_fd; /* container object to attach to */ __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; __u32 replace_bpf_fd; /* previously attached eBPF * program to replace if * BPF_F_REPLACE is used */ }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; __u32 data_size_in; /* input: len of data_in */ __u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; /* input: len of ctx_in */ __u32 ctx_size_out; /* input/output: len of ctx_out * returns ENOSPC if ctx_out * is too small. */ __aligned_u64 ctx_in; __aligned_u64 ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ union { __u32 start_id; __u32 prog_id; __u32 map_id; __u32 btf_id; __u32 link_id; }; __u32 next_id; __u32 open_flags; }; struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; __aligned_u64 info; } info; struct { /* anonymous struct used by BPF_PROG_QUERY command */ __u32 target_fd; /* container object to query */ __u32 attach_type; __u32 query_flags; __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */ } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ __aligned_u64 btf; __aligned_u64 btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; }; struct { __u32 pid; /* input: pid */ __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ __aligned_u64 buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */ __u32 prog_id; /* output: prog_id */ __u32 fd_type; /* output: BPF_FD_TYPE_* */ __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; struct { /* struct used by BPF_LINK_CREATE command */ __u32 prog_fd; /* eBPF program to attach */ union { __u32 target_fd; /* object to attach to */ __u32 target_ifindex; /* target ifindex */ }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ union { __u32 target_btf_id; /* btf_id of target to attach to */ struct { __aligned_u64 iter_info; /* extra bpf_iter_link_info */ __u32 iter_info_len; /* iter_info length */ }; struct {
/* black box user-provided value passed through * to BPF program at the execution time and * accessible through bpf_get_attach_cookie() BPF helper */ __u64 bpf_cookie; } perf_event; struct { __u32 flags; __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; __aligned_u64 cookies; } kprobe_multi; struct { /* this is overlaid with the target_btf_id above. */ __u32 target_btf_id; /* black box user-provided value passed through * to BPF program at the execution time and * accessible through bpf_get_attach_cookie() BPF helper */ __u64 cookie; } tracing; }; } link_create; struct { /* struct used by BPF_LINK_UPDATE command */ __u32 link_fd; /* link fd */ /* new program fd to update link with */ __u32 new_prog_fd; __u32 flags; /* extra flags */ /* expected link's program fd; is specified only if * BPF_F_REPLACE flag is set in flags */ __u32 old_prog_fd; } link_update; struct { __u32 link_fd; } link_detach; struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; struct { /* struct used by BPF_ITER_CREATE command */ __u32 link_fd; __u32 flags; } iter_create; struct { /* struct used by BPF_PROG_BIND_MAP command */ __u32 prog_fd; __u32 map_fd; __u32 flags; /* extra flags */ } prog_bind_map; } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF * developers about the multiple available eBPF helper functions. It can be * parsed and used to produce a manual page. The workflow is the following, * and requires the rst2man utility: * * $ ./scripts/bpf_doc.py \ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 * $ man /tmp/bpf-helpers.7 * * Note that in order to produce this external documentation, some RST * formatting is used in the descriptions to get "bold" and "italics" in * manual pages. Also note that the few trailing white spaces are * intentional, removing them would break paragraphs for rst2man. * * Start of BPF helper function descriptions: * * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) * Description * Perform a lookup in *map* for an entry associated to *key*. * Return * Map value associated to *key*, or **NULL** if no entry was * found. * * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * Flag value **BPF_NOEXIST** cannot be used for maps of types * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all * elements always exist), the helper would return an error. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. * * Generally, use **bpf_probe_read_user**\ () or * **bpf_probe_read_kernel**\ () instead. * Return * 0 on success, or a negative error in case of failure. 
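 *
 *	A minimal sketch of the common lookup-or-create counter pattern
 *	built from the map helpers above (the map name and the key and
 *	value types are assumptions for the example):
 *
 *	::
 *
 *		__u32 key = 0;
 *		__u64 one = 1, *cnt;
 *
 *		cnt = bpf_map_lookup_elem(&counter_map, &key);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		else
 *			/* BPF_NOEXIST keeps a racing first insert harmless */
 *			bpf_map_update_elem(&counter_map, &key, &one,
 *					    BPF_NOEXIST);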
* * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. * Does not include time the system was suspended. * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) * Return * Current *ktime*. * * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if * available. It can take up to three additional **u64** * arguments (as an eBPF helper, the total number of arguments is * limited to five). * * Each time the helper is called, it appends a line to the trace. * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is * open; use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/debug/tracing/trace_options* (see also the * *README* file under the same directory). However, it usually * defaults to something like: * * :: * * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> * * In the above: * * * ``telnet`` is the name of the current task. * * ``470`` is the PID of the current task. * * ``001`` is the CPU number on which the task is * running. * * In ``.N..``, each character refers to a set of * options (whether irqs are enabled, scheduling * options, whether hard/softirqs are running, level of * preempt_disabled respectively). **N** means that * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** * are set. * * ``419421.045894`` is a timestamp. * * ``0x00000001`` is a fake value used by BPF for the * instruction pointer register. * * ``<formatted msg>`` is the message formatted with * *fmt*. * * The conversion specifiers supported by *fmt* are similar to, * but more limited than, those of printk(). They are **%d**, **%i**, * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size * of field, padding with zeroes, etc.) is available, and the * helper will return **-EINVAL** (but print nothing) if it * encounters an unknown specifier. * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values * to user space, perf events should be preferred. * Return * The number of bytes written to the buffer, or a negative error * in case of failure. * * u32 bpf_get_prandom_u32(void) * Description * Get a pseudo-random number. * * From a security point of view, this helper uses its own * pseudo-random internal state, and cannot be used to infer the * seed of other random functions in the kernel. However, it is * essential to note that the generator used by the helper is not * cryptographically secure. * Return * A random 32-bit unsigned value. * * u32 bpf_get_smp_processor_id(void) * Description * Get the SMP (symmetric multiprocessing) processor id. Note that * all programs run with migration disabled, which means that the * SMP processor id is stable during all the execution of the * program. * Return * The SMP id of the processor running the program.
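 *
 *	A minimal debugging sketch combining the helpers above (the
 *	on-stack format string is the traditional idiom; *pkt_count* is
 *	an assumption for the example):
 *
 *	::
 *
 *		char fmt[] = "cpu %u: saw packet number %u\n";
 *		__u32 cpu = bpf_get_smp_processor_id();
 *
 *		bpf_trace_printk(fmt, sizeof(fmt), cpu, pkt_count);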
* * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the * checksum for the packet after storing the bytes) and * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper * must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored in *size*. * Alternatively, it is possible to store the difference between * the previous and the new values of the header field in *to*, by * setting *from* and *size* to 0. For both methods, *offset* * indicates the location of the IP checksum within the packet. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the * helper must know the former value of the header field that was * modified (*from*), the new value of this field (*to*), and the * number of bytes (2 or 4) for this field, stored on the lowest * four bits of *flags*. Alternatively, it is possible to store * the difference between the previous and the new values of the * header field in *to*, by setting *from* and the four lowest * bits of *flags* to 0. For both methods, *offset* indicates the * location of the IP checksum within the packet. In addition to * the size of the field, *flags* can be added (bitwise OR) actual * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates * the checksum is to be computed against a pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * * A call to this helper is susceptible to change the underlying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack * frame is used (but values on stack and in registers for the * caller are not accessible to the callee). This mechanism allows * for program chaining, either for raising the maximum number of * available eBPF instructions, or to execute given programs in * conditional blocks. For security reasons, there is an upper * limit to the number of successive tail calls that can be * performed. * * Upon call of this helper, the program attempts to jump into a * program referenced at index *index* in *prog_array_map*, a * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes * *ctx*, a pointer to the context. * * If the call succeeds, the kernel immediately runs the first * instruction of the new program. This is not a function call, * and it never returns to the previous program. If the call * fails, then the helper has no effect, and the caller continues * to run its subsequent instructions. A call can fail if the * destination program for the jump does not exist (i.e. *index* * is superior to the number of entries in *prog_array_map*), or * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), * which is currently set to 33. * Return * 0 on success, or a negative error in case of failure. * * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress * interfaces can be used for redirection. The **BPF_F_INGRESS** * value in *flags* is used to make the distinction (ingress path * is selected if the flag is present, egress path otherwise). * This is the only flag supported for now. * * In comparison with **bpf_redirect**\ () helper, * **bpf_clone_redirect**\ () has the associated cost of * duplicating the packet buffer, but this can be executed out of * the eBPF program. Conversely, **bpf_redirect**\ () is more * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_get_current_pid_tgid(void) * Description * Get the current pid and tgid. * Return * A 64-bit integer containing the current tgid and pid, and * created as such: * *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) * Description * Get the current uid and gid. * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. 
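 *
 *	A minimal sketch of splitting the combined values returned by
 *	the two helpers above:
 *
 *	::
 *
 *		__u64 pid_tgid = bpf_get_current_pid_tgid();
 *		__u32 tgid = pid_tgid >> 32;	/* user-space "process ID" */
 *		__u32 pid = (__u32)pid_tgid;	/* thread ID */
 *
 *		__u64 uid_gid = bpf_get_current_uid_gid();
 *		__u32 gid = uid_gid >> 32;
 *		__u32 uid = (__u32)uid_gid;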
* * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of * the executable (excluding the path) for the current task. The * *size_of_buf* must be strictly positive. On success, the * helper makes sure that the *buf* is NUL-terminated. On failure, * it is filled with zeroes. * Return * 0 on success, or a negative error in case of failure. * * u32 bpf_get_cgroup_classid(struct sk_buff *skb) * Description * Retrieve the classid for the current task, i.e. for the net_cls * cgroup to which *skb* belongs. * * This helper can be used on TC egress path, but not on ingress. * * The net_cls cgroup provides an interface to tag network packets * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can * use a mixture of them, but note that the net_cls cgroup is for * cgroup v1 only. This makes it incompatible with BPF programs * run on cgroups, which is a cgroup-v2-only feature (a socket can * only hold data for one version of cgroups at a time). * * This helper is only available if the kernel was compiled with * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to * "**y**" or to "**m**". * Return * The classid, or 0 for the default unconfigured classid. * * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update * the checksum. Note that if *vlan_proto* is different from * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be * filled with tunnel metadata for the packet associated to *skb*. * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which * indicates that the tunnel is based on IPv6 protocol instead of * IPv4. * * The **struct bpf_tunnel_key** is an object that generalizes the * principal parameters used by various tunneling protocols into a * single struct. This way, it can be used to easily make a * decision based on the contents of the encapsulation header, * "summarized" in this struct.
In particular, it holds the IP * address of the remote end (IPv4 or IPv6, depending on the case) * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, * this struct exposes the *key*\ **->tunnel_id**, which is * generally mapped to a VNI (Virtual Network Identifier), making * it programmable together with the **bpf_skb_set_tunnel_key**\ * () helper. * * Let's imagine that the following code is part of a program * attached to the TC ingress interface, on one end of a GRE * tunnel, and is supposed to filter out all messages coming from * remote ends with IPv4 address other than 10.0.0.1: * * :: * * int ret; * struct bpf_tunnel_key key = {}; * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices * that can operate in "collect metadata" mode: instead of having * one network device per specific configuration, the "collect * metadata" mode only requires a single device where the * configuration can be extracted from this helper. * * This can be used together with various tunnels such as VXLan, * Geneve, GRE or IP in IP (IPIP). * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The * *flags* can be set to a combination of the following values: * * **BPF_F_TUNINFO_IPV6** * Indicate that the tunnel is based on IPv6 protocol * instead of IPv4. * **BPF_F_ZERO_CSUM_TX** * For IPv4 packets, add a flag to tunnel metadata * indicating that checksum computation should be skipped * and checksum set to zeroes. * **BPF_F_DONT_FRAGMENT** * Add a flag to tunnel metadata indicating that the * packet should not be fragmented. * **BPF_F_SEQ_NUMBER** * Add a flag to tunnel metadata indicating that a * sequence number should be added to tunnel header before * sending the packet. This flag was added for GRE * encapsulation, but might be used with other protocols * as well in the future. * * Here is a typical usage on the transmit path: * * :: * * struct bpf_tunnel_key key; * populate key ... * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); * * See also the description of the **bpf_skb_get_tunnel_key**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) * Description * Read the value of a perf event counter. This helper relies on a * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of * the perf event counter is selected when *map* is updated with * perf event file descriptors. The *map* is an array whose size * is the number of available CPUs, and each cell contains a value * relative to one CPU. The value to retrieve is indicated by * *flags*, that contains the index of the CPU to look up, masked * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * Note that before Linux 4.13, only hardware perf event can be * retrieved. 
* * Also, be aware that the newer helper * **bpf_perf_event_read_value**\ () is recommended over * **bpf_perf_event_read**\ () in general. The latter has some ABI * quirks where error and counter value are used as a return code * (which is wrong to do since ranges may overlap). This issue is * fixed with **bpf_perf_event_read_value**\ (), which at the same * time provides more features over the **bpf_perf_event_read**\ * () interface. Please refer to the description of * **bpf_perf_event_read_value**\ () for details. * Return * The value of the perf event counter read from the map, or a * negative error code in case of failure. * * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ * (), except that the packet is not cloned, which provides * increased performance. * * Except for XDP, both ingress and egress interfaces can be used * for redirection. The **BPF_F_INGRESS** value in *flags* is used * to make the distinction (ingress path is selected if the flag * is present, egress path otherwise). Currently, XDP only * supports redirection to the egress interface, and accepts no * flag at all. * * The same effect can also be attained with the more generic * **bpf_redirect_map**\ (), which uses a BPF map to store the * redirect target instead of providing it directly to the helper. * Return * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on * error. * * u32 bpf_get_route_realm(struct sk_buff *skb) * Description * Retrieve the realm of the route, that is to say the * **tclassid** field of the destination for the *skb*. The * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. * * Retrieving this identifier works with the clsact TC egress hook * (see also **tc-bpf(8)**), or alternatively on conventional * classful egress qdiscs, but not on TC ingress path. In case of * clsact TC egress hook, this has the advantage that, internally, * the destination entry has not been dropped yet in the transmit * path. Therefore, the destination entry does not need to be * artificially held via **netif_keep_dst**\ () for a classful * qdisc until the *skb* is freed. * * This helper is available only if the kernel was compiled with * the **CONFIG_IP_ROUTE_CLASSID** configuration option. * Return * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf * event must have the following attributes: **PERF_SAMPLE_RAW** * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. * * The *flags* are used to indicate the index in *map* for which * the value must be put, masked with **BPF_F_INDEX_MASK**. * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** * to indicate that the index of the current CPU core should be * used. * * The value to write, of *size*, is passed through eBPF stack and * pointed by *data*.
* * The context of the program *ctx* also needs to be passed to * the helper. * * In user space, a program willing to read the values needs to * call **perf_event_open**\ () on the perf event (either for * one or for all CPUs) and to store the file descriptor into the * *map*. This must be done before the eBPF program can send data * into it. An example is available in file * *samples/bpf/trace_output_user.c* in the Linux kernel source * tree (the eBPF program counterpart is in * *samples/bpf/trace_output_kern.c*). * * **bpf_perf_event_output**\ () achieves better performance * than **bpf_trace_printk**\ () for sharing data with user * space, and is much more suitable for streaming data from eBPF * programs. * * Note that this helper is not restricted to tracing use cases * and can be used with programs attached to TC or XDP as well, * where it allows for passing data to user space listeners. Data * can be: * * * Only custom structs, * * Only the packet payload, or * * A combination of both. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from * the packet associated to *skb*, into the buffer pointed by * *to*. * * Since Linux 4.7, usage of this helper has mostly been replaced * by "direct packet access", enabling packet data to be * manipulated with *skb*\ **->data** and *skb*\ **->data_end** * pointing respectively to the first byte of packet data and to * the byte after the last byte of packet data. However, it * remains useful if one wishes to read large quantities of data * at once from a packet into the eBPF stack. * Return * 0 on success, or a negative error in case of failure. * * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context * on which the tracing program is executed, and a pointer to a * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * a combination of the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_FAST_STACK_CMP** * Compare stacks by hash only. * **BPF_F_REUSE_STACKID** * If two different stacks hash into the same *stackid*, * discard the old one. * * The stack id retrieved is a 32-bit long integer handle which * can be further combined with other data (including other stack * ids) and used as a key into maps. This can be useful for * generating a variety of graphs (such as flame graphs or off-cpu * graphs). * * For walking a stack, this helper is an improvement over * **bpf_probe_read**\ (), which can be used with unrolled loops * but is not efficient and consumes a lot of eBPF instructions. * Instead, **bpf_get_stackid**\ () can collect up to * **PERF_MAX_STACK_DEPTH** frames, for both kernel and user * stacks. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The positive or null stack id on success, or a negative error * in case of failure.
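 * * As a usage sketch (illustrative only, not part of the original * description), a tracing program could collect the user-space * stack into a map named *stack_traces*, a * **BPF_MAP_TYPE_STACK_TRACE** map assumed to be declared * elsewhere, and use the returned id as a map key: * * :: * * long id; * * // stack_traces is an assumed BPF_MAP_TYPE_STACK_TRACE map * id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK); * if (id < 0) * return 0; // could not collect the stack * // use id, e.g. as a key to count hits per unique stack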
* * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) * Description * Compute a checksum difference, from the raw buffer pointed by * *from*, of length *from_size* (that must be a multiple of 4), * towards the raw buffer pointed by *to*, of size *to_size* * (same remark). An optional *seed* can be added to the value * (this can be cascaded, the seed may come from a previous call * to the helper). * * This is flexible enough to be used in several ways: * * * With *from_size* == 0, *to_size* > 0 and *seed* set to * checksum, it can be used when pushing new data. * * With *from_size* > 0, *to_size* == 0 and *seed* set to * checksum, it can be used when removing data from a packet. * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it * can be used to compute a diff. Note that *from_size* and * *to_size* do not need to be equal. * * This helper can be used in combination with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to * which one can feed in the difference computed with * **bpf_csum_diff**\ (). * Return * The checksum result, or a negative error code in case of * failure. * * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* * of *size*. * * This helper can be used with encapsulation devices that can * operate in "collect metadata" mode (please refer to the related * note in the description of **bpf_skb_get_tunnel_key**\ () for * more details). A particular example where this can be used is * in combination with the Geneve encapsulation protocol, where it * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ () * helper) and retrieving arbitrary TLVs (Type-Length-Value * headers) from the eBPF program. This allows for full * customization of these headers. * Return * The size of the option data retrieved. * * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. * * See also the description of the **bpf_skb_get_tunnel_opt**\ () * helper for additional information. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transitions from IPv4 to IPv6 and from IPv6 to * IPv4. The helper takes care of the groundwork for the * transition, including resizing the socket buffer. The eBPF * program is expected to fill the new headers, if any, via * **bpf_skb_store_bytes**\ () and to recompute the checksums with * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ * (). The main case for this helper is to perform NAT64 * operations out of an eBPF program. * * Internally, the GSO type is marked as dodgy so that headers are * checked and segments are recalculated by the GSO/GRO engine. * The size for GSO target is adapted as well. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access.
* Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except * the eBPF program does not have write access to *skb*\ * **->pkt_type** besides this helper. Using a helper here allows * for graceful handling of errors. * * The major use case is to change incoming *skb*s to * **PACKET_HOST** in a programmatic way instead of having to * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for * example. * * Note that *type* only allows certain values. At this time, they * are: * * **PACKET_HOST** * Packet is for us. * **PACKET_BROADCAST** * Send packet to all. * **PACKET_MULTICAST** * Send packet to group. * **PACKET_OTHERHOST** * Send packet to someone else. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 0, if the *skb* failed the cgroup2 descendant test. * * 1, if the *skb* succeeded the cgroup2 descendant test. * * A negative error code, if an error occurred. * * u32 bpf_get_hash_recalc(struct sk_buff *skb) * Description * Retrieve the hash of the packet, *skb*\ **->hash**. If it is * not set, in particular if the hash was cleared due to mangling, * recompute this hash. Later accesses to the hash can be done * directly with *skb*\ **->hash**. * * Calling **bpf_set_hash_invalid**\ (), changing a packet * prototype with **bpf_skb_change_proto**\ (), or calling * **bpf_skb_store_bytes**\ () with the * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear * the hash and to trigger a new computation for the next call to * **bpf_get_hash_recalc**\ (). * Return * The 32-bit hash. * * u64 bpf_get_current_task(void) * Description * Get the current task. * Return * A pointer to the current task struct. * * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in * user context, and *dst* must be a valid user space address. * * This helper should not be used to implement any kind of * security mechanism because of TOC-TOU attacks, but rather to * debug, divert, and manipulate execution of semi-cooperative * processes. * * Keep in mind that this feature is meant for experiments, and it * has a risk of crashing the system and running programs. * Therefore, when an eBPF program using this helper is attached, * a warning including PID and process name is printed to kernel * logs. * Return * 0 on success, or a negative error in case of failure. * * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run in the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. * Return * The return value depends on the result of the test, and can be: * * * 1, if current task belongs to the cgroup2. * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred.
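 * * For illustration (a sketch, not from the original description), * a tracing program could bail out unless the current task runs * under the cgroup stored at index 0 of *cgroup_map*, a * **BPF_MAP_TYPE_CGROUP_ARRAY** assumed to be populated by user * space: * * :: * * long ret; * * // cgroup_map is an assumed BPF_MAP_TYPE_CGROUP_ARRAY * ret = bpf_current_task_under_cgroup(&cgroup_map, 0); * if (ret != 1) * return 0; // not in the cgroup, or an error occurred * // current task belongs to the cgroup, do the actual work ...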
* * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must * be left at zero. * * The basic idea is that the helper performs the needed work to * change the size of the packet, then the eBPF program rewrites * the rest via helpers like **bpf_skb_store_bytes**\ (), * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ () * and others. This helper is a slow path utility intended for * replies with control messages. Because it is targeted for the * slow path, the helper itself can afford to be slow: it * implicitly linearizes, unclones and drops offloads from the * *skb*. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of the *len* bytes are part of the linear section. Make *len* * bytes from *skb* readable and writable. If a zero value is passed * for *len*, then all bytes in the linear part of *skb* will be made * readable and writable. * * This helper is only needed for reading and writing with direct * packet access. * * For direct packet access, testing that offsets to access * are within packet boundaries (test on *skb*\ **->data_end**) is * susceptible to fail if offsets are invalid, or if the requested * data is in non-linear parts of the *skb*. On failure the * program can just bail out, or in the case of a non-linear * buffer, use a helper to make the data available. The * **bpf_skb_load_bytes**\ () helper is a first solution to access * the data. Another one consists in using **bpf_skb_pull_data**\ () * to pull in the non-linear parts once, then retesting and finally * accessing the data. * * At the same time, this also makes sure the *skb* is uncloned, * which is a necessary condition for direct write. As this needs * to be an invariant for the write part only, the verifier * detects writes and adds a prologue that calls * **bpf_skb_pull_data**\ () to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) * Description * Add the checksum *csum* into *skb*\ **->csum** in case the * driver has supplied a checksum for the entire packet into that * field. Return an error otherwise. This helper is intended to be * used in combination with **bpf_csum_diff**\ (), in particular * when the checksum needs to be updated after data has been * written into the packet through direct packet access. * Return * The checksum on success, or a negative error code in case of * failure. * * void bpf_set_hash_invalid(struct sk_buff *skb) * Description * Invalidate the current *skb*\ **->hash**.
It can be used after * mangling on headers through direct packet access, in order to * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * Return * void. * * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA * node, when the program is attached to sockets using the * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), * but the helper is also available to other eBPF program types, * similarly to **bpf_get_smp_processor_id**\ (). * Return * The id of current NUMA node. * * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of * space. It automatically extends and reallocates memory as * required. * * This helper can be used on a layer 3 *skb* to push a MAC header * for redirection into a layer 2 device. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper * can be used to prepare the packet for pushing or popping * headers. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for * more details. * * Generally, use **bpf_probe_read_user_str**\ () or * **bpf_probe_read_kernel_str**\ () instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative * value. * * u64 bpf_get_socket_cookie(struct sk_buff *skb) * Description * If the **struct sk_buff** pointed by *skb* has a known socket, * retrieve the cookie (generated by the kernel) of this socket. * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket * networking traffic statistics as it provides a global socket * identifier that can be assumed unique. * Return * An 8-byte long unique number on success, or 0 if the socket * field is missing inside *skb*. * * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_addr** context. * Return * An 8-byte long unique number.
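 * * As an illustrative sketch (not part of the original * description), a TC program could use the cookie from the *skb* * variant above to key per-socket statistics; *stats_map* is an * assumed hash map declared elsewhere: * * :: * * u64 cookie; * * cookie = bpf_get_socket_cookie(skb); * if (!cookie) * return TC_ACT_OK; // no known socket for this skb * // use cookie as a key into stats_map (assumed) ... * return TC_ACT_OK;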
* * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * An 8-byte long unique number. * * u64 bpf_get_socket_cookie(struct sock *sk) * Description * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *sk*, but gets socket from a BTF **struct sock**. This helper * also works for sleepable programs. * Return * An 8-byte long unique number or 0 if *sk* is NULL. * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Description * Get the owner UID of the socket associated to *skb*. * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a * time-wait or a request socket instead), **overflowuid** value * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed by *optval*. * * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** * and **BPF_CGROUP_INET6_CONNECT**. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * * By default, the helper will reset any offloaded checksum * indicator of the skb to CHECKSUM_NONE. This can be avoided * by the following flag: * * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded * checksum data of the skb to CHECKSUM_NONE. * * There are two supported modes at this time: * * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer * (room space is added or removed below the layer 2 header). * * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer * (room space is added or removed below the layer 3 header). * * The following flags are supported at this time: * * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * Adjusting mss in this way is not allowed for datagrams.
* * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): * Use with ENCAP_L3/L4 flags to further specify the tunnel * type; *len* is the length of the inner MAC header. * * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the * L2 type as Ethernet. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain * references to net devices (for forwarding packets through other * ports), or to CPUs (for redirecting XDP frames to another CPU; * but this is only implemented for native XDP (with driver * support) as of this writing). * * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be * one of the XDP program return codes up to **XDP_TX**, as chosen * by the caller. The higher bits of *flags* can be set to * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. * * With BPF_F_BROADCAST the packet will be broadcast to all the * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress * interface will be excluded from the broadcast. * * See also **bpf_redirect**\ (), which only supports redirecting * to an ifindex, but doesn't require a map to do so. * Return * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative).
Note that this * operation modifies the address stored in *xdp_md*\ **->data**, * so the latter must be loaded only after the helper has been * called. * * The use of *xdp_md*\ **->data_meta** is optional and programs * are not required to use it. The rationale is that when the * packet is processed with XDP (e.g. as DoS filter), it is * possible to push further meta data along with it before passing * to the stack, and to give the guarantee that an ingress eBPF * program attached as a TC classifier on the same device can pick * this up for further post-processing. Since TC works with socket * buffers, it remains possible to set from XDP the **mark** or * **priority** pointers, or other pointers for the socket buffer. * Having this scratch space generic and programmable allows for * more flexibility as the user is free to store whatever meta * data they need. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event * counter is selected when *map* is updated with perf event file * descriptors. The *map* is an array whose size is the number of * available CPUs, and each cell contains a value relative to one * CPU. The value to retrieve is indicated by *flags*, which * contains the index of the CPU to look up, masked with * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to * **BPF_F_CURRENT_CPU** to indicate that the value for the * current CPU should be retrieved. * * This helper behaves in a way close to * **bpf_perf_event_read**\ () helper, save that instead of * just returning the value observed, it fills the *buf* * structure. This allows for additional data to be retrieved: in * particular, the enabled and running times (in *buf*\ * **->enabled** and *buf*\ **->running**, respectively) are * copied. In general, **bpf_perf_event_read_value**\ () is * recommended over **bpf_perf_event_read**\ (), which has some * ABI issues and provides fewer functionalities. * * These values are interesting, because hardware PMU (Performance * Monitoring Unit) counters are limited resources. When there are * more PMU based perf events opened than available counters, the * kernel will multiplex these events so each event gets a certain * percentage (but not all) of the PMU time. When multiplexing * happens, the number of samples and the counter value will not * reflect what they would have been had no multiplexing occurred. * This makes comparison between different runs difficult. * Typically, the counter value should be normalized before * comparing to other experiments. The usual normalization is done * as follows. * * :: * * normalized_counter = counter * t_enabled / t_running * * Where t_enabled is the time enabled for the event and t_running * is the time running for the event since the last normalization. * The enabled and running times are accumulated since the perf * event open.
To achieve a scaling factor between two invocations of an * eBPF program, users can use the CPU id as the key (which is * typical for the perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * Return * 0 on success, or a negative error in case of failure. * * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For an eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in * the structure pointed by *buf* and of size *buf_size*. Enabled * and running times are also stored in the structure (see * description of helper **bpf_perf_event_read_value**\ () for * more details). * Return * 0 on success, or a negative error in case of failure. * * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at * which the option resides and the name *optname* of the option * must be specified, see **getsockopt(2)** for more information. * The retrieved value is stored in the structure pointed by * *optval* and of length *optlen*. * * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** * and **BPF_CGROUP_INET6_CONNECT**. * * This helper actually implements a subset of **getsockopt()**. * It supports the following *level*\ s: * * * **IPPROTO_TCP**, which supports *optname* * **TCP_CONGESTION**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required * value. * * This helper has security implications, and thus is subject to * restrictions. It is only available if the kernel was compiled * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. * * Also, the helper is only available for the architectures that * have the CONFIG_FUNCTION_ERROR_INJECTION option. As of this * writing, the x86 architecture is the only one to support this * feature. * Return * 0 * * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock* to * *argval*. * * The primary use of this field is to determine if there should * be calls to eBPF programs of type * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP * code. A program of the same type can change its value, per * connection and as necessary, when the connection is * established.
This field is directly accessible for reading, but * this helper must be used for updates in order to return an * error if an eBPF program tries to set a callback that is not * supported in the current kernel. * * *argval* is a flag array which can combine these flags: * * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) * * This function can also be used to clear a callback flag by * setting the appropriate bit to zero. For example, to disable * the RTO callback: * * **bpf_sock_ops_cb_flags_set(bpf_sock,** * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** * * Here are some examples of where one could call such an eBPF * program: * * * When RTO fires. * * When a packet is retransmitted. * * When the connection terminates. * * When a packet is sent. * * When a packet is received. * Return * Code **-EINVAL** if the socket is not a full TCP socket; * otherwise, a positive number containing the bits that could not * be set is returned (which comes down to 0 if all bits were set * as required). * * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. * * For example, this helper can be used in the following cases: * * * A single **sendmsg**\ () or **sendfile**\ () system call * contains multiple logical messages that the eBPF program is * supposed to read and for which it should apply a verdict. * * An eBPF program only cares about reading the first *bytes* * of a *msg*. If the message has a large payload, then setting * up and calling the eBPF program repeatedly for all bytes, even * though the verdict is already known, would create unnecessary * overhead. * * When called from within an eBPF program, the helper sets a * counter internal to the BPF infrastructure, that is used to * apply the last verdict to the next *bytes*. If *bytes* is * smaller than the current data being processed from a * **sendmsg**\ () or **sendfile**\ () system call, the first * *bytes* will be sent and the eBPF program will be re-run with * the pointer for start of data pointing to byte number *bytes* * **+ 1**. If *bytes* is larger than the current data being * processed, then the eBPF verdict will be applied to multiple * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are * consumed. * * Note that if a socket closes with the internal counter holding * a non-zero value, this is not a problem because data is not * being buffered for *bytes* and is sent as it is received.
* Return * 0 * * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been * accumulated. * * This can be used when one needs a specific number of bytes * before a verdict can be assigned, even if the data spans * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme * case would be a user calling **sendmsg**\ () repeatedly with * 1-byte long message segments. Obviously, this is bad for * performance, but it is still valid. If the eBPF program needs * *bytes* bytes to validate a header, this helper can be used to * prevent the eBPF program from being called again until *bytes* * have been accumulated. * Return * 0 * * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ * **->data_end** to *start* and *end* bytes offsets into *msg*, * respectively. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it can only parse data that the (**data**, **data_end**) * pointers have already consumed. For **sendmsg**\ () hooks this * is likely the first scatterlist element. But for calls relying * on the **sendpage** handler (e.g. **sendfile**\ ()) this will * be the range (**0**, **0**) because the data is shared with * user space and by default the objective is to avoid allowing * user space to modify data while (or after) the eBPF verdict is * being decided. This helper can be used to pull in data and to * set the start and end pointer to given values. Data will be * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * * All values for *flags* are reserved for future usage, and must * be left at zero. * Return * 0 on success, or a negative error in case of failure. * * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing * connection from the desired IP address, which can be useful for * example when all processes inside a cgroup should use one * single IP address on a host that has multiple IP addresses * configured. * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or * **AF_INET6**). It's advised to pass zero port (**sin_port** * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like * behavior and lets the kernel efficiently pick up an unused * port as long as the 4-tuple is unique. Passing a non-zero port * might lead to degraded performance. * Return * 0 on success, or a negative error in case of failure. * * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. * Shrinking is done by passing a negative *delta*. * * A call to this helper is susceptible to change the underlying * packet buffer.
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. * * The retrieved value is stored in the **struct bpf_xfrm_state** * pointed by *xfrm_state* and of length *size*. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_XFRM** configuration option. * Return * 0 on success, or a negative error in case of failure. * * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer * to the context on which the tracing program is executed. * To store the stacktrace, the bpf program provides *buf* with * a nonnegative *size*. * * The last argument, *flags*, holds the number of stack frames to * skip (from 0 to 255), masked with * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set * the following flags: * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. * * **bpf_get_stack**\ () can collect up to * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject * to a sufficiently large buffer size. Note that * this limit can be controlled with the **sysctl** program, and * that it should be manually increased in order to profile long * user stacks (such as stacks for Java programs). To do so, use: * * :: * * # sysctl kernel.perf_event_max_stack=<new value> * Return * The non-negative copied *buf* length equal to or less than * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* * from the packet associated to *skb*, into the buffer pointed * by *to*. The difference to **bpf_skb_load_bytes**\ () is that * a fifth argument *start_header* exists in order to select a * base offset to start from. *start_header* can be one of: * * **BPF_HDR_START_MAC** * Base offset to load data from is *skb*'s mac header. * **BPF_HDR_START_NET** * Base offset to load data from is *skb*'s network header. * * In general, "direct packet access" is the preferred method to * access packet data, however, this helper is particularly useful * in socket filters where *skb*\ **->data** does not always point * to the start of the mac header and where "direct packet access" * is not available. * Return * 0 on success, or a negative error in case of failure. * * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do a FIB lookup in kernel tables using parameters in *params*. * If the lookup is successful and the result shows the packet is * to be forwarded, the neighbor tables are searched for the * nexthop.
* If successful (i.e., the FIB lookup shows forwarding and the * nexthop is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric * is set to metric from route (IPv4/IPv6 only), and ifindex * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the * following values: * * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** for tc cls_act programs. * Return * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU * was exceeded and output params->mtu_result contains the MTU. * * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to * *key*. *flags* is one of: * * **BPF_NOEXIST** * The entry for *key* must not exist in the map. * **BPF_EXIST** * The entry for *key* must already exist in the map. * **BPF_ANY** * No condition on the existence of the entry for *key*. * * If the *map* has eBPF programs (parser and verdict), those will * be inherited by the socket being added. If the socket is * already attached to eBPF programs, this results in an error. * Return * 0 on success, or a negative error in case of failure. * * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if * the verdict eBPF program returns **SK_PASS**), redirect it to * the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress path otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The * **BPF_F_INGRESS** value in *flags* is used to make the * distinction (ingress path is selected if the flag is present, * egress otherwise). This is the only flag supported for now. * Return * **SK_PASS** on success, or **SK_DROP** on error. * * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header.
This header is provided in the buffer at * address *hdr*, with *len* its size in bytes. *type* indicates * the protocol of the header and can be one of: * * **BPF_LWT_ENCAP_SEG6** * IPv6 encapsulation with Segment Routing Header * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, * the IPv6 header is computed by the kernel. * **BPF_LWT_ENCAP_SEG6_INLINE** * Only works if *skb* contains an IPv6 packet. Insert a * Segment Routing Header (**struct ipv6_sr_hdr**) inside * the IPv6 header. * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). The outer header * must be IPv4 or IPv6, followed by zero or more * additional headers, up to **LWT_BPF_MAX_HEADROOM** * total bytes in all prepended headers. Please note that * if **skb_is_gso**\ (*skb*) is true, no more than two * headers can be prepended, and the inner header, if * present, should be either GRE or UDP/GUE. * * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and * **BPF_PROG_TYPE_LWT_XMIT**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to * *skb*, at position *offset* by *delta* bytes. Only offsets * after the segments are accepted. *delta* can be positive * (growing) as well as negative (shrinking). * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter * contained at address *param*, and of length *param_len* bytes. * *action* can be one of: * * **SEG6_LOCAL_ACTION_END_X** * End.X action: Endpoint with Layer-3 cross-connect. * Type of *param*: **struct in6_addr**. * **SEG6_LOCAL_ACTION_END_T** * End.T action: Endpoint with specific IPv6 table lookup. * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. * Type of *param*: **struct ipv6_sr_hdr**.
* **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. * Type of *param*: **struct ipv6_sr_hdr**. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with * direct packet access. * Return * 0 on success, or a negative error in case of failure. * * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays * the generation of a key up event for previously generated * key down event. * * Some IR protocols like NEC have a special IR message for * repeating the last button, for when a button is held down. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode* and * *toggle* value in the given *protocol*. The scancode will be * translated to a keycode using the rc keymap, and reported as * an input key down event. After a period a key up event is * generated. This period can be extended by calling either * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. * * The *ctx* should point to the lirc sample as passed into * the program. * * The *protocol* is the decoded protocol number (see * **enum rc_proto** for some predefined values). * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". * Return * 0 * * u64 bpf_skb_cgroup_id(struct sk_buff *skb) * Description * Return the cgroup v2 id of the socket associated with the *skb*. * This is roughly similar to the **bpf_get_cgroup_classid**\ () * helper for cgroup v1 by providing a tag, or identifier, that * can be matched on or used for map lookups, e.g. to implement * policy. The cgroup v2 id of a given path in the hierarchy is * exposed in user space through the f_handle API in order to get * to the same 64-bit id. * * This helper can be used on TC egress path, but not on ingress, * and is available only if the kernel was compiled with the * **CONFIG_SOCK_CGROUP_DATA** configuration option. * Return * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) * Description * Get the current cgroup id based on the cgroup within which * the current task is running. * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. * * void *bpf_get_local_storage(void *map, u64 flags) * Description * Get the pointer to the local storage area. * The type and the size of the local storage is defined * by the *map* argument. * The *flags* meaning is specific for each map type, * and has to be 0 for cgroup local storage. * * Depending on the BPF program type, a local storage area * can be shared between multiple instances of the BPF program, * running simultaneously.
* * Users should take care of the synchronization by themselves. * For example, by using the **BPF_ATOMIC** instructions to alter * the shared data. * Return * A pointer to the local storage area. * * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. * It checks that the selected socket matches the incoming * request in the socket buffer. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) * Description * Return the id of the cgroup v2 that is an ancestor of the cgroup * associated with the *skb* at the *ancestor_level*. The root * cgroup is at *ancestor_level* zero and each step down the * hierarchy increments the level. If *ancestor_level* == level of * cgroup associated with *skb*, then the return value will be the * same as that of **bpf_skb_cgroup_id**\ (). * * The helper is useful to implement policies based on cgroups * that are higher in the hierarchy than the immediate cgroup * associated with *skb*. * * The format of the returned id and the helper limitations are * the same as in **bpf_skb_cgroup_id**\ (). * Return * The id is returned or 0 in case the id could not be retrieved. * * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used * to determine the base network namespace for the lookup. * * *tuple_size* must be one of: * * **sizeof**\ (*tuple*\ **->ipv4**) * Look for an IPv4 socket. * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket.
* * If the *netns* is a negative signed 32-bit integer, then the * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or * equal to zero then it specifies the ID of the netns relative to * the netns associated with the *ctx*. *netns* values beyond the * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * * long bpf_sk_release(void *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from * **bpf_sk_lookup_xxx**\ (). * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * * **BPF_EXIST** * If the queue/stack is full, the oldest element is * removed to make room for the new one. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a * *msg* it may want to insert metadata or options into the *msg*. * This can later be read and used by any of the lower layer BPF * hooks. * * This helper may fail if it is under memory pressure (a malloc * fails); in these cases the BPF program will get an appropriate * error and will need to handle it. * Return * 0 on success, or a negative error in case of failure. * * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if * an allocation and copy are required due to a full ring buffer. * However, the helper will try to avoid doing the allocation * if possible. Other errors can occur if input parameters are * invalid, either due to the *start* byte not being a valid part * of the *msg* payload and/or the *pop* value being too large. * Return * 0 on success, or a negative error in case of failure. * * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. * * The *ctx* should point to the lirc sample as passed into * the program. * * This helper is only available if the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**".
 *	Return
 *		0
 *
 * long bpf_spin_lock(struct bpf_spin_lock *lock)
 *	Description
 *		Acquire a spinlock represented by the pointer *lock*, which is
 *		stored as part of a value of a map. Taking the lock makes it
 *		possible to safely update the rest of the fields in that value.
 *		The spinlock can (and must) later be released with a call to
 *		**bpf_spin_unlock**\ (\ *lock*\ ).
 *
 *		Spinlocks in BPF programs come with a number of restrictions
 *		and constraints:
 *
 *		* **bpf_spin_lock** objects are only allowed inside maps of
 *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
 *		  list could be extended in the future).
 *		* BTF description of the map is mandatory.
 *		* The BPF program can take ONE lock at a time, since taking two
 *		  or more could cause deadlocks.
 *		* Only one **struct bpf_spin_lock** is allowed per map element.
 *		* When the lock is taken, calls (either BPF to BPF or helpers)
 *		  are not allowed.
 *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
 *		  allowed inside a spinlock-ed region.
 *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
 *		  the lock, on all execution paths, before it returns.
 *		* The BPF program can access **struct bpf_spin_lock** only via
 *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
 *		  helpers. Loading or storing data into the **struct
 *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
 *		* To use the **bpf_spin_lock**\ () helper, the BTF description
 *		  of the map value must be a struct and have **struct
 *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
 *		  Nesting the lock inside another struct is not allowed.
 *		* The **struct bpf_spin_lock** *lock* field in a map value must
 *		  be aligned on a multiple of 4 bytes in that value.
 *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
 *		  the **bpf_spin_lock** field to user space.
 *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
 *		  a BPF program, does not update the **bpf_spin_lock** field.
 *		* **bpf_spin_lock** cannot be on the stack or inside a
 *		  networking packet (it can only be inside of a map value).
 *		* **bpf_spin_lock** is available to root only.
 *		* Tracing programs and socket filter programs cannot use
 *		  **bpf_spin_lock**\ () due to insufficient preemption checks
 *		  (but this may change in the future).
 *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
 *	Return
 *		0
 *
 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
 *	Description
 *		Release the *lock* previously locked by a call to
 *		**bpf_spin_lock**\ (\ *lock*\ ).
 *	Return
 *		0
 *
 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_sock** pointer such
 *		that all the fields in this **bpf_sock** can be accessed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_tcp_sock** pointer from a
 *		**struct bpf_sock** pointer.
 *	Return
 *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
 *	Description
 *		Set ECN (Explicit Congestion Notification) field of IP header
 *		to **CE** (Congestion Encountered) if current value is **ECT**
 *		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
 *		and IPv4.
 *	Return
 *		1 if the **CE** flag is set (either by the current helper call
 *		or because it was already present), 0 if it is not set.
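 *
 *		As an illustration of the **bpf_spin_lock**\ () and
 *		**bpf_spin_unlock**\ () pattern documented above, a minimal
 *		sketch; the map name and value layout are invented for the
 *		example and are not part of the kernel API:
 *
 *		::
 *
 *			#include <linux/bpf.h>
 *			#include <linux/pkt_cls.h>
 *			#include <bpf/bpf_helpers.h>
 *
 *			struct val {
 *				int cnt;
 *				struct bpf_spin_lock lock;
 *			};
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_ARRAY);
 *				__uint(max_entries, 1);
 *				__type(key, __u32);
 *				__type(value, struct val);
 *			} counters SEC(".maps");
 *
 *			SEC("tc")
 *			int count_packets(struct __sk_buff *skb)
 *			{
 *				__u32 key = 0;
 *				struct val *v;
 *
 *				v = bpf_map_lookup_elem(&counters, &key);
 *				if (!v)
 *					return TC_ACT_OK;
 *
 *				// Serialize updates to the shared counter;
 *				// no helper calls while the lock is held.
 *				bpf_spin_lock(&v->lock);
 *				v->cnt++;
 *				bpf_spin_unlock(&v->lock);
 *				return TC_ACT_OK;
 *			}
 *
 *			char _license[] SEC("license") = "GPL";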
 *
 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
 *	Description
 *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
 *		**bpf_sk_release**\ () is unnecessary and not allowed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for TCP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
 *		that it also returns timewait or request sockets. Use
 *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
 *		full structure.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
 *	Description
 *		Check whether *iph* and *th* contain a valid SYN cookie ACK for
 *		the listening socket in *sk*.
 *
 *		*iph* points to the start of the IPv4 or IPv6 header, while
 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
 *		**sizeof**\ (**struct ipv6hdr**).
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
 *		error otherwise.
 *
 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
 *	Description
 *		Get the name of the sysctl in /proc/sys/ and copy it into the
 *		buffer *buf* of size *buf_len* provided by the program.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *
 *		If *flags* is zero, the full name (e.g. "net/ipv4/tcp_mem") is
 *		copied. Use the **BPF_F_SYSCTL_BASE_NAME** flag to copy the
 *		base name only (e.g. "tcp_mem").
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated name in this case).
 *
 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the current value of the sysctl as it is presented in
 *		/proc/sys (incl. newline, etc), and copy it as a string into
 *		the buffer *buf* of size *buf_len* provided by the program.
 *
 *		The whole value is copied, no matter what file position user
 *		space issued e.g. sys_read at.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated name in this case).
 *
 *		**-EINVAL** if the current value was unavailable, e.g. because
 *		the sysctl is uninitialized and read returns -EIO for it.
 *
 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the new value being written by user space to the sysctl
 *		(before the actual write happens) and copy it as a string into
 *		the buffer *buf* of size *buf_len* provided by the program.
 *
 *		User space may write the new value at file position > 0.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated name in this case).
 *
 *		**-EINVAL** if the sysctl is being read.
 *
 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
 *	Description
 *		Override the new value being written by user space to the
 *		sysctl with the value provided by the program in the buffer
 *		*buf* of size *buf_len*.
 *
 *		*buf* should contain a string in the same form as provided by
 *		user space on sysctl write.
 *
 *		User space may write the new value at file position > 0. To
 *		override the whole sysctl value, the file position should be
 *		set to zero.
 *	Return
 *		0 on success.
 *
 *		**-E2BIG** if the *buf_len* is too big.
 *
 *		**-EINVAL** if the sysctl is being read.
 *
 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
 *	Description
 *		Convert the initial part of the string from buffer *buf* of
 *		size *buf_len* to a long integer according to the given base
 *		and save the result in *res*.
 *
 *		The string may begin with an arbitrary amount of white space
 *		(as determined by **isspace**\ (3)) followed by a single
 *		optional '**-**' sign.
 *
 *		The five least significant bits of *flags* encode the base;
 *		other bits are currently unused.
 *
 *		The base must be either 8, 10, 16 or 0 to detect it
 *		automatically, similar to user space **strtol**\ (3).
 *	Return
 *		Number of characters consumed on success. Must be positive but
 *		no more than *buf_len*.
 *
 *		**-EINVAL** if no valid digits were found or an unsupported
 *		base was provided.
 *
 *		**-ERANGE** if the resulting value was out of range.
 *
 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
 *	Description
 *		Convert the initial part of the string from buffer *buf* of
 *		size *buf_len* to an unsigned long integer according to the
 *		given base and save the result in *res*.
 *
 *		The string may begin with an arbitrary amount of white space
 *		(as determined by **isspace**\ (3)).
 *
 *		The five least significant bits of *flags* encode the base;
 *		other bits are currently unused.
 *
 *		The base must be either 8, 10, 16 or 0 to detect it
 *		automatically, similar to user space **strtoul**\ (3).
 *	Return
 *		Number of characters consumed on success. Must be positive but
 *		no more than *buf_len*.
 *
 *		**-EINVAL** if no valid digits were found or an unsupported
 *		base was provided.
 *
 *		**-ERANGE** if the resulting value was out of range.
 *
 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
 *	Description
 *		Get a bpf-local-storage from a *sk*.
 *
 *		Logically, it could be thought of as getting the value from
 *		a *map* with *sk* as the **key**. From this
 *		perspective, the usage is not much different from
 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except that this
 *		helper enforces that the key must be a full socket and the map
 *		must be a **BPF_MAP_TYPE_SK_STORAGE** as well.
 *
 *		Underneath, the value is stored locally at *sk* instead of
 *		in the *map*. The *map* is used as the bpf-local-storage
 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
 *		searched against all bpf-local-storages residing at *sk*.
 *
 *		*sk* is a kernel **struct sock** pointer for LSM programs.
 *		*sk* is a **struct bpf_sock** pointer for other program types.
 *
 *		An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
 *		used such that a new bpf-local-storage will be
 *		created if one does not exist. *value* can be used
 *		together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
 *		the initial value of a bpf-local-storage. If *value* is
 *		**NULL**, the new bpf-local-storage will be zero initialized.
 *	Return
 *		A bpf-local-storage pointer is returned on success.
 *
 *		**NULL** if not found or there was an error in adding
 *		a new bpf-local-storage.
 *
 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
 *	Description
 *		Delete a bpf-local-storage from a *sk*.
 *	Return
 *		0 on success.
 *
 *		**-ENOENT** if the bpf-local-storage cannot be found.
 *		**-EINVAL** if sk is not a fullsock (e.g. a request_sock).
 *
 * long bpf_send_signal(u32 sig)
 *	Description
 *		Send signal *sig* to the process of the current task.
 *		The signal may be delivered to any of this process's threads.
 *	Return
 *		0 on success or successfully queued.
 *
 *		**-EBUSY** if the work queue under nmi is full.
 *
 *		**-EINVAL** if *sig* is invalid.
 *
 *		**-EPERM** if no permission to send the *sig*.
 *
 *		**-EAGAIN** if the bpf program can try again.
 *
 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
 *	Description
 *		Try to issue a SYN cookie for the packet with corresponding
 *		IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
 *
 *		*iph* points to the start of the IPv4 or IPv6 header, while
 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
 *		**sizeof**\ (**struct ipv6hdr**).
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains the length of the TCP header with options (at least
 *		**sizeof**\ (**struct tcphdr**)).
 *	Return
 *		On success, the lower 32 bits hold the generated SYN cookie,
 *		followed by 16 bits which hold the MSS value for that cookie,
 *		and the top 16 bits are unused.
 *
 *		On failure, the returned value is one of the following:
 *
 *		**-EINVAL** SYN cookie cannot be issued due to error
 *
 *		**-ENOENT** SYN cookie should not be issued (no SYN flood)
 *
 *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
 *
 *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
 *
 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through the eBPF
 *		stack and pointed to by *data*.
 *
 *		*ctx* is a pointer to the in-kernel struct sk_buff.
 *
 *		This helper is similar to **bpf_perf_event_output**\ () but
 *		restricted to raw_tracepoint bpf programs.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Safely attempt to read *size* bytes from user space address
 *		*unsafe_ptr* and store the data in *dst*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Safely attempt to read *size* bytes from kernel space address
 *		*unsafe_ptr* and store the data in *dst*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe user address
 *		*unsafe_ptr* to *dst*.
 *		The *size* should include the
 *		terminating NUL byte. In case the string length is smaller than
 *		*size*, the target is not padded with further NUL bytes. If the
 *		string length is larger than *size*, just *size*-1 bytes are
 *		copied and the last byte is set to NUL.
 *
 *		On success, returns the number of bytes that were written,
 *		including the terminal NUL. This makes this helper useful in
 *		tracing programs for reading strings, and more importantly to
 *		get their length at runtime. See the following snippet:
 *
 *		::
 *
 *			SEC("kprobe/sys_open")
 *			int bpf_sys_open(struct pt_regs *ctx)
 *			{
 *				char buf[PATHLEN]; // PATHLEN is defined to 256
 *				int res = bpf_probe_read_user_str(buf, sizeof(buf),
 *				                                  ctx->di);
 *
 *				// Consume buf, for example push it to
 *				// userspace via bpf_perf_event_output(); we
 *				// can use res (the string length) as event
 *				// size, after checking its boundaries.
 *				return 0;
 *			}
 *
 *		In comparison, using the **bpf_probe_read_user**\ () helper
 *		here instead to read the string would require estimating the
 *		length at compile time, and would often result in copying more
 *		memory than necessary.
 *
 *		Another useful use case is when parsing individual process
 *		arguments or individual environment variables navigating
 *		*current*\ **->mm->arg_start** and *current*\
 *		**->mm->env_start**: using this helper and the return value,
 *		one can quickly iterate at the right offset of the memory area.
 *	Return
 *		On success, the strictly positive length of the output string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe kernel address
 *		*unsafe_ptr* to *dst*. Same semantics as with
 *		**bpf_probe_read_user_str**\ () apply.
 *	Return
 *		On success, the strictly positive length of the string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
 *	Description
 *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
 *		*rcv_nxt* is the ack_seq to be sent out.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_send_signal_thread(u32 sig)
 *	Description
 *		Send signal *sig* to the thread corresponding to the current
 *		task.
 *	Return
 *		0 on success or successfully queued.
 *
 *		**-EBUSY** if the work queue under nmi is full.
 *
 *		**-EINVAL** if *sig* is invalid.
 *
 *		**-EPERM** if no permission to send the *sig*.
 *
 *		**-EAGAIN** if the bpf program can try again.
 *
 * u64 bpf_jiffies64(void)
 *	Description
 *		Obtain the 64-bit jiffies.
 *	Return
 *		The 64-bit jiffies.
 *
 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
 *	Description
 *		For an eBPF program attached to a perf event, retrieve the
 *		branch records (**struct perf_branch_entry**) associated with
 *		*ctx* and store them in the buffer pointed to by *buf*, up to
 *		*size* bytes.
 *	Return
 *		On success, number of bytes written to *buf*. On error, a
 *		negative value.
 *
 *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
 *		instead return the number of bytes required to store all the
 *		branch entries. If this flag is set, *buf* may be NULL.
 *
 *		**-EINVAL** if arguments invalid or **size** not a multiple
 *		of **sizeof**\ (**struct perf_branch_entry**\ ).
 *
 *		**-ENOENT** if architecture does not support branch records.
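 *
 *		As an illustration of **bpf_probe_read_kernel_str**\ ()
 *		documented above, a minimal sketch, assuming libbpf's CO-RE
 *		macros and a generated vmlinux.h; the attach point and buffer
 *		size are arbitrary choices for the example:
 *
 *		::
 *
 *			#include <vmlinux.h>
 *			#include <bpf/bpf_helpers.h>
 *			#include <bpf/bpf_core_read.h>
 *			#include <bpf/bpf_tracing.h>
 *
 *			SEC("kprobe/do_unlinkat")
 *			int BPF_KPROBE(handle_unlink, int dfd, struct filename *name)
 *			{
 *				const char *p = BPF_CORE_READ(name, name);
 *				char buf[64];
 *
 *				// Copy the NUL terminated name; the return
 *				// value is the string length incl. the NUL.
 *				if (bpf_probe_read_kernel_str(buf, sizeof(buf), p) > 0)
 *					bpf_printk("unlink %s", buf);
 *				return 0;
 *			}
 *
 *			char _license[] SEC("license") = "GPL";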
 *
 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
 *	Description
 *		Returns 0 on success; the values for *pid* and *tgid* as seen
 *		from the current *namespace* will be returned in *nsdata*.
 *	Return
 *		0 on success, or one of the following in case of failure:
 *
 *		**-EINVAL** if the *dev* and *ino* supplied don't match the
 *		dev_t and inode number of the nsfs of the current task, or if
 *		the *dev* conversion to dev_t lost high bits.
 *
 *		**-ENOENT** if pidns does not exist for the current task.
 *
 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through the eBPF
 *		stack and pointed to by *data*.
 *
 *		*ctx* is a pointer to the in-kernel struct xdp_buff.
 *
 *		This helper is similar to **bpf_perf_event_output**\ () but
 *		restricted to raw_tracepoint bpf programs.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_netns_cookie(void *ctx)
 *	Description
 *		Retrieve the cookie (generated by the kernel) of the network
 *		namespace the input *ctx* is associated with. The network
 *		namespace cookie remains stable for its lifetime and provides
 *		a global identifier that can be assumed unique. If *ctx* is
 *		NULL, then the helper returns the cookie for the initial
 *		network namespace. The cookie itself is very similar to that
 *		of the **bpf_get_socket_cookie**\ () helper, but for network
 *		namespaces instead of sockets.
 *	Return
 *		An 8-byte long opaque number.
 *
 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
 *	Description
 *		Return id of cgroup v2 that is ancestor of the cgroup associated
 *		with the current task at the *ancestor_level*. The root cgroup
 *		is at *ancestor_level* zero and each step down the hierarchy
 *		increments the level. If *ancestor_level* == level of cgroup
 *		associated with the current task, then return value will be the
 *		same as that of **bpf_get_current_cgroup_id**\ ().
 *
 *		The helper is useful to implement policies based on cgroups
 *		that are upper in hierarchy than immediate cgroup associated
 *		with the current task.
 *
 *		The format of returned id and helper limitations are same as in
 *		**bpf_get_current_cgroup_id**\ ().
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
 *	Description
 *		Helper is overloaded depending on BPF program type. This
 *		description applies to **BPF_PROG_TYPE_SCHED_CLS** and
 *		**BPF_PROG_TYPE_SCHED_ACT** programs.
 *
 *		Assign the *sk* to the *skb*. When combined with appropriate
 *		routing configuration to receive the packet towards the socket,
 *		will cause *skb* to be delivered to the specified socket.
 *		Subsequent redirection of *skb* via **bpf_redirect**\ (),
 *		**bpf_clone_redirect**\ () or other methods outside of BPF may
 *		interfere with successful delivery to the socket.
 *
 *		This operation is only valid from TC ingress path.
 *
 *		The *flags* argument must be zero.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EINVAL** if specified *flags* are not supported.
 *
 *		**-ENOENT** if the socket is unavailable for assignment.
 *
 *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
 *
 *		**-EOPNOTSUPP** if the operation is not supported, for example
 *		a call from outside of TC ingress.
 *
 *		**-ESOCKTNOSUPPORT** if the socket type is not supported
 *		(reuseport).
 *
 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
 *	Description
 *		Helper is overloaded depending on BPF program type. This
 *		description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
 *
 *		Select the *sk* as a result of a socket lookup.
 *
 *		For the operation to succeed, the passed socket must be
 *		compatible with the packet description provided by the *ctx*
 *		object.
 *
 *		L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
 *		be an exact match. While IP family (**AF_INET** or
 *		**AF_INET6**) must be compatible, that is IPv6 sockets
 *		that are not v6-only can be selected for IPv4 packets.
 *
 *		Only TCP listeners and UDP unconnected sockets can be
 *		selected. *sk* can also be NULL to reset any previous
 *		selection.
 *
 *		The *flags* argument can be a combination of the following
 *		values:
 *
 *		* **BPF_SK_LOOKUP_F_REPLACE** to override the previous
 *		  socket selection, potentially done by a BPF program
 *		  that ran before us.
 *
 *		* **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
 *		  load-balancing within reuseport group for the socket
 *		  being selected.
 *
 *		On success *ctx->sk* will point to the selected socket.
 *
 *	Return
 *		0 on success, or a negative errno in case of failure.
 *
 *		* **-EAFNOSUPPORT** if socket family (*sk->family*) is
 *		  not compatible with packet family (*ctx->family*).
 *
 *		* **-EEXIST** if socket has been already selected,
 *		  potentially by another program, and
 *		  **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
 *
 *		* **-EINVAL** if unsupported flags were specified.
 *
 *		* **-EPROTOTYPE** if socket L4 protocol
 *		  (*sk->protocol*) doesn't match packet protocol
 *		  (*ctx->protocol*).
 *
 *		* **-ESOCKTNOSUPPORT** if socket is not in allowed
 *		  state (TCP listening or UDP unconnected).
 *
 * u64 bpf_ktime_get_boot_ns(void)
 *	Description
 *		Return the time elapsed since system boot, in nanoseconds.
 *		Does include the time the system was suspended.
 *		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
 *	Return
 *		Current *ktime*.
 *
 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
 *	Description
 *		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to
 *		print out the format string.
 *		The *m* represents the seq_file. The *fmt* and *fmt_size* are
 *		for the format string itself. The *data* and *data_len* are
 *		format string arguments. The *data* are a **u64** array and
 *		corresponding format string values are stored in the array.
 *		For strings and pointers where pointees are accessed, only the
 *		pointer values are stored in the *data* array. The *data_len*
 *		is the size of *data* in bytes - it must be a multiple of 8.
 *
 *		Formats **%s**, **%p{i,I}{4,6}** require reading kernel
 *		memory. Reading kernel memory may fail due to either an
 *		invalid address or a valid address but requiring a major
 *		memory fault. If reading kernel memory fails, the string for
 *		**%s** will be an empty string, and the ip address for
 *		**%p{i,I}{4,6}** will be 0. Not returning an error to the bpf
 *		program is consistent with what **bpf_trace_printk**\ ()
 *		does for now.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EBUSY** if per-CPU memory copy buffer is busy, can try again
 *		by returning 1 from bpf program.
 *
 *		**-EINVAL** if arguments are invalid, or if *fmt* is
 *		invalid/unsupported.
 *
 *		**-E2BIG** if *fmt* contains too many format specifiers.
 *
 *		**-EOVERFLOW** if an overflow happened: The same object will be
 *		tried again.
 *
 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
 *	Description
 *		**bpf_seq_write**\ () uses seq_file **seq_write**\ () to write
 *		the data. The *m* represents the seq_file. The *data* and *len*
 *		represent the data to write in bytes.
 *	Return
 *		0 on success, or a negative error in case of failure:
 *
 *		**-EOVERFLOW** if an overflow happened: The same object will be
 *		tried again.
 *
 * u64 bpf_sk_cgroup_id(void *sk)
 *	Description
 *		Return the cgroup v2 id of the socket *sk*.
 *
 *		*sk* must be a non-**NULL** pointer to a socket, e.g. one
 *		returned from **bpf_sk_lookup_xxx**\ (),
 *		**bpf_sk_fullsock**\ (), etc. The format of returned id is
 *		same as in **bpf_skb_cgroup_id**\ ().
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
 *	Description
 *		Return id of cgroup v2 that is ancestor of cgroup associated
 *		with the *sk* at the *ancestor_level*. The root cgroup is at
 *		*ancestor_level* zero and each step down the hierarchy
 *		increments the level. If *ancestor_level* == level of cgroup
 *		associated with *sk*, then return value will be same as that
 *		of **bpf_sk_cgroup_id**\ ().
 *
 *		The helper is useful to implement policies based on cgroups
 *		that are upper in hierarchy than immediate cgroup associated
 *		with *sk*.
 *
 *		The format of returned id and helper limitations are same as in
 *		**bpf_sk_cgroup_id**\ ().
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
 *	Description
 *		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *		If **0** is specified in *flags*, an adaptive notification
 *		of new data availability is sent.
 *
 *		An adaptive notification is a notification sent whenever the
 *		user-space process has caught up and consumed all available
 *		payloads. In case the user-space process is still processing a
 *		previous payload, then no notification is needed as it will
 *		process the newly added payload automatically.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
 *	Description
 *		Reserve *size* bytes of payload in a ring buffer *ringbuf*.
 *		*flags* must be 0.
 *	Return
 *		Valid pointer with *size* bytes of memory available; NULL,
 *		otherwise.
 *
 * void bpf_ringbuf_submit(void *data, u64 flags)
 *	Description
 *		Submit reserved ring buffer sample, pointed to by *data*.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *		If **0** is specified in *flags*, an adaptive notification
 *		of new data availability is sent.
 *
 *		See 'bpf_ringbuf_output()' for the definition of adaptive
 *		notification.
 *	Return
 *		Nothing. Always succeeds.
 *
 * void bpf_ringbuf_discard(void *data, u64 flags)
 *	Description
 *		Discard reserved ring buffer sample, pointed to by *data*.
 *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
 *		of new data availability is sent.
 *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
 *		of new data availability is sent unconditionally.
 *		If **0** is specified in *flags*, an adaptive notification
 *		of new data availability is sent.
 *
 *		See 'bpf_ringbuf_output()' for the definition of adaptive
 *		notification.
 *	Return
 *		Nothing. Always succeeds.
 *
 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
 *	Description
 *		Query various characteristics of the provided ring buffer. What
 *		exactly is queried is determined by *flags*:
 *
 *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
 *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
 *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
 *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
 *
 *		Data returned is just a momentary snapshot of actual values
 *		and could be inaccurate, so this facility should be used to
 *		power heuristics and for reporting, not to make 100% correct
 *		calculations.
 *	Return
 *		Requested value, or 0, if *flags* are not recognized.
 *
 * long bpf_csum_level(struct sk_buff *skb, u64 level)
 *	Description
 *		Change the skb's checksum level by one layer up or down, or
 *		reset it entirely to none in order to have the stack perform
 *		checksum validation. The level is applicable to the following
 *		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
 *		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
 *		through **bpf_skb_adjust_room**\ () helper with passing in
 *		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
 *		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
 *		the UDP header is removed. Similarly, an encap of the latter
 *		into the former could be accompanied by a helper call to
 *		**bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
 *		skb is still intended to be processed in higher layers of the
 *		stack instead of just egressing at tc.
 *
 *		There are three supported level settings at this time:
 *
 *		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
 *		  with CHECKSUM_UNNECESSARY.
 *		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
 *		  with CHECKSUM_UNNECESSARY.
 *		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
 *		  sets CHECKSUM_NONE to force checksum validation by the stack.
 *		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
 *		  skb->csum_level.
 *	Return
 *		0 on success, or a negative error in case of failure. In the
 *		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
 *		is returned or the error code -EACCES in case the skb is not
 *		subject to CHECKSUM_UNNECESSARY.
 *
 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_timewait_sock*
 *		pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
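 *
 *		As an illustration of the **bpf_ringbuf_reserve**\ () and
 *		**bpf_ringbuf_submit**\ () pattern documented above, a minimal
 *		sketch; the map name, buffer size and event layout are
 *		invented for the example:
 *
 *		::
 *
 *			#include <linux/bpf.h>
 *			#include <bpf/bpf_helpers.h>
 *
 *			struct event {
 *				__u32 pid;
 *			};
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_RINGBUF);
 *				__uint(max_entries, 256 * 1024);
 *			} rb SEC(".maps");
 *
 *			SEC("tracepoint/syscalls/sys_enter_execve")
 *			int handle_execve(void *ctx)
 *			{
 *				struct event *e;
 *
 *				e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *				if (!e) // reservation failed, buffer full
 *					return 0;
 *
 *				e->pid = bpf_get_current_pid_tgid() >> 32;
 *				bpf_ringbuf_submit(e, 0); // adaptive notification
 *				return 0;
 *			}
 *
 *			char _license[] SEC("license") = "GPL";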
 *
 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *tcp_request_sock*
 *		pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
 *	Description
 *		Return a user or a kernel stack in bpf program provided buffer.
 *		To achieve this, the helper needs *task*, which is a valid
 *		pointer to **struct task_struct**. To store the stacktrace, the
 *		bpf program provides *buf* with a nonnegative *size*.
 *
 *		The last argument, *flags*, holds the number of stack frames to
 *		skip (from 0 to 255), masked with
 *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 *		the following flags:
 *
 *		**BPF_F_USER_STACK**
 *			Collect a user space stack instead of a kernel stack.
 *		**BPF_F_USER_BUILD_ID**
 *			Collect buildid+offset instead of ips for user stack,
 *			only valid if **BPF_F_USER_STACK** is also specified.
 *
 *		**bpf_get_task_stack**\ () can collect up to
 *		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
 *		to a sufficiently large buffer size. Note that
 *		this limit can be controlled with the **sysctl** program, and
 *		that it should be manually increased in order to profile long
 *		user stacks (such as stacks for Java programs). To do so, use:
 *
 *		::
 *
 *			# sysctl kernel.perf_event_max_stack=<new value>
 *	Return
 *		The non-negative copied *buf* length equal to or less than
 *		*size* on success, or a negative error in case of failure.
 *
 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
 *	Description
 *		Load header option. Support reading a particular TCP header
 *		option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
 *
 *		If *flags* is 0, it will search the option from the
 *		*skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
 *		has details on what skb_data contains under different
 *		*skops*\ **->op**.
 *
 *		The first byte of the *searchby_res* specifies the
 *		kind that it wants to search.
 *
 *		If the searching kind is an experimental kind
 *		(i.e. 253 or 254 according to RFC6994), it also
 *		needs to specify the "magic" which is either
 *		2 bytes or 4 bytes. It then also needs to
 *		specify the size of the magic by using
 *		the 2nd byte which is "kind-length" of a TCP
 *		header option and the "kind-length" also
 *		includes the first 2 bytes "kind" and "kind-length"
 *		itself as a normal TCP header option also does.
 *
 *		For example, to search experimental kind 254 with
 *		2 byte magic 0xeB9F, the searchby_res should be
 *		[ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
 *
 *		To search for the standard window scale option (3),
 *		the *searchby_res* should be [ 3, 0, 0, .... 0 ].
 *		Note, kind-length must be 0 for a regular option.
 *
 *		Searching for No-Op (0) and End-of-Option-List (1) is
 *		not supported.
 *
 *		*len* must be at least 2 bytes, which is the minimal size
 *		of a header option.
 *
 *		Supported flags:
 *
 *		* **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
 *		  saved_syn packet or the just-received syn packet.
 *
 *	Return
 *		> 0 when found, the header option is copied to *searchby_res*.
 *		The return value is the total length copied. On failure, a
 *		negative error code is returned:
 *
 *		**-EINVAL** if a parameter is invalid.
 *
 *		**-ENOMSG** if the option is not found.
 *
 *		**-ENOENT** if no syn packet is available when
 *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used.
 *
 *		**-ENOSPC** if there is not enough space. Only *len* number of
 *		bytes are copied.
 *
 *		**-EFAULT** on failure to parse the header options in the
 *		packet.
 *
 *		**-EPERM** if the helper cannot be used under the current
 *		*skops*\ **->op**.
 *
 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
 *	Description
 *		Store header option. The data will be copied
 *		from buffer *from* with length *len* to the TCP header.
 *
 *		The buffer *from* should have the whole option that
 *		includes the kind, kind-length, and the actual
 *		option data. The *len* must be at least kind-length
 *		long. The kind-length does not have to be 4 byte
 *		aligned. The kernel will take care of the padding
 *		and setting the 4 bytes aligned value to th->doff.
 *
 *		This helper will check for duplicated option
 *		by searching the same option in the outgoing skb.
 *
 *		This helper can only be called during
 *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
 *	Return
 *		0 on success, or negative error in case of failure:
 *
 *		**-EINVAL** if a parameter is invalid.
 *
 *		**-ENOSPC** if there is not enough space in the header.
 *		Nothing has been written.
 *
 *		**-EEXIST** if the option already exists.
 *
 *		**-EFAULT** on failure to parse the existing header options.
 *
 *		**-EPERM** if the helper cannot be used under the current
 *		*skops*\ **->op**.
 *
 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
 *	Description
 *		Reserve *len* bytes for the bpf header option. The
 *		space will be used by **bpf_store_hdr_opt**\ () later in
 *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
 *
 *		If **bpf_reserve_hdr_opt**\ () is called multiple times,
 *		the total number of bytes will be reserved.
 *
 *		This helper can only be called during
 *		**BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
 *
 *	Return
 *		0 on success, or negative error in case of failure:
 *
 *		**-EINVAL** if a parameter is invalid.
 *
 *		**-ENOSPC** if there is not enough space in the header.
 *
 *		**-EPERM** if the helper cannot be used under the current
 *		*skops*\ **->op**.
 *
 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
 *	Description
 *		Get a bpf_local_storage from an *inode*.
 *
 *		Logically, it could be thought of as getting the value from
 *		a *map* with *inode* as the **key**. From this
 *		perspective, the usage is not much different from
 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
 *		helper enforces the key must be an inode and the map must also
 *		be a **BPF_MAP_TYPE_INODE_STORAGE**.
 *
 *		Underneath, the value is stored locally at *inode* instead of
 *		the *map*. The *map* is used as the bpf-local-storage
 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
 *		searched against all bpf_local_storage residing at *inode*.
 *
 *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
 *		used such that a new bpf_local_storage will be
 *		created if one does not exist. *value* can be used
 *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
 *		the initial value of a bpf_local_storage. If *value* is
 *		**NULL**, the new bpf_local_storage will be zero initialized.
 *	Return
 *		A bpf_local_storage pointer is returned on success.
 *
 *		**NULL** if not found or there was an error in adding
 *		a new bpf_local_storage.
 *
 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
 *	Description
 *		Delete a bpf_local_storage from an *inode*.
 *	Return
 *		0 on success.
 *
 *		**-ENOENT** if the bpf_local_storage cannot be found.
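 *
 *		As an illustration of the **bpf_reserve_hdr_opt**\ () and
 *		**bpf_store_hdr_opt**\ () pattern documented above, a minimal
 *		sketch; it reuses the experimental kind 254 with 2-byte magic
 *		0xeB9F from the examples above, and a real program would also
 *		have to enable the header-option callbacks via
 *		**bpf_sock_ops_cb_flags_set**\ ():
 *
 *		::
 *
 *			#include <linux/bpf.h>
 *			#include <bpf/bpf_helpers.h>
 *
 *			SEC("sockops")
 *			int add_tcp_opt(struct bpf_sock_ops *skops)
 *			{
 *				// kind 254, kind-len 4, 2-byte magic 0xeB9F
 *				__u8 opt[4] = { 254, 4, 0xeb, 0x9f };
 *
 *				switch (skops->op) {
 *				case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *					bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
 *					break;
 *				case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *					bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
 *					break;
 *				}
 *				return 1;
 *			}
 *
 *			char _license[] SEC("license") = "GPL";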
 *
 * long bpf_d_path(struct path *path, char *buf, u32 sz)
 *	Description
 *		Return full path for given **struct path** object, which
 *		needs to be the kernel BTF *path* object. The path is
 *		returned in the provided buffer *buf* of size *sz* and
 *		is zero terminated.
 *
 *	Return
 *		On success, the strictly positive length of the string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
 *	Description
 *		Read *size* bytes from user space address *user_ptr* and store
 *		the data in *dst*. This is a wrapper of **copy_from_user**\ ().
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
 *	Description
 *		Use BTF to store a string representation of *ptr*->ptr in
 *		*str*, using *ptr*->type_id. This value should specify the
 *		type that *ptr*->ptr points to. LLVM
 *		__builtin_btf_type_id(type, 1) can be used to look up vmlinux
 *		BTF type ids. Traversing the data structure using BTF, the
 *		type information and values are stored in the first
 *		*str_size* - 1 bytes of *str*. Safe copy of the pointer data
 *		is carried out to avoid kernel crashes during operation.
 *		Smaller types can use string space on the stack; larger
 *		programs can use map data to store the string representation.
 *
 *		The string can be subsequently shared with userspace via
 *		bpf_perf_event_output() or ring buffer interfaces.
 *		bpf_trace_printk() is to be avoided as it places too small
 *		a limit on string size to be useful.
 *
 *		*flags* is a combination of
 *
 *		**BTF_F_COMPACT**
 *			no formatting around type information
 *		**BTF_F_NONAME**
 *			no struct/union member names/types
 *		**BTF_F_PTR_RAW**
 *			show raw (unobfuscated) pointer values;
 *			equivalent to printk specifier %px.
 *		**BTF_F_ZERO**
 *			show zero-valued struct/union members; they
 *			are not displayed by default
 *
 *	Return
 *		The number of bytes that were written (or would have been
 *		written if output had to be truncated due to string size),
 *		or a negative error in cases of failure.
 *
 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
 *	Description
 *		Use BTF to write to seq_write a string representation of
 *		*ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
 *		*flags* are identical to those used for bpf_snprintf_btf.
 *	Return
 *		0 on success or a negative error in case of failure.
 *
 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
 *	Description
 *		See **bpf_get_cgroup_classid**\ () for the main description.
 *		This helper differs from **bpf_get_cgroup_classid**\ () in that
 *		the cgroup v1 net_cls class is retrieved only from the *skb*'s
 *		associated socket instead of the current process.
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
 *	Description
 *		Redirect the packet to another net device of index *ifindex*
 *		and fill in L2 addresses from neighboring subsystem. This helper
 *		is somewhat similar to **bpf_redirect**\ (), except that it
 *		populates L2 addresses as well, meaning, internally, the helper
 *		relies on the neighbor lookup for the L2 address of the nexthop.
 *
 *		The helper will perform a FIB lookup based on the skb's
 *		networking header to get the address of the next hop, unless
 *		this is supplied by the caller in the *params* argument.
 *		The *plen* argument indicates the length of *params* and
 *		should be set to 0 if *params* is NULL.
 *
 *		The *flags* argument is reserved and must be 0. The helper is
 *		currently only supported for tc BPF program types, and enabled
 *		for IPv4 and IPv6 protocols.
 *	Return
 *		The helper returns **TC_ACT_REDIRECT** on success or
 *		**TC_ACT_SHOT** on error.
 *
 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
 *	Description
 *		Take a pointer to a percpu ksym, *percpu_ptr*, and return a
 *		pointer to the percpu kernel variable on *cpu*. A ksym is an
 *		extern variable decorated with '__ksym'. For ksym, there is a
 *		global var (either static or global) defined with the same
 *		name in the kernel. The ksym is percpu if the global var is
 *		percpu. The returned pointer points to the global percpu var
 *		on *cpu*.
 *
 *		bpf_per_cpu_ptr() has the same semantics as per_cpu_ptr() in
 *		the kernel, except that bpf_per_cpu_ptr() may return NULL.
 *		This happens if *cpu* is larger than nr_cpu_ids. The caller of
 *		bpf_per_cpu_ptr() must check the returned value.
 *	Return
 *		A pointer pointing to the kernel percpu variable on *cpu*, or
 *		NULL, if *cpu* is invalid.
 *
 * void *bpf_this_cpu_ptr(const void *percpu_ptr)
 *	Description
 *		Take a pointer to a percpu ksym, *percpu_ptr*, and return a
 *		pointer to the percpu kernel variable on this cpu. See the
 *		description of 'ksym' in **bpf_per_cpu_ptr**\ ().
 *
 *		bpf_this_cpu_ptr() has the same semantics as this_cpu_ptr() in
 *		the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
 *		never return NULL.
 *	Return
 *		A pointer pointing to the kernel percpu variable on this cpu.
 *
 * long bpf_redirect_peer(u32 ifindex, u64 flags)
 *	Description
 *		Redirect the packet to another net device of index *ifindex*.
 *		This helper is somewhat similar to **bpf_redirect**\ (), except
 *		that the redirection happens to the *ifindex*' peer device and
 *		the netns switch takes place from ingress to ingress without
 *		going through the CPU's backlog queue.
 *
 *		The *flags* argument is reserved and must be 0. The helper is
 *		currently only supported for tc BPF program types at the ingress
 *		hook and for veth device types. The peer device must reside in a
 *		different network namespace.
 *	Return
 *		The helper returns **TC_ACT_REDIRECT** on success or
 *		**TC_ACT_SHOT** on error.
 *
 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
 *	Description
 *		Get a bpf_local_storage from the *task*.
 *
 *		Logically, it could be thought of as getting the value from
 *		a *map* with *task* as the **key**. From this
 *		perspective, the usage is not much different from
 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
 *		helper enforces the key must be a task_struct and the map must
 *		also be a **BPF_MAP_TYPE_TASK_STORAGE**.
 *
 *		Underneath, the value is stored locally at *task* instead of
 *		the *map*. The *map* is used as the bpf-local-storage
 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
 *		searched against all bpf_local_storage residing at *task*.
 *
 *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
 *		used such that a new bpf_local_storage will be
 *		created if one does not exist. *value* can be used
 *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
 *		the initial value of a bpf_local_storage. If *value* is
 *		**NULL**, the new bpf_local_storage will be zero initialized.
 *	Return
 *		A bpf_local_storage pointer is returned on success.
 *
 *		**NULL** if not found or there was an error in adding
 *		a new bpf_local_storage.
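 *
 *		As an illustration of **bpf_task_storage_get**\ () with
 *		**BPF_LOCAL_STORAGE_GET_F_CREATE** documented above, a minimal
 *		sketch assuming a generated vmlinux.h; the map name and the
 *		LSM attach point are invented for the example:
 *
 *		::
 *
 *			#include <vmlinux.h>
 *			#include <bpf/bpf_helpers.h>
 *			#include <bpf/bpf_tracing.h>
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *				__uint(map_flags, BPF_F_NO_PREALLOC);
 *				__type(key, int);
 *				__type(value, __u64);
 *			} exec_cnt SEC(".maps");
 *
 *			SEC("lsm/bprm_committed_creds")
 *			int BPF_PROG(count_exec, struct linux_binprm *bprm)
 *			{
 *				__u64 *cnt;
 *
 *				// Create the per-task slot on first use.
 *				cnt = bpf_task_storage_get(&exec_cnt,
 *						bpf_get_current_task_btf(), 0,
 *						BPF_LOCAL_STORAGE_GET_F_CREATE);
 *				if (cnt)
 *					__sync_fetch_and_add(cnt, 1);
 *				return 0;
 *			}
 *
 *			char _license[] SEC("license") = "GPL";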
 *
 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
 *	Description
 *		Delete a bpf_local_storage from a *task*.
 *	Return
 *		0 on success.
 *
 *		**-ENOENT** if the bpf_local_storage cannot be found.
 *
 * struct task_struct *bpf_get_current_task_btf(void)
 *	Description
 *		Return a BTF pointer to the "current" task.
 *		This pointer can also be used in helpers that accept an
 *		*ARG_PTR_TO_BTF_ID* of type *task_struct*.
 *	Return
 *		Pointer to the current task.
 *
 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
 *	Description
 *		Set or clear certain options on *bprm*:
 *
 *		**BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
 *		which sets the **AT_SECURE** auxv for glibc. The bit
 *		is cleared if the flag is not specified.
 *	Return
 *		**-EINVAL** if invalid *flags* are passed, zero otherwise.
 *
 * u64 bpf_ktime_get_coarse_ns(void)
 *	Description
 *		Return a coarse-grained version of the time elapsed since
 *		system boot, in nanoseconds. Does not include time the system
 *		was suspended.
 *
 *		See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
 *	Return
 *		Current *ktime*.
 *
 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
 *	Description
 *		Returns the stored IMA hash of the *inode* (if it's available).
 *		If the hash is larger than *size*, then only *size*
 *		bytes will be copied to *dst*.
 *	Return
 *		The **hash_algo** is returned on success,
 *		**-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
 *		invalid arguments are passed.
 *
 * struct socket *bpf_sock_from_file(struct file *file)
 *	Description
 *		If the given file represents a socket, returns the associated
 *		socket.
 *	Return
 *		A pointer to a struct socket on success or NULL if the file is
 *		not a socket.
 *
 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
 *	Description
 *		Check whether the packet size exceeds the MTU of the net
 *		device (based on *ifindex*). This helper will likely be used
 *		in combination with helpers that adjust/change the packet
 *		size.
 *
 *		The argument *len_diff* can be used for querying with a
 *		planned size change. This allows checking the MTU prior to
 *		changing the packet ctx. A *len_diff* adjustment that is
 *		larger than the actual packet size (resulting in a negative
 *		packet size) will in principle not exceed the MTU, which is
 *		why it is not considered a failure. Other BPF helpers are
 *		needed for performing the planned size change; the
 *		responsibility for catching a negative packet size therefore
 *		belongs in those helpers.
 *
 *		Specifying *ifindex* zero means the MTU check is performed
 *		against the current net device. This is practical if the check
 *		isn't used prior to redirect.
 *
 *		On input, *mtu_len* must be a valid pointer, else the verifier
 *		will reject the BPF program. If the value *mtu_len* is
 *		initialized to zero then the ctx packet size is used. When the
 *		value *mtu_len* is provided as input, it specifies the L3
 *		length that the MTU check is done against. Remember that XDP
 *		and TC lengths operate at L2, but this value is L3, as it
 *		correlates to the MTU and IP-header tot_len values, which are
 *		L3 (similar behavior to bpf_fib_lookup).
 *
 *		The Linux kernel route table can configure MTUs on a more
 *		specific per route level, which is not provided by this helper.
 *		For route level MTU checks use the **bpf_fib_lookup**\ ()
 *		helper.
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
 *
 *		The *flags* argument can be a combination of one or more of the
 *		following values:
 *
 *		**BPF_MTU_CHK_SEGS**
 *			This flag only works for *ctx* **struct sk_buff**.
 *			If the packet context contains extra packet segment
 *			buffers (often known as GSO skb), then the MTU check is
 *			harder to do at this point, because in the transmit
 *			path it is possible for the skb packet to get
 *			re-segmented (depending on net device features). This
 *			could still be an MTU violation, so this flag enables
 *			performing the MTU check against segments, with a
 *			different violation return code to tell it apart. The
 *			check cannot use len_diff.
 *
 *		On return *mtu_len* pointer contains the MTU value of the net
 *		device. Remember the net device configured MTU is the L3 size,
 *		which is returned here, while XDP and TC lengths operate at
 *		L2. The helper takes this into account for you, but remember
 *		it when using the MTU value in your BPF code.
 *
 *	Return
 *		* 0 on success, and populate MTU value in *mtu_len* pointer.
 *
 *		* < 0 if any input argument is invalid (*mtu_len* not updated)
 *
 *		MTU violations return positive values, but also populate MTU
 *		value in *mtu_len* pointer, as this can be needed for
 *		implementing PMTU handling:
 *
 *		* **BPF_MTU_CHK_RET_FRAG_NEEDED**
 *		* **BPF_MTU_CHK_RET_SEGS_TOOBIG**
 *
 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		For each element in **map**, call **callback_fn** function
 *		with **map**, **callback_ctx** and other map-specific
 *		parameters. The **callback_fn** should be a static function
 *		and the **callback_ctx** should be a pointer to the stack.
 *		The **flags** is used to control certain aspects of the
 *		helper. Currently, the **flags** must be 0.
 *
 *		The following is a list of supported map types and their
 *		respective expected callback signatures:
 *
 *		BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
 *		BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
 *		BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
 *
 *		long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
 *
 *		For per_cpu maps, the map_value is the value on the cpu where
 *		the bpf_prog is running.
 *
 *		If **callback_fn** returns 0, the helper will continue to the
 *		next element. If the return value is 1, the helper will skip
 *		the rest of the elements and return. Other return values are
 *		not used now.
 *
 *	Return
 *		The number of traversed map elements for success, **-EINVAL**
 *		for invalid **flags**.
 *
 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
 *	Description
 *		Outputs a string into the **str** buffer of size **str_size**
 *		based on a format string stored in a read-only map pointed to
 *		by **fmt**.
 *
 *		Each format specifier in **fmt** corresponds to one u64
 *		element in the **data** array. For strings and pointers where
 *		pointees are accessed, only the pointer values are stored in
 *		the *data* array. The *data_len* is the size of *data* in
 *		bytes - it must be a multiple of 8.
 *
 *		Formats **%s** and **%p{i,I}{4,6}** require reading kernel
 *		memory. Reading kernel memory may fail due to either an
 *		invalid address or a valid address but requiring a major
 *		memory fault. If reading kernel memory fails, the string for
 *		**%s** will be an empty string, and the ip address for
 *		**%p{i,I}{4,6}** will be 0. Not returning an error to the bpf
 *		program is consistent with what **bpf_trace_printk**\ ()
 *		does for now.
 *	Return
 *		The strictly positive length of the formatted string,
 *		including the trailing zero character. If the return value is
 *		greater than **str_size**, **str** contains a truncated
 *		string, guaranteed to be zero-terminated except when
 *		**str_size** is 0.
 *
 *		Or **-EBUSY** if the per-CPU memory copy buffer is busy.
 *
 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
 *	Description
 *		Execute bpf syscall with given arguments.
 *	Return
 *		A syscall result.
 *
 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
 *	Description
 *		Find BTF type with given name and kind in vmlinux BTF or in
 *		module's BTFs.
 *	Return
 *		Returns btf_id and btf_obj_fd in lower and upper 32 bits.
 *
 * long bpf_sys_close(u32 fd)
 *	Description
 *		Execute close syscall for given FD.
 *	Return
 *		A syscall result.
 *
 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
 *	Description
 *		Initialize the timer.
 *		First 4 bits of *flags* specify clockid.
 *		Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are
 *		allowed. All other bits of *flags* are reserved.
 *		The verifier will reject the program if *timer* is not from
 *		the same *map*.
 *	Return
 *		0 on success.
 *		**-EBUSY** if *timer* is already initialized.
 *		**-EINVAL** if invalid *flags* are passed.
 *		**-EPERM** if *timer* is in a map that doesn't have any user
 *		references. The user space should either hold a file
 *		descriptor to a map with timers or pin such map in bpffs.
 *		When map is unpinned or file descriptor is closed all timers
 *		in the map will be cancelled and freed.
 *
 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
 *	Description
 *		Configure the timer to call *callback_fn* static function.
 *	Return
 *		0 on success.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier.
 *		**-EPERM** if *timer* is in a map that doesn't have any user
 *		references. The user space should either hold a file
 *		descriptor to a map with timers or pin such map in bpffs.
 *		When map is unpinned or file descriptor is closed all timers
 *		in the map will be cancelled and freed.
 *
 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
 *	Description
 *		Set timer expiration N nanoseconds from the current time. The
 *		configured callback will be invoked in soft irq context on
 *		some cpu and will not repeat unless another bpf_timer_start()
 *		is made. In such case the next invocation can migrate to a
 *		different cpu. Since struct bpf_timer is a field inside map
 *		element the map owns the timer. The bpf_timer_set_callback()
 *		will increment refcnt of BPF program to make sure that
 *		callback_fn code stays valid. When user space reference to a
 *		map reaches zero all timers in a map are cancelled and
 *		corresponding program's refcnts are decremented. This is done
 *		to make sure that Ctrl-C of a user process doesn't leave any
 *		timers running. If map is pinned in bpffs the callback_fn can
 *		re-arm itself indefinitely. bpf_map_update/delete_elem()
 *		helpers and user space sys_bpf commands cancel and free the
 *		timer in the given map element. The map can contain timers
 *		that invoke callback_fn-s from different programs. The same
 *		callback_fn can serve different timers from different maps if
 *		key/value layout matches across maps. Every
 *		bpf_timer_set_callback() can have different callback_fn.
 *
 *	Return
 *		0 on success.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier or invalid *flags* are passed.
 *
 * long bpf_timer_cancel(struct bpf_timer *timer)
 *	Description
 *		Cancel the timer and wait for callback_fn to finish if it was
 *		running.
 *	Return
 *		0 if the timer was not active.
 *		1 if the timer was active.
 *		**-EINVAL** if *timer* was not initialized with
 *		bpf_timer_init() earlier.
 *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel()
 *		on its own timer which would have led to a deadlock otherwise.
 *
 * u64 bpf_get_func_ip(void *ctx)
 *	Description
 *		Get the address of the traced function (for tracing and kprobe
 *		programs).
 *	Return
 *		Address of the traced function.
 *
 * u64 bpf_get_attach_cookie(void *ctx)
 *	Description
 *		Get the bpf_cookie value provided (optionally) during the
 *		program attachment. It might be different for each individual
 *		attachment, even if the BPF program itself is the same.
 *		Expects the BPF program context *ctx* as a first argument.
 *
 *		Supported for the following program types:
 *		- kprobe/uprobe;
 *		- tracepoint;
 *		- perf_event.
 *	Return
 *		Value specified by the user at BPF link creation/attachment
 *		time or 0, if it was not specified.
 *
 * long bpf_task_pt_regs(struct task_struct *task)
 *	Description
 *		Get the struct pt_regs associated with **task**.
 *	Return
 *		A pointer to struct pt_regs.
 *
 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
 *	Description
 *		Get a branch trace from hardware engines like Intel LBR. The
 *		hardware engine is stopped shortly after the helper is
 *		called. Therefore, the user needs to filter branch entries
 *		based on the actual use case. To capture branch trace
 *		before the trigger point of the BPF program, the helper
 *		should be called at the beginning of the BPF program.
 *
 *		The data is stored as struct perf_branch_entry into the
 *		output buffer *entries*. *size* is the size of *entries* in
 *		bytes. *flags* is reserved for now and must be zero.
 *
 *	Return
 *		On success, number of bytes written to *entries*. On error, a
 *		negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-ENOENT** if architecture does not support branch records.
 *
 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
 *	Description
 *		Behaves like **bpf_trace_printk**\ () helper, but takes an
 *		array of u64 to format and can handle more format args as a
 *		result.
 *
 *		Arguments are to be used as in **bpf_seq_printf**\ () helper.
 *	Return
 *		The number of bytes written to the buffer, or a negative error
 *		in case of failure.
 *
 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
 *	Description
 *		Get the address of a kernel symbol, returned in *res*. *res*
 *		is set to 0 if the symbol is not found.
 *	Return
 *		On success, zero. On error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-EINVAL** if string *name* is not the same size as
 *		*name_sz*.
 *
 *		**-ENOENT** if symbol is not found.
 *
 *		**-EPERM** if caller does not have permission to obtain
 *		kernel address.
 *
 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		Find vma of *task* that contains *addr*, call *callback_fn*
 *		function with *task*, *vma*, and *callback_ctx*.
 *		The *callback_fn* should be a static function and
 *		the *callback_ctx* should be a pointer to the stack.
 *		The *flags* is used to control certain aspects of the helper.
 *		Currently, the *flags* must be 0.
 *
 *		The expected callback signature is
 *
 *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
 *
 *	Return
 *		0 on success.
 *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
 *		**-EBUSY** if failed to try lock mmap_lock.
* **-EINVAL** for invalid **flags**. * * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) * 	Description * 		For **nr_loops**, call **callback_fn** function * 		with **callback_ctx** as the context parameter. * 		The **callback_fn** should be a static function and * 		the **callback_ctx** should be a pointer to the stack. * 		The **flags** argument is used to control certain aspects of the helper. * 		Currently, the **flags** must be 0. Currently, nr_loops is * 		limited to 1 << 23 (~8 million) loops. * * 		The expected callback signature is * * 		long (\*callback_fn)(u32 index, void \*ctx); * * 		where **index** is the current index in the loop. The index * 		is zero-indexed. * * 		If **callback_fn** returns 0, the helper will continue to the next * 		loop. If the return value is 1, the helper will skip the rest of * 		the loops and return. Other return values are not used now, * 		and will be rejected by the verifier. * * 	Return * 		The number of loops performed, **-EINVAL** for invalid **flags**, * 		**-E2BIG** if **nr_loops** exceeds the maximum number of loops. * * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2) * 	Description * 		Do strncmp() between **s1** and **s2**. **s1** doesn't need * 		to be null-terminated and **s1_sz** is the maximum storage * 		size of **s1**. **s2** must be a read-only string. * 	Return * 		An integer less than, equal to, or greater than zero * 		if the first **s1_sz** bytes of **s1** are found to be * 		less than, to match, or to be greater than **s2**. * * long bpf_get_func_arg(void *ctx, u32 n, u64 *value) * 	Description * 		Get **n**-th argument (zero based) of the traced function (for tracing programs) * 		returned in **value**. * * 	Return * 		0 on success. * 		**-EINVAL** if *n* >= number of arguments of the traced function. * * long bpf_get_func_ret(void *ctx, u64 *value) * 	Description * 		Get return value of the traced function (for tracing programs) * 		in **value**. * * 	Return * 		0 on success. * 		**-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN. * * long bpf_get_func_arg_cnt(void *ctx) * 	Description * 		Get number of arguments of the traced function (for tracing programs). * * 	Return * 		The number of arguments of the traced function. * * int bpf_get_retval(void) * 	Description * 		Get the syscall's return value that will be returned to userspace. * * 		This helper is currently supported by cgroup programs only. * 	Return * 		The syscall's return value. * * int bpf_set_retval(int retval) * 	Description * 		Set the syscall's return value that will be returned to userspace. * * 		This helper is currently supported by cgroup programs only. * 	Return * 		0 on success, or a negative error in case of failure. * * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) * 	Description * 		Get the total size of a given xdp buff (linear and paged area). * 	Return * 		The total size of a given xdp buffer. * * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) * 	Description * 		This helper is provided as an easy way to load data from an * 		xdp buffer. It can be used to load *len* bytes from *offset* of * 		the frame associated with *xdp_md*, into the buffer pointed to by * 		*buf*. * 	Return * 		0 on success, or a negative error in case of failure. * * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) * 	Description * 		Store *len* bytes from buffer *buf* into the frame * 		associated with *xdp_md*, at *offset*. * 	Return * 		0 on success, or a negative error in case of failure.
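 *
 * 		As an illustrative sketch (not part of the UAPI; the program
 * 		name below is hypothetical and SEC() comes from libbpf's
 * 		bpf_helpers.h), the two helpers above pair up to read and
 * 		rewrite frame data without caring whether the bytes sit in the
 * 		linear or the paged area:
 *
 * 			SEC("xdp")
 * 			int rewrite_first_bytes(struct xdp_md *ctx)
 * 			{
 * 				__u8 buf[6];
 *
 * 				if (bpf_xdp_load_bytes(ctx, 0, buf, sizeof(buf)))
 * 					return XDP_ABORTED;
 * 				buf[5] ^= 1;	// flip one bit of the destination MAC
 * 				if (bpf_xdp_store_bytes(ctx, 0, buf, sizeof(buf)))
 * 					return XDP_ABORTED;
 * 				return XDP_PASS;
 * 			}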
* * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) * 	Description * 		Read *size* bytes from user space address *user_ptr* in *tsk*'s * 		address space, and store the data in *dst*. *flags* is not * 		used yet and is provided for future extensibility. This helper * 		can only be used by sleepable programs. * 	Return * 		0 on success, or a negative error in case of failure. On error * 		*dst* buffer is zeroed out. * * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * 	Description * 		Change __sk_buff->tstamp_type to *tstamp_type* * 		and set __sk_buff->tstamp to *tstamp* at the same time. * * 		If there is no need to change the __sk_buff->tstamp_type, * 		the tstamp value can be directly written to __sk_buff->tstamp * 		instead. * * 		BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that * 		will be kept during bpf_redirect_*(). A non-zero * 		*tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO * 		*tstamp_type*. * * 		A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used * 		with a zero *tstamp*. * * 		Only IPv4 and IPv6 skb->protocol are supported. * * 		This helper is most useful when a program needs to set a * 		mono delivery time to __sk_buff->tstamp and then * 		bpf_redirect_*() to the egress of an iface. For example, * 		changing the (rcv) timestamp in __sk_buff->tstamp at * 		ingress to a mono delivery time and then bpf_redirect_*() * 		to sch_fq@phy-dev. * 	Return * 		0 on success. * 		**-EINVAL** for invalid input. * 		**-EOPNOTSUPP** for unsupported protocol. * * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) * 	Description * 		Returns a calculated IMA hash of the *file*. * 		If the hash is larger than *size*, then only *size* * 		bytes will be copied to *dst*. * 	Return * 		The **hash_algo** is returned on success, * 		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if * 		invalid arguments are passed. * * void *bpf_kptr_xchg(void *map_value, void *ptr) * 	Description * 		Exchange kptr at pointer *map_value* with *ptr*, and return the * 		old value. *ptr* can be NULL, otherwise it must be a referenced * 		pointer which will be released when this helper is called. * 	Return * 		The old value of kptr (which can be NULL). The returned pointer, * 		if not NULL, is a reference which must be released using its * 		corresponding release function, or moved into a BPF map before * 		program exit. * * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu) * 	Description * 		Perform a lookup in *percpu map* for an entry associated with * 		*key* on *cpu*. * 	Return * 		Map value associated with *key* on *cpu*, or **NULL** if no entry * 		was found or *cpu* is invalid. * * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk) * 	Description * 		Dynamically cast a *sk* pointer to a *mptcp_sock* pointer. * 	Return * 		*sk* if casting is valid, or **NULL** otherwise. * * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) * 	Description * 		Get a dynptr to local memory *data*. * * 		*data* must be a pointer to a map value. * 		The maximum *size* supported is DYNPTR_MAX_SIZE. * 		*flags* is currently unused. * 	Return * 		0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE, * 		-EINVAL if flags is not 0. * * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr) * 	Description * 		Reserve *size* bytes of payload in a ring buffer *ringbuf* * 		through the dynptr interface. *flags* must be 0.
* * 		Please note that a corresponding bpf_ringbuf_submit_dynptr or * 		bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the * 		reservation fails. This is enforced by the verifier. * 	Return * 		0 on success, or a negative error in case of failure. * * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags) * 	Description * 		Submit reserved ring buffer sample, pointed to by *ptr*, * 		through the dynptr interface. This is a no-op if the dynptr is * 		invalid/null. * * 		For more information on *flags*, please see * 		'bpf_ringbuf_submit'. * 	Return * 		Nothing. Always succeeds. * * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags) * 	Description * 		Discard reserved ring buffer sample through the dynptr * 		interface. This is a no-op if the dynptr is invalid/null. * * 		For more information on *flags*, please see * 		'bpf_ringbuf_discard'. * 	Return * 		Nothing. Always succeeds. * * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags) * 	Description * 		Read *len* bytes from *src* into *dst*, starting from *offset* * 		into *src*. * 		*flags* is currently unused. * 	Return * 		0 on success, -E2BIG if *offset* + *len* exceeds the length * 		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if * 		*flags* is not 0. * * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) * 	Description * 		Write *len* bytes from *src* into *dst*, starting from *offset* * 		into *dst*. * 		*flags* is currently unused. * 	Return * 		0 on success, -E2BIG if *offset* + *len* exceeds the length * 		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* * 		is a read-only dynptr or if *flags* is not 0. * * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) * 	Description * 		Get a pointer to the underlying dynptr data. * * 		*len* must be a statically known value. The returned data slice * 		is invalidated whenever the dynptr is invalidated. * 	Return * 		Pointer to the underlying dynptr data, NULL if the dynptr is * 		read-only, if the dynptr is invalid, or if the offset and length * 		are out of bounds. * * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len) * 	Description * 		Try to issue a SYN cookie for the packet with corresponding * 		IPv4/TCP headers, *iph* and *th*, without depending on a * 		listening socket. * * 		*iph* points to the IPv4 header. * * 		*th* points to the start of the TCP header, while *th_len* * 		contains the length of the TCP header (at least * 		**sizeof**\ (**struct tcphdr**)). * 	Return * 		On success, the lower 32 bits hold the generated SYN cookie, * 		followed by 16 bits which hold the MSS value for that cookie, * 		and the top 16 bits are unused. * * 		On failure, the returned value is one of the following: * * 		**-EINVAL** if *th_len* is invalid. * * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len) * 	Description * 		Try to issue a SYN cookie for the packet with corresponding * 		IPv6/TCP headers, *iph* and *th*, without depending on a * 		listening socket. * * 		*iph* points to the IPv6 header. * * 		*th* points to the start of the TCP header, while *th_len* * 		contains the length of the TCP header (at least * 		**sizeof**\ (**struct tcphdr**)). * 	Return * 		On success, the lower 32 bits hold the generated SYN cookie, * 		followed by 16 bits which hold the MSS value for that cookie, * 		and the top 16 bits are unused. * * 		On failure, the returned value is one of the following: * * 		**-EINVAL** if *th_len* is invalid. * * 		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
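 *
 * 		Taken together, the dynptr ring buffer helpers above follow a
 * 		reserve/commit pattern. A minimal sketch (illustrative only,
 * 		not part of the UAPI; the map name "events" and the *data*
 * 		buffer are hypothetical):
 *
 * 			struct bpf_dynptr ptr;
 *
 * 			if (bpf_ringbuf_reserve_dynptr(&events, 64, 0, &ptr)) {
 * 				// Discard is required even on a failed
 * 				// reservation; the verifier enforces it.
 * 				bpf_ringbuf_discard_dynptr(&ptr, 0);
 * 				return 0;
 * 			}
 * 			bpf_dynptr_write(&ptr, 0, data, 64, 0);
 * 			bpf_ringbuf_submit_dynptr(&ptr, 0);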
* * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv4 header. * * *th* points to the TCP header. * Return * 0 if *iph* and *th* are a valid SYN cookie ACK. * * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. * * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK * without depending on a listening socket. * * *iph* points to the IPv6 header. * * *th* points to the TCP header. * Return * 0 if *iph* and *th* are a valid SYN cookie ACK. * * On failure, the returned value is one of the following: * * **-EACCES** if the SYN cookie is not valid. * * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ FN(map_lookup_elem), \ FN(map_update_elem), \ FN(map_delete_elem), \ FN(probe_read), \ FN(ktime_get_ns), \ FN(trace_printk), \ FN(get_prandom_u32), \ FN(get_smp_processor_id), \ FN(skb_store_bytes), \ FN(l3_csum_replace), \ FN(l4_csum_replace), \ FN(tail_call), \ FN(clone_redirect), \ FN(get_current_pid_tgid), \ FN(get_current_uid_gid), \ FN(get_current_comm), \ FN(get_cgroup_classid), \ FN(skb_vlan_push), \ FN(skb_vlan_pop), \ FN(skb_get_tunnel_key), \ FN(skb_set_tunnel_key), \ FN(perf_event_read), \ FN(redirect), \ FN(get_route_realm), \ FN(perf_event_output), \ FN(skb_load_bytes), \ FN(get_stackid), \ FN(csum_diff), \ FN(skb_get_tunnel_opt), \ FN(skb_set_tunnel_opt), \ FN(skb_change_proto), \ FN(skb_change_type), \ FN(skb_under_cgroup), \ FN(get_hash_recalc), \ FN(get_current_task), \ FN(probe_write_user), \ FN(current_task_under_cgroup), \ FN(skb_change_tail), \ FN(skb_pull_data), \ FN(csum_update), \ FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ FN(xdp_adjust_head), \ FN(probe_read_str), \ FN(get_socket_cookie), \ FN(get_socket_uid), \ FN(set_hash), \ FN(setsockopt), \ FN(skb_adjust_room), \ FN(redirect_map), \ FN(sk_redirect_map), \ FN(sock_map_update), \ FN(xdp_adjust_meta), \ FN(perf_event_read_value), \ FN(perf_prog_read_value), \ FN(getsockopt), \ FN(override_return), \ FN(sock_ops_cb_flags_set), \ FN(msg_redirect_map), \ FN(msg_apply_bytes), \ FN(msg_cork_bytes), \ FN(msg_pull_data), \ FN(bind), \ FN(xdp_adjust_tail), \ FN(skb_get_xfrm_state), \ FN(get_stack), \ FN(skb_load_bytes_relative), \ FN(fib_lookup), \ FN(sock_hash_update), \ FN(msg_redirect_hash), \ FN(sk_redirect_hash), \ FN(lwt_push_encap), \ FN(lwt_seg6_store_bytes), \ FN(lwt_seg6_adjust_srh), \ FN(lwt_seg6_action), \ FN(rc_repeat), \ FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ FN(skb_ancestor_cgroup_id), \ FN(sk_lookup_tcp), \ FN(sk_lookup_udp), \ FN(sk_release), \ FN(map_push_elem), \ FN(map_pop_elem), \ FN(map_peek_elem), \ FN(msg_push_data), \ FN(msg_pop_data), \ FN(rc_pointer_rel), \ FN(spin_lock), \ FN(spin_unlock), \ FN(sk_fullsock), \ FN(tcp_sock), \ FN(skb_ecn_set_ce), \ FN(get_listener_sock), \ FN(skc_lookup_tcp), \ FN(tcp_check_syncookie), \ FN(sysctl_get_name), \ FN(sysctl_get_current_value), \ FN(sysctl_get_new_value), \ FN(sysctl_set_new_value), \ FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ FN(sk_storage_delete), \ FN(send_signal), \ FN(tcp_gen_syncookie), \ FN(skb_output), \ FN(probe_read_user), \ FN(probe_read_kernel), \ 
FN(probe_read_user_str), \ FN(probe_read_kernel_str), \ FN(tcp_send_ack), \ FN(send_signal_thread), \ FN(jiffies64), \ FN(read_branch_records), \ FN(get_ns_current_pid_tgid), \ FN(xdp_output), \ FN(get_netns_cookie), \ FN(get_current_ancestor_cgroup_id), \ FN(sk_assign), \ FN(ktime_get_boot_ns), \ FN(seq_printf), \ FN(seq_write), \ FN(sk_cgroup_id), \ FN(sk_ancestor_cgroup_id), \ FN(ringbuf_output), \ FN(ringbuf_reserve), \ FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ FN(skc_to_tcp_request_sock), \ FN(skc_to_udp6_sock), \ FN(get_task_stack), \ FN(load_hdr_opt), \ FN(store_hdr_opt), \ FN(reserve_hdr_opt), \ FN(inode_storage_get), \ FN(inode_storage_delete), \ FN(d_path), \ FN(copy_from_user), \ FN(snprintf_btf), \ FN(seq_printf_btf), \ FN(skb_cgroup_classid), \ FN(redirect_neigh), \ FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ FN(task_storage_get), \ FN(task_storage_delete), \ FN(get_current_task_btf), \ FN(bprm_opts_set), \ FN(ktime_get_coarse_ns), \ FN(ima_inode_hash), \ FN(sock_from_file), \ FN(check_mtu), \ FN(for_each_map_elem), \ FN(snprintf), \ FN(sys_bpf), \ FN(btf_find_by_name_kind), \ FN(sys_close), \ FN(timer_init), \ FN(timer_set_callback), \ FN(timer_start), \ FN(timer_cancel), \ FN(get_func_ip), \ FN(get_attach_cookie), \ FN(task_pt_regs), \ FN(get_branch_snapshot), \ FN(trace_vprintk), \ FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ FN(find_vma), \ FN(loop), \ FN(strncmp), \ FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ FN(get_retval), \ FN(set_retval), \ FN(xdp_get_buff_len), \ FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ FN(skb_set_tstamp), \ FN(ima_file_hash), \ FN(kptr_xchg), \ FN(map_lookup_percpu_elem), \ FN(skc_to_mptcp_sock), \ FN(dynptr_from_mem), \ FN(ringbuf_reserve_dynptr), \ FN(ringbuf_submit_dynptr), \ FN(ringbuf_discard_dynptr), \ FN(dynptr_read), \ FN(dynptr_write), \ FN(dynptr_data), \ FN(tcp_raw_gen_syncookie_ipv4), \ FN(tcp_raw_gen_syncookie_ipv6), \ FN(tcp_raw_check_syncookie_ipv4), \ FN(tcp_raw_check_syncookie_ipv6), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call */ #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x enum bpf_func_id { __BPF_FUNC_MAPPER(__BPF_ENUM_FN) __BPF_FUNC_MAX_ID, }; #undef __BPF_ENUM_FN /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ enum { BPF_F_RECOMPUTE_CSUM = (1ULL << 0), BPF_F_INVALIDATE_HASH = (1ULL << 1), }; /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. */ enum { BPF_F_HDR_FIELD_MASK = 0xfULL, }; /* BPF_FUNC_l4_csum_replace flags. */ enum { BPF_F_PSEUDO_HDR = (1ULL << 4), BPF_F_MARK_MANGLED_0 = (1ULL << 5), BPF_F_MARK_ENFORCE = (1ULL << 6), }; /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ enum { BPF_F_INGRESS = (1ULL << 0), }; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ enum { BPF_F_TUNINFO_IPV6 = (1ULL << 0), }; /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ enum { BPF_F_SKIP_FIELD_MASK = 0xffULL, BPF_F_USER_STACK = (1ULL << 8), /* flags used by BPF_FUNC_get_stackid only. */ BPF_F_FAST_STACK_CMP = (1ULL << 9), BPF_F_REUSE_STACKID = (1ULL << 10), /* flags used by BPF_FUNC_get_stack only. */ BPF_F_USER_BUILD_ID = (1ULL << 11), }; /* BPF_FUNC_skb_set_tunnel_key flags. 
*/ enum { BPF_F_ZERO_CSUM_TX = (1ULL << 1), BPF_F_DONT_FRAGMENT = (1ULL << 2), BPF_F_SEQ_NUMBER = (1ULL << 3), }; /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ enum { BPF_F_INDEX_MASK = 0xffffffffULL, BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, /* BPF_FUNC_perf_event_output for sk_buff input context. */ BPF_F_CTXLEN_MASK = (0xfffffULL << 32), }; /* Current network namespace */ enum { BPF_F_CURRENT_NETNS = (-1L), }; /* BPF_FUNC_csum_level level values. */ enum { BPF_CSUM_LEVEL_QUERY, BPF_CSUM_LEVEL_INC, BPF_CSUM_LEVEL_DEC, BPF_CSUM_LEVEL_RESET, }; /* BPF_FUNC_skb_adjust_room flags. */ enum { BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), }; enum { BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, }; #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ BPF_ADJ_ROOM_ENCAP_L2_MASK) \ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) /* BPF_FUNC_sysctl_get_name flags. */ enum { BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), }; /* BPF_FUNC__storage_get flags */ enum { BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. */ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, }; /* BPF_FUNC_read_branch_records flags. */ enum { BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), }; /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and * BPF_FUNC_bpf_ringbuf_output flags. */ enum { BPF_RB_NO_WAKEUP = (1ULL << 0), BPF_RB_FORCE_WAKEUP = (1ULL << 1), }; /* BPF_FUNC_bpf_ringbuf_query flags */ enum { BPF_RB_AVAIL_DATA = 0, BPF_RB_RING_SIZE = 1, BPF_RB_CONS_POS = 2, BPF_RB_PROD_POS = 3, }; /* BPF ring buffer constants */ enum { BPF_RINGBUF_BUSY_BIT = (1U << 31), BPF_RINGBUF_DISCARD_BIT = (1U << 30), BPF_RINGBUF_HDR_SZ = 8, }; /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ enum { BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), }; /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, BPF_ADJ_ROOM_MAC, }; /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ enum bpf_hdr_start_off { BPF_HDR_START_MAC, BPF_HDR_START_NET, }; /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6, BPF_LWT_ENCAP_SEG6_INLINE, BPF_LWT_ENCAP_IP, }; /* Flags for bpf_bprm_opts_set helper */ enum { BPF_F_BPRM_SECUREEXEC = (1ULL << 0), }; /* Flags for bpf_redirect_map helper */ enum { BPF_F_BROADCAST = (1ULL << 3), BPF_F_EXCLUDE_INGRESS = (1ULL << 4), }; #define __bpf_md_ptr(type, name) \ union { \ type name; \ __u64 :64; \ } __attribute__((aligned(8))) enum { BPF_SKB_TSTAMP_UNSPEC, BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC * and try to deduce it by ingress, egress or skb->sk->sk_clockid. */ }; /* user accessible mirror of in-kernel sk_buff. 
* new fields can only be added to the end of this structure */ struct __sk_buff { __u32 len; __u32 pkt_type; __u32 mark; __u32 queue_mapping; __u32 protocol; __u32 vlan_present; __u32 vlan_tci; __u32 vlan_proto; __u32 priority; __u32 ingress_ifindex; __u32 ifindex; __u32 tc_index; __u32 cb[5]; __u32 hash; __u32 tc_classid; __u32 data; __u32 data_end; __u32 napi_id; /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ /* ... here. */ __u32 data_meta; __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; __u32 wire_len; __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; __u8 tstamp_type; __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; struct bpf_tunnel_key { __u32 tunnel_id; union { __u32 remote_ipv4; __u32 remote_ipv6[4]; }; __u8 tunnel_tos; __u8 tunnel_ttl; __u16 tunnel_ext; /* Padding, future use. */ __u32 tunnel_label; union { __u32 local_ipv4; __u32 local_ipv6[4]; }; }; /* user accessible mirror of in-kernel xfrm_state. * new fields can only be added to the end of this structure */ struct bpf_xfrm_state { __u32 reqid; __u32 spi; /* Stored in network byte order */ __u16 family; __u16 ext; /* Padding, future use. */ union { __u32 remote_ipv4; /* Stored in network byte order */ __u32 remote_ipv6[4]; /* Stored in network byte order */ }; }; /* Generic BPF return codes which all BPF program types may support. * The values are binary compatible with their TC_ACT_* counterpart to * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT * programs. * * XDP is handled separately, see XDP_*. */ enum bpf_ret_code { BPF_OK = 0, /* 1 reserved */ BPF_DROP = 2, /* 3-6 reserved */ BPF_REDIRECT = 7, /* >127 are reserved for prog type specific return codes. * * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been * changed and should be routed based on its new L3 header. * (This is an L3 redirect, as opposed to L2 redirect * represented by BPF_REDIRECT above). */ BPF_LWT_REROUTE = 128, }; struct bpf_sock { __u32 bound_dev_if; __u32 family; __u32 type; __u32 protocol; __u32 mark; __u32 priority; /* IP address also allows 1- and 2-byte access */ __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ __be16 dst_port; /* network byte order */ __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; __s32 rx_queue_mapping; }; struct bpf_tcp_sock { __u32 snd_cwnd; /* Sending congestion window */ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ __u32 rtt_min; __u32 snd_ssthresh; /* Slow start size threshold */ __u32 rcv_nxt; /* What we want to receive next */ __u32 snd_nxt; /* Next sequence we send */ __u32 snd_una; /* First byte we want an ack for */ __u32 mss_cache; /* Cached effective mss, not including SACKS */ __u32 ecn_flags; /* ECN status bits. */ __u32 rate_delivered; /* saved rate sample: packets delivered */ __u32 rate_interval_us; /* saved rate sample: time elapsed */ __u32 packets_out; /* Packets which are "in flight" */ __u32 retrans_out; /* Retransmitted packets out */ __u32 total_retrans; /* Total retransmits for entire connection */ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn * total number of segments in.
*/ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn * total number of data segments in. */ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut * The total number of segments sent. */ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut * total number of data segments sent. */ __u32 lost_out; /* Lost packets */ __u32 sacked_out; /* SACK'd packets */ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived * sum(delta(rcv_nxt)), or how many bytes * were acked. */ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. */ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups * total number of DSACK blocks received */ __u32 delivered; /* Total data packets delivered incl. rexmits */ __u32 delivered_ce; /* Like the above but only ECE marked packets */ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ }; struct bpf_sock_tuple { union { struct { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } ipv4; struct { __be32 saddr[4]; __be32 daddr[4]; __be16 sport; __be16 dport; } ipv6; }; }; struct bpf_xdp_sock { __u32 queue_id; }; #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. * A valid XDP program must return one of these defined values. All other * return codes are reserved for future use. Unknown return codes will * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). */ enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook * new fields must be added to the end of this structure */ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; /* Below access go through struct xdp_rxq_info */ __u32 ingress_ifindex; /* rxq->dev->ifindex */ __u32 rx_queue_index; /* rxq->queue_index */ __u32 egress_ifindex; /* txq->dev->ifindex */ }; /* DEVMAP map-value layout * * The struct data-layout of map-value is a configuration interface. * New members can only be added to the end of this structure. */ struct bpf_devmap_val { __u32 ifindex; /* device index */ union { int fd; /* prog fd on map write */ __u32 id; /* prog id on map read */ } bpf_prog; }; /* CPUMAP map-value layout * * The struct data-layout of map-value is a configuration interface. * New members can only be added to the end of this structure. */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ union { int fd; /* prog fd on map write */ __u32 id; /* prog id on map read */ } bpf_prog; }; enum sk_action { SK_DROP = 0, SK_PASS, }; /* user accessible metadata for SK_MSG packet hook, new fields must * be added to the end of this structure */ struct sk_msg_md { __bpf_md_ptr(void *, data); __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 size; /* Total size of sk_msg */ __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ }; struct sk_reuseport_md { /* * Start of directly accessible data. It begins from * the tcp/udp header. */ __bpf_md_ptr(void *, data); /* End of directly accessible data */ __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) * could be less than this "len". 
Those bytes could be * indirectly read by a helper "bpf_skb_load_bytes()". */ __u32 len; /* * Eth protocol in the mac header (network byte order). e.g. * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) */ __u32 eth_protocol; __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ /* When reuse->migrating_sk is NULL, it is selecting a sk for the * new incoming connection request (e.g. selecting a listen sk for * the received SYN in the TCP case). reuse->sk is one of the sk * in the reuseport group. The bpf prog can use reuse->sk to learn * the local listening ip/port without looking into the skb. * * When reuse->migrating_sk is not NULL, reuse->sk is closed and * reuse->migrating_sk is the socket that needs to be migrated * to another listening socket. migrating_sk could be a fullsock * sk that is fully established or a reqsk that is in-the-middle * of 3-way handshake. */ __bpf_md_ptr(struct bpf_sock *, sk); __bpf_md_ptr(struct bpf_sock *, migrating_sk); }; #define BPF_TAG_SIZE 8 struct bpf_prog_info { __u32 type; __u32 id; __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; __aligned_u64 jited_prog_insns; __aligned_u64 xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; __u32 :31; /* alignment pad */ __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; __aligned_u64 jited_ksyms; __aligned_u64 jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; __aligned_u64 func_info; __u32 nr_func_info; __u32 nr_line_info; __aligned_u64 line_info; __aligned_u64 jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; __aligned_u64 prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; __u32 verified_insns; __u32 attach_btf_obj_id; __u32 attach_btf_id; } __attribute__((aligned(8))); struct bpf_map_info { __u32 type; __u32 id; __u32 key_size; __u32 value_size; __u32 max_entries; __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 btf_vmlinux_value_type_id; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 :32; /* alignment pad */ __u64 map_extra; } __attribute__((aligned(8))); struct bpf_btf_info { __aligned_u64 btf; __u32 btf_size; __u32 id; __aligned_u64 name; __u32 name_len; __u32 kernel_btf; } __attribute__((aligned(8))); struct bpf_link_info { __u32 type; __u32 id; __u32 prog_id; union { struct { __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ __u32 tp_name_len; /* in/out: tp_name buffer len */ } raw_tracepoint; struct { __u32 attach_type; __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ __u32 target_btf_id; /* BTF type id inside the object */ } tracing; struct { __u64 cgroup_id; __u32 attach_type; } cgroup; struct { __aligned_u64 target_name; /* in/out: target_name buffer ptr */ __u32 target_name_len; /* in/out: target_name buffer len */ union { struct { __u32 map_id; } map; }; } iter; struct { __u32 netns_ino; __u32 attach_type; } netns; struct { __u32 ifindex; } xdp; }; } __attribute__((aligned(8))); /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on * attach type). 
*/ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order */ __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ __bpf_md_ptr(struct bpf_sock *, sk); }; /* User bpf_sock_ops struct to access socket values and specify request ops * and their replies. * Some of these fields are in network (big-endian) byte order and may need * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). * New fields can only be added at the end of this structure */ struct bpf_sock_ops { __u32 op; union { __u32 args[4]; /* Optionally passed to bpf program */ __u32 reply; /* Returned by bpf program */ __u32 replylong[4]; /* Optionally returned by bpf prog */ }; __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 is_fullsock; /* Some TCP fields are only valid if * there is a full socket. If not, the * fields read as zero. */ __u32 snd_cwnd; __u32 srtt_us; /* Averaged RTT << 3 in usecs */ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ __u32 state; __u32 rtt_min; __u32 snd_ssthresh; __u32 rcv_nxt; __u32 snd_nxt; __u32 snd_una; __u32 mss_cache; __u32 ecn_flags; __u32 rate_delivered; __u32 rate_interval_us; __u32 packets_out; __u32 retrans_out; __u32 total_retrans; __u32 segs_in; __u32 data_segs_in; __u32 segs_out; __u32 data_segs_out; __u32 lost_out; __u32 sacked_out; __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; __bpf_md_ptr(struct bpf_sock *, sk); /* [skb_data, skb_data_end) covers the whole TCP header. * * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the * header has not been written. * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have * been written so far. * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes * the 3WHS. * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes * the 3WHS. * * bpf_load_hdr_opt() can also be used to read a particular option. */ __bpf_md_ptr(void *, skb_data); __bpf_md_ptr(void *, skb_data_end); __u32 skb_len; /* The total length of a packet. * It includes the header, options, * and payload. */ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides * an easy way to check for tcp_flags * without parsing skb_data. * * In particular, the skb_tcp_flags * will still be available in * BPF_SOCK_OPS_HDR_OPT_LEN even though * the outgoing header has not * been written yet. */ }; /* Definitions for bpf_sock_ops_cb_flags */ enum { BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), /* Call bpf for all received TCP headers.
The bpf prog will be * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB * * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB * for the header option related helpers that will be useful * to the bpf programs. * * It could be used at the client/active side (i.e. connect() side) * when the server told it that the server was in syncookie * mode and required the active side to resend the bpf-written * options. The active side can keep writing the bpf-options until * it received a valid packet from the server side to confirm * the earlier packet (and options) has been received. The later * example patch is using it like this at the active side when the * server is in syncookie mode. * * The bpf prog will usually turn this off in the common cases. */ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), /* Call bpf when kernel has received a header option that * the kernel cannot handle. The bpf prog will be called under * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. * * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB * for the header option related helpers that will be useful * to the bpf programs. */ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), /* Call bpf when the kernel is writing header options for the * outgoing packet. The bpf prog will first be called * to reserve space in a skb under * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then * the bpf prog will be called to write the header option(s) * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. * * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option * related helpers that will be useful to the bpf programs. * * The kernel gets its chance to reserve space and write * options first before the BPF program does. */ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), /* Mask of all currently supported cb flags */ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, }; /* List of known BPF sock_ops operators. * New entries can only be added at the end */ enum { BPF_SOCK_OPS_VOID, BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or * -1 if default value should be used */ BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized * window (in packets) or -1 if default * value should be used */ BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an * active connection is initialized */ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an * active connection is * established */ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a * passive connection is * established */ BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control * needs ECN */ BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is * based on the path and may be * dependent on the congestion control * algorithm. In general it indicates * a congestion threshold. RTTs above * this indicate congestion */ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. * Arg1: value of icsk_retransmits * Arg2: value of icsk_rto * Arg3: whether RTO has expired */ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. * Arg1: sequence number of 1st byte * Arg2: # segments * Arg3: return value of * tcp_transmit_skb (0 => success) */ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. * Arg1: old_state * Arg2: new_state */ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after * socket transition to LISTEN state. */ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. */ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 
* It will be called to handle * the packets received at * an already established * connection. * * sock_ops->skb_data: * Referring to the received skb. * It covers the TCP header only. * * bpf_load_hdr_opt() can also * be used to search for a * particular option. */ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the * header option later in * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. * Arg1: bool want_cookie. (in * writing SYNACK only) * * sock_ops->skb_data: * Not available because no header has * been written yet. * * sock_ops->skb_tcp_flags: * The tcp_flags of the * outgoing skb. (e.g. SYN, ACK, FIN). * * bpf_reserve_hdr_opt() should * be used to reserve space. */ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options * Arg1: bool want_cookie. (in * writing SYNACK only) * * sock_ops->skb_data: * Referring to the outgoing skb. * It covers the TCP header * that has already been written * by the kernel and the * earlier bpf-progs. * * sock_ops->skb_tcp_flags: * The tcp_flags of the outgoing * skb. (e.g. SYN, ACK, FIN). * * bpf_store_hdr_opt() should * be used to write the * option. * * bpf_load_hdr_opt() can also * be used to search for a * particular option that * has already been written * by the kernel or the * earlier bpf-progs. */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect * changes between the TCP and BPF versions. Ideally this should never happen. * If it does, we need to add code to convert them before calling * the BPF sock_ops function. */ enum { BPF_TCP_ESTABLISHED = 1, BPF_TCP_SYN_SENT, BPF_TCP_SYN_RECV, BPF_TCP_FIN_WAIT1, BPF_TCP_FIN_WAIT2, BPF_TCP_TIME_WAIT, BPF_TCP_CLOSE, BPF_TCP_CLOSE_WAIT, BPF_TCP_LAST_ACK, BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, BPF_TCP_MAX_STATES /* Leave at the end! */ }; enum { TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */ /* Copy the SYN pkt to optval * * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit * to only getting from the saved_syn. It can either get the * syn packet from: * * 1. the just-received SYN packet (only available when writing the * SYNACK). It will be useful when it is not necessary to * save the SYN packet for later use. It is also the only way * to get the SYN during syncookie mode because the syn * packet cannot be saved during syncookie. * * OR * * 2. the earlier saved syn which was done by * bpf_setsockopt(TCP_SAVE_SYN). * * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the * SYN packet is obtained. * * If the bpf-prog does not need the IP[46] header, the * bpf-prog can avoid parsing the IP header by using * TCP_BPF_SYN. Otherwise, the bpf-prog can get both * IP[46] and TCP header by using TCP_BPF_SYN_IP. * * >0: Total number of bytes copied * -ENOSPC: Not enough space in optval. Only *optlen* bytes are copied. * -ENOENT: The SYN skb is not available now and the earlier SYN pkt * was not saved by setsockopt(TCP_SAVE_SYN). */ TCP_BPF_SYN = 1005, /* Copy the TCP header */ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ }; enum { BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), }; /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
*/ enum { BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the * total option spaces * required for an established * sk in order to calculate the * MSS. No skb is actually * sent. */ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode * when sending a SYN. */ }; struct bpf_perf_event_value { __u64 counter; __u64 enabled; __u64 running; }; enum { BPF_DEVCG_ACC_MKNOD = (1ULL << 0), BPF_DEVCG_ACC_READ = (1ULL << 1), BPF_DEVCG_ACC_WRITE = (1ULL << 2), }; enum { BPF_DEVCG_DEV_BLOCK = (1ULL << 0), BPF_DEVCG_DEV_CHAR = (1ULL << 1), }; struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ __u32 access_type; __u32 major; __u32 minor; }; struct bpf_raw_tracepoint_args { __u64 args[0]; }; /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), }; enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ }; struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop */ __u8 family; /* set if lookup is to consider L4 data - e.g., FIB rules */ __u8 l4_protocol; __be16 sport; __be16 dport; union { /* used for MTU check */ /* input to lookup */ __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */ /* output: MTU value */ __u16 mtu_result; }; /* input: L3 device index for lookup * output: device index from FIB lookup */ __u32 ifindex; union { /* inputs to lookup */ __u8 tos; /* AF_INET */ __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; }; union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ }; /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in * network header. 
output: bpf_fib_lookup sets to gateway address * if FIB lookup returns gateway route */ union { __be32 ipv4_dst; __u32 ipv6_dst[4]; /* in6_addr; network order */ }; /* output */ __be16 h_vlan_proto; __be16 h_vlan_TCI; __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; struct bpf_redir_neigh { /* network family for lookup (AF_INET, AF_INET6) */ __u32 nh_family; /* network address of nexthop; skips fib lookup to find gateway */ union { __be32 ipv4_nh; __u32 ipv6_nh[4]; /* in6_addr; network order */ }; }; /* bpf_check_mtu flags */ enum bpf_check_mtu_flags { BPF_MTU_CHK_SEGS = (1U << 0), }; enum bpf_check_mtu_ret { BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */ BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */ BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */ }; enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ BPF_FD_TYPE_UPROBE, /* filename + offset */ BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; enum { BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), }; struct bpf_flow_keys { __u16 nhoff; __u16 thoff; __u16 addr_proto; /* ETH_P_* of valid addrs */ __u8 is_frag; __u8 is_first_frag; __u8 is_encap; __u8 ip_proto; __be16 n_proto; __be16 sport; __be16 dport; union { struct { __be32 ipv4_src; __be32 ipv4_dst; }; struct { __u32 ipv6_src[4]; /* in6_addr; network order */ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; __u32 flags; __be32 flow_label; }; struct bpf_func_info { __u32 insn_off; __u32 type_id; }; #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) struct bpf_line_info { __u32 insn_off; __u32 file_name_off; __u32 line_off; __u32 line_col; }; struct bpf_spin_lock { __u32 val; }; struct bpf_timer { __u64 :64; __u64 :64; } __attribute__((aligned(8))); struct bpf_dynptr { __u64 :64; __u64 :64; } __attribute__((aligned(8))); struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. */ __u32 file_pos; /* Sysctl file position to read from, write to. * Allows 1,2,4-byte read and 4-byte write. */ }; struct bpf_sockopt { __bpf_md_ptr(struct bpf_sock *, sk); __bpf_md_ptr(void *, optval); __bpf_md_ptr(void *, optval_end); __s32 level; __s32 optname; __s32 optlen; __s32 retval; }; struct bpf_pidns_info { __u32 pid; __u32 tgid; }; /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ struct bpf_sk_lookup { union { __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ }; __u32 family; /* Protocol family (AF_INET, AF_INET6) */ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ __u32 remote_ip4; /* Network byte order */ __u32 remote_ip6[4]; /* Network byte order */ __be16 remote_port; /* Network byte order */ __u16 :16; /* Zero padding */ __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */ }; /* * struct btf_ptr is used for typed pointer representation; the * type id is used to render the pointer data as the appropriate type * via the bpf_snprintf_btf() helper described above.
A flags field - * potentially to specify additional details about the BTF pointer * (rather than its mode of display) - is included for future use. * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. */ struct btf_ptr { void *ptr; __u32 type_id; __u32 flags; /* BTF ptr flags; unused at present. */ }; /* * Flags to control bpf_snprintf_btf() behaviour. * - BTF_F_COMPACT: no formatting around type information * - BTF_F_NONAME: no struct/union member names/types * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; * equivalent to %px. * - BTF_F_ZERO: show zero-valued struct/union members; they * are not displayed by default */ enum { BTF_F_COMPACT = (1ULL << 0), BTF_F_NONAME = (1ULL << 1), BTF_F_PTR_RAW = (1ULL << 2), BTF_F_ZERO = (1ULL << 3), }; /* bpf_core_relo_kind encodes which aspect of captured field/type/enum value * has to be adjusted by relocations. It is emitted by llvm and passed to * libbpf and later to the kernel. */ enum bpf_core_relo_kind { BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */ }; /* * "struct bpf_core_relo" is used to pass relocation data from LLVM to libbpf * and from libbpf to the kernel. * * CO-RE relocation captures the following data: * - insn_off - instruction offset (in bytes) within a BPF program that needs * its insn->imm field to be relocated with actual field info; * - type_id - BTF type ID of the "root" (containing) entity of a relocatable * type or field; * - access_str_off - offset into corresponding .BTF string section. String * interpretation depends on specific relocation kind: * - for field-based relocations, string encodes an accessed field using * a sequence of field and array indices, separated by colon (:). It's * conceptually very close to LLVM's getelementptr ([0]) instruction's * arguments for identifying offset to a field. * - for type-based relocations, the string is expected to be just "0"; * - for enum value-based relocations, string contains an index of enum * value within its enum type; * - kind - one of enum bpf_core_relo_kind; * * Example: * struct sample { * int a; * struct { * int b[10]; * }; * }; * * struct sample *s = ...; * int *x = &s->a; // encoded as "0:0" (a is field #0) * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, * // b is field #0 inside anon struct, accessing elem #5) * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) * * type_id for all relocs in this example will capture BTF type id of * `struct sample`.
* * Such a relocation is emitted when using the __builtin_preserve_access_index() * Clang built-in, passing an expression that captures a field address, e.g.: * * bpf_probe_read(&dst, sizeof(dst), * __builtin_preserve_access_index(&src->a.b.c)); * * In this case Clang will emit a field relocation recording the necessary data to * be able to find the offset of the embedded `a.b.c` field within the `src` struct. * * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction */ struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; enum bpf_core_relo_kind kind; }; #endif /* _UAPI__LINUX_BPF_H__ */ xdp-tools-1.5.4/headers/linux/perf-sys.h0000644000175100001660000000265215003640462017515 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /* Copied from $(LINUX)/tools/perf/perf-sys.h (kernel 4.18) */ #ifndef _PERF_SYS_H #define _PERF_SYS_H #include <unistd.h> #include <sys/types.h> #include <sys/syscall.h> #include <linux/types.h> #include <linux/perf_event.h> /* * remove the following headers to allow for userspace program compilation * #include <linux/compiler.h> * #include <asm/barrier.h> */ #ifdef __powerpc__ #define CPUINFO_PROC {"cpu"} #endif #ifdef __s390__ #define CPUINFO_PROC {"vendor_id"} #endif #ifdef __sh__ #define CPUINFO_PROC {"cpu type"} #endif #ifdef __hppa__ #define CPUINFO_PROC {"cpu"} #endif #ifdef __sparc__ #define CPUINFO_PROC {"cpu"} #endif #ifdef __alpha__ #define CPUINFO_PROC {"cpu model"} #endif #ifdef __arm__ #define CPUINFO_PROC {"model name", "Processor"} #endif #ifdef __mips__ #define CPUINFO_PROC {"cpu model"} #endif #ifdef __arc__ #define CPUINFO_PROC {"Processor"} #endif #ifdef __xtensa__ #define CPUINFO_PROC {"core ID"} #endif #ifndef CPUINFO_PROC #define CPUINFO_PROC { "model name", } #endif static inline int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu, int group_fd, unsigned long flags) { int fd; fd = syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); #ifdef HAVE_ATTR_TEST if (unlikely(test_attr__enabled)) test_attr__open(attr, pid, cpu, fd, group_fd, flags); #endif return fd; } #endif /* _PERF_SYS_H */ xdp-tools-1.5.4/headers/linux/icmp.h0000644000175100001660000001130015003640462016673 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the ICMP protocol. * * Version: @(#)icmp.h 1.0.3 04/28/93 * * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _UAPI_LINUX_ICMP_H #define _UAPI_LINUX_ICMP_H #include <linux/types.h> #include <asm/byteorder.h> #include <linux/if.h> #include <linux/in6.h> #define ICMP_ECHOREPLY 0 /* Echo Reply */ #define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ #define ICMP_SOURCE_QUENCH 4 /* Source Quench */ #define ICMP_REDIRECT 5 /* Redirect (change route) */ #define ICMP_ECHO 8 /* Echo Request */ #define ICMP_TIME_EXCEEDED 11 /* Time Exceeded */ #define ICMP_PARAMETERPROB 12 /* Parameter Problem */ #define ICMP_TIMESTAMP 13 /* Timestamp Request */ #define ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */ #define ICMP_INFO_REQUEST 15 /* Information Request */ #define ICMP_INFO_REPLY 16 /* Information Reply */ #define ICMP_ADDRESS 17 /* Address Mask Request */ #define ICMP_ADDRESSREPLY 18 /* Address Mask Reply */ #define NR_ICMP_TYPES 18 /* Codes for UNREACH.
*/ #define ICMP_NET_UNREACH 0 /* Network Unreachable */ #define ICMP_HOST_UNREACH 1 /* Host Unreachable */ #define ICMP_PROT_UNREACH 2 /* Protocol Unreachable */ #define ICMP_PORT_UNREACH 3 /* Port Unreachable */ #define ICMP_FRAG_NEEDED 4 /* Fragmentation Needed/DF set */ #define ICMP_SR_FAILED 5 /* Source Route failed */ #define ICMP_NET_UNKNOWN 6 #define ICMP_HOST_UNKNOWN 7 #define ICMP_HOST_ISOLATED 8 #define ICMP_NET_ANO 9 #define ICMP_HOST_ANO 10 #define ICMP_NET_UNR_TOS 11 #define ICMP_HOST_UNR_TOS 12 #define ICMP_PKT_FILTERED 13 /* Packet filtered */ #define ICMP_PREC_VIOLATION 14 /* Precedence violation */ #define ICMP_PREC_CUTOFF 15 /* Precedence cut off */ #define NR_ICMP_UNREACH 15 /* instead of hardcoding immediate value */ /* Codes for REDIRECT. */ #define ICMP_REDIR_NET 0 /* Redirect Net */ #define ICMP_REDIR_HOST 1 /* Redirect Host */ #define ICMP_REDIR_NETTOS 2 /* Redirect Net for TOS */ #define ICMP_REDIR_HOSTTOS 3 /* Redirect Host for TOS */ /* Codes for TIME_EXCEEDED. */ #define ICMP_EXC_TTL 0 /* TTL count exceeded */ #define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */ /* Codes for EXT_ECHO (PROBE) */ #define ICMP_EXT_ECHO 42 #define ICMP_EXT_ECHOREPLY 43 #define ICMP_EXT_CODE_MAL_QUERY 1 /* Malformed Query */ #define ICMP_EXT_CODE_NO_IF 2 /* No such Interface */ #define ICMP_EXT_CODE_NO_TABLE_ENT 3 /* No such Table Entry */ #define ICMP_EXT_CODE_MULT_IFS 4 /* Multiple Interfaces Satisfy Query */ /* Constants for EXT_ECHO (PROBE) */ #define ICMP_EXT_ECHOREPLY_ACTIVE (1 << 2)/* active bit in reply message */ #define ICMP_EXT_ECHOREPLY_IPV4 (1 << 1)/* ipv4 bit in reply message */ #define ICMP_EXT_ECHOREPLY_IPV6 (1 << 0)/* ipv6 bit in reply message */ #define ICMP_EXT_ECHO_CTYPE_NAME 1 #define ICMP_EXT_ECHO_CTYPE_INDEX 2 #define ICMP_EXT_ECHO_CTYPE_ADDR 3 #define ICMP_AFI_IP 1 /* Address Family Identifier for ipv4 */ #define ICMP_AFI_IP6 2 /* Address Family Identifier for ipv6 */ struct icmphdr { __u8 type; __u8 code; __sum16 checksum; union { struct { __be16 id; __be16 sequence; } echo; __be32 gateway; struct { __be16 __unused; __be16 mtu; } frag; __u8 reserved[4]; } un; }; /* * constants for (set|get)sockopt */ #define ICMP_FILTER 1 struct icmp_filter { __u32 data; }; /* RFC 4884 extension struct: one per message */ struct icmp_ext_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) __u8 reserved1:4, version:4; #elif defined(__BIG_ENDIAN_BITFIELD) __u8 version:4, reserved1:4; #else #error "Please fix <asm/byteorder.h>" #endif __u8 reserved2; __sum16 checksum; }; /* RFC 4884 extension object header: one for each object */ struct icmp_extobj_hdr { __be16 length; __u8 class_num; __u8 class_type; }; /* RFC 8335: 2.1 Header for c-type 3 payload */ struct icmp_ext_echo_ctype3_hdr { __be16 afi; __u8 addrlen; __u8 reserved; }; /* RFC 8335: 2.1 Interface Identification Object */ struct icmp_ext_echo_iio { struct icmp_extobj_hdr extobj_hdr; union { char name[IFNAMSIZ]; __be32 ifindex; struct { struct icmp_ext_echo_ctype3_hdr ctype3_hdr; union { __be32 ipv4_addr; struct in6_addr ipv6_addr; } ip_addr; } addr; } ident; }; #endif /* _UAPI_LINUX_ICMP_H */ xdp-tools-1.5.4/headers/linux/netdev.h0000644000175100001660000000335415003640462017232 0ustar runnerdocker/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN uapi header */ #ifndef _UAPI_LINUX_NETDEV_H #define _UAPI_LINUX_NETDEV_H #define NETDEV_FAMILY_NAME "netdev" #define NETDEV_FAMILY_VERSION 1 /** * enum
netdev_xdp_act * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX) * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements * ndo_xdp_xmit callback. * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP * in zero copy mode. * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw * offloading. * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear * XDP buffer support in the driver napi callback. * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements * non-linear XDP buffer support in ndo_xdp_xmit callback. */ enum netdev_xdp_act { NETDEV_XDP_ACT_BASIC = 1, NETDEV_XDP_ACT_REDIRECT = 2, NETDEV_XDP_ACT_NDO_XMIT = 4, NETDEV_XDP_ACT_XSK_ZEROCOPY = 8, NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, NETDEV_XDP_ACT_MASK = 127, }; enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, NETDEV_A_DEV_XDP_FEATURES, __NETDEV_A_DEV_MAX, NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) }; enum { NETDEV_CMD_DEV_GET = 1, NETDEV_CMD_DEV_ADD_NTF, NETDEV_CMD_DEV_DEL_NTF, NETDEV_CMD_DEV_CHANGE_NTF, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) }; #define NETDEV_MCGRP_MGMT "mgmt" #endif /* _UAPI_LINUX_NETDEV_H */ xdp-tools-1.5.4/headers/linux/hlist.h0000644000175100001660000001203015003640462017061 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_HLIST_H #define __LINUX_HLIST_H struct list_head; struct rhash_head { struct rhash_head *next; }; #define HLIST_POISON_POINTER_DELTA 0 #define HLIST_POISON1 ((void *) 0x100 + HLIST_POISON_POINTER_DELTA) #define HLIST_POISON2 ((void *) 0x200 + HLIST_POISON_POINTER_DELTA) /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is * too wasteful. * You lose the ability to access the tail in O(1).
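 *
 * Example (illustrative sketch only; 'struct entry' and 'bucket' are
 * hypothetical names): a node embedded in a user struct, pushed onto a
 * statically defined list head:
 *
 *   struct entry { int key; struct hlist_node node; };
 *   HLIST_HEAD(bucket);
 *   struct entry e = { .key = 1 };
 *   hlist_add_head(&e.node, &bucket);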
*/ struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; } static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; __atomic_store_n(pprev, next, __ATOMIC_RELAXED); if (next) next->pprev = pprev; } static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = HLIST_POISON1; n->pprev = HLIST_POISON2; } static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; h->first = n; n->pprev = &h->first; } /* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; prev->next = n; n->pprev = &prev->next; if (n->next) n->next->pprev = &n->next; } /* after that we'll appear to be on some hlist and hlist_del will work */ static inline void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; } static inline bool hlist_fake(struct hlist_node *h) { return h->pprev == &h->next; } /* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */ static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ for (pos = (head)->first; pos ; pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ pos = n) #define hlist_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ }) /** * hlist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_continue - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_from - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. 
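 *
 * Example (illustrative sketch; 'struct entry' with an embedded member
 * named 'node' and do_something() are hypothetical): resume an iteration
 * from a cursor positioned by an earlier walk:
 *
 *   hlist_for_each_entry_from(pos, node) {
 *           do_something(pos);
 *   }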
*/ #define hlist_for_each_entry_from(pos, member) \ for (; pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*pos), member)) /** * list_for_each_from - iterate over a list from one of its nodes * @pos: the &struct list_head to use as a loop cursor, from where to start * @head: the head for your list. */ #define list_for_each_from(pos, head) \ for (; pos != (head); pos = pos->next) #endif xdp-tools-1.5.4/headers/linux/hashtable.h0000644000175100001660000001277015003640462017702 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /* * Statically sized hash table implementation * (C) 2012 Sasha Levin */ #ifndef _LINUX_HASHTABLE_H #define _LINUX_HASHTABLE_H #include #include #include #include #include #include #include #include "../lib/util/util.h" #define HASH_GOLDEN_RATIO_32 0x61C88647 #define HASH_GOLDEN_RATIO_64 0x61C8864680B583EBull #if (__SIZEOF_LONG__ * __CHAR_BIT__) == 32 #define HASH_GOLDEN_RATIO_PRIME HASH_GOLDEN_RATIO_32 #define hash_long(val, bits) hash_32(val, bits) #elif (__SIZEOF_LONG__ * __CHAR_BIT__) == 64 #define hash_long(val, bits) hash_64(val, bits) #define HASH_GOLDEN_RATIO_PRIME HASH_GOLDEN_RATIO_64 #else #error "Wordsize not 32 or 64" #endif static inline uint32_t __hash_32(uint32_t val) { return val * HASH_GOLDEN_RATIO_32; } static inline uint32_t hash_32(uint32_t val, unsigned int bits) { /* High bits are more random, so use them. */ return __hash_32(val) >> (32 - bits); } static inline uint32_t hash_64(uint64_t val, unsigned int bits) { #if LONG_TYPE_SIZE * CHAR_BIT == 64 /* 64x64-bit multiply is efficient on all 64-bit processors */ return val * HASH_GOLDEN_RATIO_64 >> (64 - bits); #else /* Hash 64 bits using only 32x32-bit multiply. */ return hash_32((uint32_t)val ^ __hash_32(val >> 32), bits); #endif } #define DEFINE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] #define HASH_SIZE(name) (ARRAY_SIZE(name)) #define HASH_BITS(name) ilogb(HASH_SIZE(name)) /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ #define hash_min(val, bits) \ (sizeof(val) <= 4 ? hash_32((uint32_t)val, bits) : hash_long((uint64_t)val, bits)) static inline void __hash_init(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) INIT_HLIST_HEAD(&ht[i]); } /** * hash_init - initialize a hash table * @hashtable: hashtable to be initialized * * Calculates the size of the hashtable from the given parameter, otherwise * same as hash_init_size. * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing. 
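 *
 * Example (illustrative sketch): DECLARE_HASHTABLE(tbl, 4) declares a
 * table with 1 << 4 = 16 buckets, which hash_init(tbl) then initializes:
 *
 *   DECLARE_HASHTABLE(tbl, 4);
 *   hash_init(tbl);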
*/ #define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) /** * hash_add - add an object to a hashtable * @hashtable: hashtable to add to * @node: the &struct hlist_node of the object to be added * @key: the key of the object to be added */ #define hash_add(hashtable, node, key) \ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) /** * hash_hashed - check whether an object is in any hashtable * @node: the &struct hlist_node of the object to be checked */ static inline bool hash_hashed(struct hlist_node *node) { return !hlist_unhashed(node); } static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) if (!hlist_empty(&ht[i])) return false; return true; } /** * hash_empty - check whether a hashtable is empty * @hashtable: hashtable to check * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing. */ #define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) /** * hash_del - remove an object from a hashtable * @node: &struct hlist_node of the object to remove */ static inline void hash_del(struct hlist_node *node) { hlist_del_init(node); } /** * hash_for_each - iterate over a hashtable * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each(name, bkt, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry(obj, &name[bkt], member) /** * hash_for_each_safe - iterate over a hashtable safe against removal of * hash entry * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @tmp: a &struct used for temporary storage * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each_safe(name, bkt, tmp, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) /** * hash_for_each_possible - iterate over all possible objects hashing to the * same bucket * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible(name, obj, member, key) \ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) /** * hash_for_each_possible_safe - iterate over all possible objects hashing to the * same bucket safe against removals * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @tmp: a &struct used for temporary storage * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible_safe(name, obj, tmp, member, key) \ hlist_for_each_entry_safe(obj, tmp,\ &name[hash_min(key, HASH_BITS(name))], member) #endif xdp-tools-1.5.4/headers/linux/jhash.h0000644000175100001660000001064715003640462017045 0ustar runnerdocker#ifndef _LINUX_JHASH_H #define _LINUX_JHASH_H /* Copied from $(LINUX)/include/linux/jhash.h (kernel 4.18) */ /* jhash.h: Jenkins hash support. * * Copyright (C) 2006. 
Bob Jenkins (bob_jenkins@burtleburtle.net) * * http://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * * lookup3.c, by Bob Jenkins, May 2006, Public Domain. * * These are functions for producing 32-bit hashes for hash table lookup. * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() * are externally useful functions. Routines to test the hash are included * if SELF_TEST is defined. You can use this free for any purpose. It's in * the public domain. It has no warranty. * * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) */ static inline __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> ((-shift) & 31)); } /* copy paste of jhash from kernel sources (include/linux/jhash.h) to make sure * LLVM can compile it into valid sequence of BPF instructions */ #define __jhash_mix(a, b, c) \ { \ a -= c; a ^= rol32(c, 4); c += b; \ b -= a; b ^= rol32(a, 6); a += c; \ c -= b; c ^= rol32(b, 8); b += a; \ a -= c; a ^= rol32(c, 16); c += b; \ b -= a; b ^= rol32(a, 19); a += c; \ c -= b; c ^= rol32(b, 4); b += a; \ } #define __jhash_final(a, b, c) \ { \ c ^= b; c -= rol32(b, 14); \ a ^= c; a -= rol32(c, 11); \ b ^= a; b -= rol32(a, 25); \ c ^= b; c -= rol32(b, 16); \ a ^= c; a -= rol32(c, 4); \ b ^= a; b -= rol32(a, 14); \ c ^= b; c -= rol32(b, 24); \ } #define JHASH_INITVAL 0xdeadbeef typedef unsigned int u32; /* jhash - hash an arbitrary key * @k: sequence of bytes as key * @length: the length of the key * @initval: the previous hash, or an arbitray value * * The generic version, hashes an arbitrary sequence of bytes. * No alignment or length assumptions are made about the input key. * * Returns the hash value of the key. The result depends on endianness. */ static inline u32 jhash(const void *key, u32 length, u32 initval) { u32 a, b, c; const unsigned char *k = key; /* Set up the internal state */ a = b = c = JHASH_INITVAL + length + initval; /* All but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += *(u32 *)(k); b += *(u32 *)(k + 4); c += *(u32 *)(k + 8); __jhash_mix(a, b, c); length -= 12; k += 12; } /* Last block: affect all 32 bits of (c) */ switch (length) { case 12: c += (u32)k[11]<<24; /* fall through */ case 11: c += (u32)k[10]<<16; /* fall through */ case 10: c += (u32)k[9]<<8; /* fall through */ case 9: c += k[8]; /* fall through */ case 8: b += (u32)k[7]<<24; /* fall through */ case 7: b += (u32)k[6]<<16; /* fall through */ case 6: b += (u32)k[5]<<8; /* fall through */ case 5: b += k[4]; /* fall through */ case 4: a += (u32)k[3]<<24; /* fall through */ case 3: a += (u32)k[2]<<16; /* fall through */ case 2: a += (u32)k[1]<<8; /* fall through */ case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ break; } return c; } /* jhash2 - hash an array of u32's * @k: the key which must be an array of u32's * @length: the number of u32's in the key * @initval: the previous hash, or an arbitray value * * Returns the hash value of the key. 
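 *
 * Example (illustrative sketch; saddr, daddr, ports and proto are assumed
 * to be u32 variables): bucketing a four-word flow key into a 256-entry
 * table:
 *
 *   u32 key[4] = { saddr, daddr, ports, proto };
 *   u32 bucket = jhash2(key, 4, 0) & 0xff;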
*/ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) { u32 a, b, c; /* Set up the internal state */ a = b = c = JHASH_INITVAL + (length<<2) + initval; /* Handle most of the key */ while (length > 3) { a += k[0]; b += k[1]; c += k[2]; __jhash_mix(a, b, c); length -= 3; k += 3; } /* Handle the last 3 u32's */ switch (length) { case 3: c += k[2]; /* fall through */ case 2: b += k[1]; /* fall through */ case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ break; } return c; } /* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { a += initval; b += initval; c += initval; __jhash_final(a, b, c); return c; } static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) { return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); } static inline u32 jhash_2words(u32 a, u32 b, u32 initval) { return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); } static inline u32 jhash_1word(u32 a, u32 initval) { return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); } #endif /* _LINUX_JHASH_H */ xdp-tools-1.5.4/xdp-loader/0000755000175100001660000000000015003640462015054 5ustar runnerdockerxdp-tools-1.5.4/xdp-loader/xdp-loader.c0000644000175100001660000003266415003640462017272 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ #include #include #include #include #include #include #include #include #include #include #include #include #include "params.h" #include "logging.h" #include "util.h" #define PROG_NAME "xdp-loader" static const struct loadopt { bool help; struct iface iface; struct multistring filenames; char *pin_path; char *section_name; char *prog_name; enum xdp_attach_mode mode; __u32 prio; __u32 actions; } defaults_load = { .mode = XDP_MODE_NATIVE }; struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {"unspecified", XDP_MODE_UNSPEC}, {NULL, 0} }; struct flag_val load_actions[] = { {"XDP_ABORTED", 1U << XDP_ABORTED}, {"XDP_DROP", 1U << XDP_DROP}, {"XDP_PASS", 1U << XDP_PASS}, {"XDP_TX", 1U << XDP_TX}, {"XDP_REDIRECT", 1U << XDP_REDIRECT}, {} }; static struct prog_option load_options[] = { DEFINE_OPTION("mode", OPT_ENUM, struct loadopt, mode, .short_opt = 'm', .typearg = xdp_modes, .metavar = "", .help = "Load XDP program in ; default native"), DEFINE_OPTION("pin-path", OPT_STRING, struct loadopt, pin_path, .short_opt = 'p', .help = "Path to pin maps under (must be in bpffs)."), DEFINE_OPTION("section", OPT_STRING, struct loadopt, section_name, .metavar = "
", .short_opt = 's', .help = "ELF section name of program to load (default: first in file)."), DEFINE_OPTION("prog-name", OPT_STRING, struct loadopt, prog_name, .metavar = "", .short_opt = 'n', .help = "BPF program name of program to load (default: first in file)."), DEFINE_OPTION("dev", OPT_IFNAME, struct loadopt, iface, .positional = true, .metavar = "", .required = true, .help = "Load on device "), DEFINE_OPTION("filenames", OPT_MULTISTRING, struct loadopt, filenames, .positional = true, .metavar = "", .required = true, .help = "Load programs from "), DEFINE_OPTION("prio", OPT_U32, struct loadopt, prio, .short_opt = 'P', .help = "Set run priority of program"), DEFINE_OPTION("actions", OPT_FLAGS, struct loadopt, actions, .short_opt = 'A', .typearg = load_actions, .metavar = "", .help = "Chain call actions (default: XDP_PASS). e.g. XDP_PASS,XDP_DROP"), END_OPTIONS }; int do_load(const void *cfg, __unused const char *pin_root_path) { const struct loadopt *opt = cfg; struct xdp_program **progs, *p; char errmsg[STRERR_BUFSIZE]; int err = EXIT_SUCCESS; size_t num_progs, i; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = opt->pin_path); if (opt->section_name && opt->prog_name) { pr_warn("Only one of --section or --prog-name can be set\n"); return EXIT_FAILURE; } num_progs = opt->filenames.num_strings; if (!num_progs) { pr_warn("Need at least one filename to load\n"); return EXIT_FAILURE; } else if (num_progs > 1 && opt->mode == XDP_MODE_HW) { pr_warn("Cannot attach multiple programs in HW mode\n"); return EXIT_FAILURE; } progs = calloc(num_progs, sizeof(*progs)); if (!progs) { pr_warn("Couldn't allocate memory\n"); return EXIT_FAILURE; } pr_debug("Loading %zu files on interface '%s'.\n", num_progs, opt->iface.ifname); /* libbpf spits out a lot of unhelpful error messages while loading. * Silence the logging so we can provide our own messages instead; this * is a noop if verbose logging is enabled. */ silence_libbpf_logging(); retry: for (i = 0; i < num_progs; i++) { DECLARE_LIBXDP_OPTS(xdp_program_opts, xdp_opts, 0); struct bpf_program *bpf_prog = NULL; p = progs[i]; if (p) xdp_program__close(p); if (opt->prog_name) { xdp_opts.open_filename = opt->filenames.strings[i]; xdp_opts.prog_name = opt->prog_name; xdp_opts.opts = &opts; p = xdp_program__create(&xdp_opts); } else { p = xdp_program__open_file(opt->filenames.strings[i], opt->section_name, &opts); } err = libxdp_get_error(p); if (err) { if (err == -EPERM && !double_rlimit()) goto retry; libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("Couldn't open file '%s': %s\n", opt->filenames.strings[i], errmsg); goto out; } /* Disable autoload for all programs in the bpf object; libxdp * will make sure to turn it back on for the program that we're * actually loading */ bpf_object__for_each_program(bpf_prog, xdp_program__bpf_obj(p)) bpf_program__set_autoload(bpf_prog, false); if (opt->prio) { err = xdp_program__set_run_prio(p, opt->prio); if (err) { pr_warn("Error setting run priority: %u\n", opt->prio); goto out; } } if (opt->actions) { __u32 a; for (a = XDP_ABORTED; a <= XDP_REDIRECT; a++) { err = xdp_program__set_chain_call_enabled(p, a, opt->actions & (1U << a)); if (err) { pr_warn("Error setting chain call action: %u\n", a); goto out; } } } xdp_program__print_chain_call_actions(p, errmsg, sizeof(errmsg)); pr_debug("XDP program %zu: Run prio: %d. 
Chain call actions: %s\n", i, xdp_program__run_prio(p), errmsg); if (!opt->pin_path) { struct bpf_map *map; bpf_object__for_each_map(map, xdp_program__bpf_obj(p)) { err = bpf_map__set_pin_path(map, NULL); if (err) { pr_warn("Error clearing map pin path: %s\n", strerror(-err)); goto out; } } } progs[i] = p; } err = xdp_program__attach_multi(progs, num_progs, opt->iface.ifindex, opt->mode, 0); if (err) { if (err == -EPERM && !double_rlimit()) goto retry; if (err == -EOPNOTSUPP && (opt->mode == XDP_MODE_NATIVE || opt->mode == XDP_MODE_HW)) { pr_warn("Attaching XDP program in %s mode not supported - try %s mode.\n", opt->mode == XDP_MODE_NATIVE ? "native" : "HW", opt->mode == XDP_MODE_NATIVE ? "SKB" : "native or SKB"); } else { libbpf_strerror(err, errmsg, sizeof(errmsg)); pr_warn("Couldn't attach XDP program on iface '%s': %s(%d)\n", opt->iface.ifname, errmsg, err); } goto out; } out: for (i = 0; i < num_progs; i++) if (progs[i]) xdp_program__close(progs[i]); free(progs); return err; } static const struct unloadopt { bool all; __u32 prog_id; struct iface iface; } defaults_unload = {}; static struct prog_option unload_options[] = { DEFINE_OPTION("dev", OPT_IFNAME, struct unloadopt, iface, .positional = true, .metavar = "", .help = "Unload from device "), DEFINE_OPTION("id", OPT_U32, struct unloadopt, prog_id, .metavar = "", .short_opt = 'i', .help = "Unload program with id "), DEFINE_OPTION("all", OPT_BOOL, struct unloadopt, all, .short_opt = 'a', .help = "Unload all programs from interface"), END_OPTIONS }; int do_unload(const void *cfg, __unused const char *pin_root_path) { const struct unloadopt *opt = cfg; struct xdp_multiprog *mp = NULL; enum xdp_attach_mode mode; int err = EXIT_FAILURE; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = pin_root_path); if (!opt->all && !opt->prog_id) { pr_warn("Need prog ID or --all\n"); goto out; } if (!opt->iface.ifindex) { pr_warn("Must specify ifname\n"); goto out; } /* The feature probing done by libxdp makes libbpf output confusing * error messages even on unload. Silence the logging so we can provide * our own messages instead; this is a noop if verbose logging is * enabled. */ silence_libbpf_logging(); mp = xdp_multiprog__get_from_ifindex(opt->iface.ifindex); if (IS_ERR_OR_NULL(mp)) { pr_warn("No XDP program loaded on %s\n", opt->iface.ifname); mp = NULL; goto out; } if (opt->all) { err = xdp_multiprog__detach(mp); if (err) { pr_warn("Unable to detach XDP program: %s\n", strerror(-err)); goto out; } } else { struct xdp_program *prog = NULL; while ((prog = xdp_multiprog__next_prog(prog, mp))) { if (xdp_program__id(prog) == opt->prog_id) { mode = xdp_multiprog__attach_mode(mp); goto found; } } if (xdp_multiprog__is_legacy(mp)) { prog = xdp_multiprog__main_prog(mp); if (xdp_program__id(prog) == opt->prog_id) { mode = xdp_multiprog__attach_mode(mp); goto found; } } prog = xdp_multiprog__hw_prog(mp); if (xdp_program__id(prog) == opt->prog_id) { mode = XDP_MODE_HW; goto found; } pr_warn("Program with ID %u not loaded on %s\n", opt->prog_id, opt->iface.ifname); err = -ENOENT; goto out; found: pr_debug("Detaching XDP program with ID %u from %s\n", xdp_program__id(prog), opt->iface.ifname); err = xdp_program__detach(prog, opt->iface.ifindex, mode, 0); if (err) { pr_warn("Unable to detach XDP program: %s\n", strerror(-err)); goto out; } } out: xdp_multiprog__close(mp); return err ? 
EXIT_FAILURE : EXIT_SUCCESS; } static const struct statusopt { struct iface iface; } defaults_status = {}; static struct prog_option status_options[] = { DEFINE_OPTION("dev", OPT_IFNAME, struct statusopt, iface, .positional = true, .metavar = "[ifname]", .help = "Show status for device [ifname] (default all interfaces)"), END_OPTIONS }; int do_status(const void *cfg, __unused const char *pin_root_path) { const struct statusopt *opt = cfg; printf("CURRENT XDP PROGRAM STATUS:\n\n"); return iface_print_status(opt->iface.ifindex ? &opt->iface : NULL); } static const struct cleanopt { struct iface iface; } defaults_clean = {}; static struct prog_option clean_options[] = { DEFINE_OPTION("dev", OPT_IFNAME, struct cleanopt, iface, .positional = true, .metavar = "[ifname]", .help = "Clean up detached program links for [ifname] (default all interfaces)"), END_OPTIONS }; int do_clean(const void *cfg, __unused const char *pin_root_path) { const struct cleanopt *opt = cfg; printf("Cleaning up detached XDP program links for %s\n", opt->iface.ifindex ? opt->iface.ifname : "all interfaces"); return libxdp_clean_references(opt->iface.ifindex); } static const struct featuresopt { struct iface iface; } defaults_features = {}; static struct prog_option features_options[] = { DEFINE_OPTION("dev", OPT_IFNAME, struct featuresopt, iface, .positional = true, .metavar = "", .required = true, .help = "Show XDP features for device "), END_OPTIONS }; #define CHECK_XDP_FEATURE(f) (opts.feature_flags & (f) ? "yes" : "no") static int iface_print_xdp_features(const struct iface *iface) { #ifdef HAVE_LIBBPF_BPF_XDP_QUERY LIBBPF_OPTS(bpf_xdp_query_opts, opts); int err; err = bpf_xdp_query(iface->ifindex, 0, &opts); if (err) { pr_warn("The running kernel doesn't support querying XDP features (%d).\n", err); return err; } /* NETDEV_XDP features are defined in kernel header */ printf("NETDEV_XDP_ACT_BASIC:\t\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_BASIC)); printf("NETDEV_XDP_ACT_REDIRECT:\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_REDIRECT)); printf("NETDEV_XDP_ACT_NDO_XMIT:\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_NDO_XMIT)); printf("NETDEV_XDP_ACT_XSK_ZEROCOPY:\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_XSK_ZEROCOPY)); printf("NETDEV_XDP_ACT_HW_OFFLOAD:\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_HW_OFFLOAD)); printf("NETDEV_XDP_ACT_RX_SG:\t\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_RX_SG)); printf("NETDEV_XDP_ACT_NDO_XMIT_SG:\t%s\n", CHECK_XDP_FEATURE(NETDEV_XDP_ACT_NDO_XMIT_SG)); if (opts.feature_flags & ~NETDEV_XDP_ACT_MASK) pr_debug("unknown reported xdp features: 0x%lx\n", (unsigned long)opts.feature_flags & ~NETDEV_XDP_ACT_MASK); return 0; #else __unused const void *i = iface; pr_warn("Cannot display features, because xdp-loader was compiled against an " "old version of libbpf without support for querying features.\n"); return -EOPNOTSUPP; #endif } int do_features(const void *cfg, __unused const char *pin_root_path) { const struct featuresopt *opt = cfg; return iface_print_xdp_features(&opt->iface); } int do_help(__unused const void *cfg, __unused const char *pin_root_path) { fprintf(stderr, "Usage: xdp-loader COMMAND [options]\n" "\n" "COMMAND can be one of:\n" " load - load an XDP program on an interface\n" " unload - unload an XDP program from an interface\n" " status - show current XDP program status\n" " clean - clean up detached program links in XDP bpffs directory\n" " features - show XDP features supported by the NIC\n" " help - show this help message\n" "\n" "Use 'xdp-loader COMMAND --help' to see options for each 
command\n"); return -1; } static const struct prog_command cmds[] = { DEFINE_COMMAND(load, "Load an XDP program on an interface"), DEFINE_COMMAND(unload, "Unload an XDP program from an interface"), DEFINE_COMMAND(clean, "Clean up detached program links in XDP bpffs directory"), DEFINE_COMMAND(status, "Show XDP program status"), DEFINE_COMMAND(features, "Show NIC XDP features"), { .name = "help", .func = do_help, .no_cfg = true }, END_COMMANDS }; union all_opts { struct loadopt load; struct unloadopt unload; struct statusopt status; struct featuresopt features; }; int main(int argc, char **argv) { if (argc > 1) return dispatch_commands(argv[1], argc - 1, argv + 1, cmds, sizeof(union all_opts), PROG_NAME, false); return do_help(NULL, NULL); } xdp-tools-1.5.4/xdp-loader/README.org0000644000175100001660000002261015003640462016523 0ustar runnerdocker#+EXPORT_FILE_NAME: xdp-loader #+TITLE: xdp-loader #+OPTIONS: ^:nil #+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"XDP program loader" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. There's also a Makefile rule to export it. * xdp-loader - an XDP program loader XDP-loader is a simple loader for XDP programs with support for attaching multiple programs to the same interface. To achieve this it exposes the same load and unload semantics exposed by the libxdp library. See the =libxdp(3)= man page for details of how this works, and what kernel features it relies on. ** Running xdp-loader The syntax for running xdp-loader is: #+begin_src sh xdp-loader COMMAND [options] Where COMMAND can be one of: load - load an XDP program on an interface unload - unload an XDP program from an interface status - show current XDP program status clean - clean up detached program links in XDP bpffs directory features - show XDP features supported by the NIC help - show the list of available commands #+end_src Each command and its options are explained below. Or use =xdp-loader COMMAND --help= to see the options for each command. * The LOAD command The =load= command loads one or more XDP programs onto an interface. The syntax for the =load= command is: =xdp-loader load [options] <ifname> <filenames>= Where =<ifname>= is the name of the interface to load the programs onto, and =<filenames>= is one or more file names containing XDP programs. The programs will be loaded onto the interface in the order of their preference, as specified by the program metadata (see *libxdp(3)*). The supported options are: ** -m, --mode <mode> Specifies which mode to load the XDP program in. The valid values are 'native', which is the default in-driver XDP mode, 'skb', which causes the so-called /skb mode/ (also known as /generic XDP/) to be used, 'hw' which causes the program to be offloaded to the hardware, or 'unspecified' which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using 'unspecified' can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is 'native'. Note that hardware with support for the 'hw' mode is rare: Netronome/Corigine cards (using the 'nfp' driver) are the only devices with support for this in the mainline Linux kernel.
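As a quick illustration of the mode option (a hypothetical sketch; =eth0= and =xdp_pass.o= are placeholders for your own interface and program file):

#+begin_src sh
# Load in the default native (in-driver) mode
xdp-loader load eth0 xdp_pass.o

# Explicitly request generic (skb) mode instead
xdp-loader load -m skb eth0 xdp_pass.o
#+end_src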
** -p, --pin-path This specifies a root path under which to pin any maps that define the 'pinning' attribute in their definitions. This path must be located on a =bpffs= file system. If not set, maps will not be pinned, even if they specify pinning in their definitions. When pinning maps, if the pinned location for a map already exist, the map pinned there will be reused if it is compatible with the type of the map being loaded. ** -s, --section
Specify which ELF section to load the XDP program(s) from in each file. The default is to use the first program in each file. If this option is set, it applies to all programs being loaded. ** -n, --prog-name Specify which BPF program with the name to load the XDP program(s) from in each file. The default is to use the first program in each file. Only one of --section and --prog-name may be specified. If this option is set, it applies to all programs being loaded. ** -P, --prio Specify the priority to load the XDP program(s) with (this affects the order of programs running on the interface). The default is to use the value from the metadata in the program ELF file, or a value of 50 if the program has no such metadata. If this option is set, it applies to all programs being loaded. ** -A, --actions Specify the "chain call actions" of the loaded XDP program(s). These are the XDP actions that will cause the next program loaded on the interface to be called, instead of returning immediately. The default is to use the value set in the metadata in the program ELF file, or XDP_PASS if no such metadata is set. If this option is set, it applies to all programs being loaded. ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * The UNLOAD command The =unload= command is used for unloading programs from an interface. The syntax for the =unload= command is: =xdp-loader unload [options] = Where == is the name of the interface to load the programs onto. Either the =--all= or =--id= options must be used to specify which program(s) to unload. The supported options are: ** -i, --id Unload a single program from the interface by ID. Use =xdp-loader status= to obtain the ID of the program being unloaded. If this program is the last program loaded on the interface, the dispatcher program will also be removed, which makes the operation equivalent to specifying =--all=. ** -a, --all Unload all XDP programs on the interface, as well as the multi-program dispatcher. ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * The STATUS command The =status= command displays a list of interfaces in the system, and the XDP program(s) loaded on each interface. For each interface, a list of programs are shown, with the run priority and "chain actions" for each program. See the section on program metadata for the meaning of this metadata. ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * The FEATURES command The =features= command displays the XDP features supported by the NIC. Currently supported XDP features are: ** NETDEV_XDP_ACT_BASIC The networking device has basic support for running XDP programs, and can handle the base set of return codes (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX). ** NETDEV_XDP_ACT_REDIRECT The network device supports handling the XDP_REDIRECT return code. This means packets can be redirected from this device by XDP. ** NETDEV_XDP_ACT_NDO_XMIT The networking interfaces implements the ndo_xdp_xmit callback. This means packets can be redirected to this device by XDP. ** NETDEV_XDP_ACT_XSK_ZEROCOPY The networking device supports AF_XDP in zero copy mode. ** NETDEV_XDP_ACT_HW_OFFLOAD The networking device supports XDP hw offloading. ** NETDEV_XDP_ACT_RX_SG The networking device supports non-linear XDP frames on the receive side. 
This means XDP can be used with big MTUs on this device (if the XDP program is compiled with fragments support) ** NETDEV_XDP_ACT_NDO_XMIT_SG The networking device supports non-linear XDP frames on the transmit side. This means non-linear frames can be redirected to this device. * The CLEAN command The syntax for the =clean= command is: =xdp-loader clean [options] [ifname]= The =clean= command cleans up any detached program links in the XDP bpffs directory. When a network interface disappears, any programs loaded in software mode (e.g. skb, native) remain pinned in the bpffs directory, but become detached from the interface. These need to be unlinked from the filesystem. The =clean= command takes an optional interface parameter to only unlink detached programs corresponding to the interface. By default, all detached programs for all interfaces are unlinked. The supported options are: ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** -h, --help Display a summary of the available options * Examples To load an XDP program on the eth0 interface simply do: #+begin_src sh # xdp-loader load eth0 xdp_drop.o # xdp-loader status CURRENT XDP PROGRAM STATUS: Interface Prio Program name Mode ID Tag Chain actions ------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher native 50 d51e469e988d81da => 50 xdp_drop 55 57cd311f2e27366b XDP_PASS #+end_src Which shows that a dispatcher program was loaded on the interface, and the xdp_drop program was installed as the first (and only) component program after it. In this instance, the program does not specify any of the metadata above, so the defaults (priority 50 and XDP_PASS as its chain call action) was used. To use the automatic map pinning, include the =pinning= attribute into the map definition in the program, something like: #+begin_src c struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 10); __type(key, __u32); __type(value, __u64); __uint(pinning, LIBBPF_PIN_BY_NAME); } my_map SEC(".maps"); #+end_src And load it with the =--pin-path= attribute: #+begin_src sh # xdp-loader load eth0 my_prog.o --pin-path /sys/fs/bpf/my-prog #+end_src This will pin the map at =/sys/fs/bpf/my-prog/my_map=. If this already exists, the pinned map will be reused instead of creating a new one, which allows different BPF programs to share the map. * SEE ALSO =libxdp(3)= for details on the XDP loading semantics and kernel compatibility requirements. * BUGS Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues * AUTHOR xdp-loader and this man page were written by Toke Høiland-Jørgensen. 
xdp-tools-1.5.4/xdp-loader/tests/0000755000175100001660000000000015003640462016216 5ustar runnerdockerxdp-tools-1.5.4/xdp-loader/tests/test-xdp-loader.sh0000644000175100001660000000602415003640462021570 0ustar runnerdockerXDP_LOADER=${XDP_LOADER:-./xdp-loader} ALL_TESTS="test_load test_section test_prog_name test_load_multi test_load_incremental test_load_clobber test_features" test_load() { check_run $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o -vv check_run $XDP_LOADER unload $NS --all -vv } test_section() { check_run $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o -s xdp -vv check_run $XDP_LOADER unload $NS --all -vv } test_prog_name() { check_run $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o -n xdp_drop -vv check_run $XDP_LOADER unload $NS --all -vv } check_progs_loaded() { local iface="$1" local num=$2 local num_loaded num_loaded=$($XDP_LOADER status $NS | grep -c '=>') if [ "$num_loaded" -ne "$num" ]; then echo "Expected $num programs loaded, found $num_loaded" exit 1 fi } test_load_multi() { skip_if_legacy_fallback check_run $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o $TEST_PROG_DIR/xdp_pass.o -vv check_progs_loaded $NS 2 check_run $XDP_LOADER unload $NS --all -vv } test_load_incremental() { skip_if_legacy_fallback local output local ret local id check_run $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o -vv check_progs_loaded $NS 1 output=$($XDP_LOADER load $NS $TEST_PROG_DIR/xdp_pass.o -vv 2>&1) ret=$? if [ "$ret" -ne "0" ] && echo $output | grep -q "Falling back to loading single prog"; then ret=$SKIPPED_TEST check_run $XDP_LOADER unload $NS --all -vv else check_progs_loaded $NS 2 id=$($XDP_LOADER status $NS | grep xdp_pass | awk '{print $4}') check_run $XDP_LOADER unload $NS --id $id check_progs_loaded $NS 1 id=$($XDP_LOADER status $NS | grep xdp_drop | awk '{print $4}') check_run $XDP_LOADER unload $NS --id $id check_progs_loaded $NS 0 fi return $ret } test_load_clobber() { skip_if_legacy_fallback check_run env LIBXDP_SKIP_DISPATCHER=1 $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_drop.o -vv check_progs_loaded $NS 0 # legacy prog so should show up as 0 $XDP_LOADER load $NS $TEST_PROG_DIR/xdp_pass.o -vv ret=$? if [ "$ret" -eq "0" ]; then echo "Should not have been able to load prog with legacy prog loaded" return 1 fi check_progs_loaded $NS 0 check_run $XDP_LOADER unload $NS --all -vv } check_xdp_feature() { check_run ip link add dev v0 type veth peer name v1 $XDP_LOADER features v0 | grep "$1" | grep -q "$2" ret=$? ip link del dev v0 [ $ret -eq 1 ] && exit 1 } test_features() { skip_if_missing_kernel_symbol xdp_set_features_flag check_xdp_feature NETDEV_XDP_ACT_BASIC yes check_xdp_feature NETDEV_XDP_ACT_REDIRECT yes check_xdp_feature NETDEV_XDP_ACT_NDO_XMIT no check_xdp_feature NETDEV_XDP_ACT_XSK_ZEROCOPY no check_xdp_feature NETDEV_XDP_ACT_HW_OFFLOAD no check_xdp_feature NETDEV_XDP_ACT_RX_SG yes check_xdp_feature NETDEV_XDP_ACT_NDO_XMIT_SG no return 0 } cleanup_tests() { $XDP_LOADER unload $NS --all >/dev/null 2>&1 } xdp-tools-1.5.4/xdp-loader/xdp-loader.80000644000175100001660000002275015003640462017212 0ustar runnerdocker.TH "xdp-loader" "8" "SEPTEMBER 12, 2024" "V1.5.4" "XDP program loader" .SH "NAME" xdp-loader \- an XDP program loader .SH "SYNOPSIS" .PP XDP-loader is a simple loader for XDP programs with support for attaching multiple programs to the same interface. To achieve this it exposes the same load and unload semantics exposed by the libxdp library. See the \fIlibxdp(3)\fP man page for details of how this works, and what kernel features it relies on. 
.SS "Running xdp-loader" .PP The syntax for running xdp-loader is: .RS .nf \fCxdp-loader COMMAND [options] Where COMMAND can be one of: load - load an XDP program on an interface unload - unload an XDP program from an interface status - show current XDP program status features - show XDP features supported by the NIC clean - clean up detached program links in XDP bpffs directory help - show the list of available commands \fP .fi .RE .PP Each command, and its options are explained below. Or use \fIxdp\-loader COMMAND \-\-help\fP to see the options for each command. .SH "The LOAD command" .PP The \fIload\fP command loads one or more XDP programs onto an interface. .PP The syntax for the \fIload\fP command is: .PP \fIxdp\-loader load [options] \fP .PP Where \fI\fP is the name of the interface to load the programs onto, and the \fI\fP is one or more file names containing XDP programs. The programs will be loaded onto the interface in the order of their preference, as specified by the program metadata (see \fBlibxdp(3)\fP). .PP The supported options are: .SS "-m, --mode " .PP Specifies which mode to load the XDP program to be loaded in. The valid values are 'native', which is the default in-driver XDP mode, 'skb', which causes the so-called \fIskb mode\fP (also known as \fIgeneric XDP\fP) to be used, 'hw' which causes the program to be offloaded to the hardware, or 'unspecified' which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using 'unspecified' can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is 'native'. Note that hardware with support for the 'hw' mode is rare: Netronome/Corigine cards (using the 'nfp' driver) are the only devices with support for this in the mainline Linux kernel. .SS "-p, --pin-path " .PP This specifies a root path under which to pin any maps that define the 'pinning' attribute in their definitions. This path must be located on a \fIbpffs\fP file system. If not set, maps will not be pinned, even if they specify pinning in their definitions. When pinning maps, if the pinned location for a map already exist, the map pinned there will be reused if it is compatible with the type of the map being loaded. .SS "-s, --section
" .PP Specify which ELF section to load the XDP program(s) from in each file. The default is to use the first program in each file. If this option is set, it applies to all programs being loaded. .SS "-n, --prog-name " .PP Specify which BPF program with the name to load the XDP program(s) from in each file. The default is to use the first program in each file. Only one of --section and --prog-name may be specified. If this option is set, it applies to all programs being loaded. .SS "-P, --prio " .PP Specify the priority to load the XDP program(s) with (this affects the order of programs running on the interface). The default is to use the value from the metadata in the program ELF file, or a value of 50 if the program has no such metadata. If this option is set, it applies to all programs being loaded. .SS "-A, --actions " .PP Specify the "chain call actions" of the loaded XDP program(s). These are the XDP actions that will cause the next program loaded on the interface to be called, instead of returning immediately. The default is to use the value set in the metadata in the program ELF file, or XDP_PASS if no such metadata is set. If this option is set, it applies to all programs being loaded. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The UNLOAD command" .PP The \fIunload\fP command is used for unloading programs from an interface. .PP The syntax for the \fIunload\fP command is: .PP \fIxdp\-loader unload [options] \fP .PP Where \fI\fP is the name of the interface to load the programs onto. Either the \fI\-\-all\fP or \fI\-\-id\fP options must be used to specify which program(s) to unload. .PP The supported options are: .SS "-i, --id " .PP Unload a single program from the interface by ID. Use \fIxdp\-loader status\fP to obtain the ID of the program being unloaded. If this program is the last program loaded on the interface, the dispatcher program will also be removed, which makes the operation equivalent to specifying \fI\-\-all\fP. .SS "-a, --all" .PP Unload all XDP programs on the interface, as well as the multi-program dispatcher. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The STATUS command" .PP The \fIstatus\fP command displays a list of interfaces in the system, and the XDP program(s) loaded on each interface. For each interface, a list of programs are shown, with the run priority and "chain actions" for each program. See the section on program metadata for the meaning of this metadata. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "The FEATURES command" .PP The \fIfeatures\fP command displays the XDP features supported by the NIC. .PP Currently supported XDP features are: .SS "NETDEV_XDP_ACT_BASIC" .PP The networking device has basic support for running XDP programs, and can handle the base set of return codes (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX). .SS "NETDEV_XDP_ACT_REDIRECT" .PP The network device supports handling the XDP_REDIRECT return code. This means packets can be redirected from this device by XDP. .SS "NETDEV_XDP_ACT_NDO_XMIT" .PP The networking interfaces implements the ndo_xdp_xmit callback. This means packets can be redirected to this device by XDP. 
.SS "NETDEV_XDP_ACT_XSK_ZEROCOPY" .PP The networking device supports AF_XDP in zero copy mode. .SS "NETDEV_XDP_ACT_HW_OFFLOAD" .PP The networking device supports XDP hw offloading. .SS "NETDEV_XDP_ACT_RX_SG" .PP The networking device supports non-linear XDP frames on the receive side. This means XDP can be used with big MTUs on this device (if the XDP program is compiled with fragments support) .SS "NETDEV_XDP_ACT_NDO_XMIT_SG" .PP The networking device supports non-linear XDP frames on the transmit side. This means non-linear frames can be redirected to this device. .SH "The CLEAN command" .PP The syntax for the \fIclean\fP command is: .PP \fIxdp\-loader clean [options] [ifname]\fP .PP The \fIclean\fP command cleans up any detached program links in the XDP bpffs directory. When a network interface disappears, any programs loaded in software mode (e.g. skb, native) remain pinned in the bpffs directory, but become detached from the interface. These need to be unlinked from the filesystem. The \fIclean\fP command takes an optional interface parameter to only unlink detached programs corresponding to the interface. By default, all detached programs for all interfaces are unlinked. .PP The supported options are: .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "-h, --help" .PP Display a summary of the available options .SH "Examples" .PP To load an XDP program on the eth0 interface simply do: .RS .nf \fC# xdp-loader load eth0 xdp_drop.o # xdp-loader status CURRENT XDP PROGRAM STATUS: Interface Prio Program name Mode ID Tag Chain actions ------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher native 50 d51e469e988d81da => 50 xdp_drop 55 57cd311f2e27366b XDP_PASS \fP .fi .RE .PP Which shows that a dispatcher program was loaded on the interface, and the xdp_drop program was installed as the first (and only) component program after it. In this instance, the program does not specify any of the metadata above, so the defaults (priority 50 and XDP_PASS as its chain call action) was used. .PP To use the automatic map pinning, include the \fIpinning\fP attribute into the map definition in the program, something like: .RS .nf \fCstruct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 10); __type(key, __u32); __type(value, __u64); __uint(pinning, LIBBPF_PIN_BY_NAME); } my_map SEC(".maps"); \fP .fi .RE .PP And load it with the \fI\-\-pin\-path\fP attribute: .RS .nf \fC# xdp-loader load eth0 my_prog.o --pin-path /sys/fs/bpf/my-prog \fP .fi .RE .PP This will pin the map at \fI/sys/fs/bpf/my\-prog/my_map\fP. If this already exists, the pinned map will be reused instead of creating a new one, which allows different BPF programs to share the map. .SH "SEE ALSO" .PP \fIlibxdp(3)\fP for details on the XDP loading semantics and kernel compatibility requirements. .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP xdp-loader and this man page were written by Toke Høiland-Jørgensen. 
xdp-tools-1.5.4/xdp-loader/.gitignore0000644000175100001660000000001315003640462017036 0ustar runnerdockerxdp-loader xdp-tools-1.5.4/xdp-loader/Makefile0000644000175100001660000000033215003640462016512 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) TOOL_NAME := xdp-loader USER_TARGETS := xdp-loader TEST_FILE := tests/test-xdp-loader.sh MAN_PAGE := xdp-loader.8 LIB_DIR = ../lib include $(LIB_DIR)/common.mk xdp-tools-1.5.4/xdp-dump/0000755000175100001660000000000015003640462014553 5ustar runnerdockerxdp-tools-1.5.4/xdp-dump/README.org0000644000175100001660000002562715003640462016235 0ustar runnerdocker#+EXPORT_FILE_NAME: xdpdump #+TITLE: xdpdump #+MAN_CLASS_OPTIONS: :section-id "8\" \"DATE\" \"VERSION\" \"a simple tcpdump like tool for capturing packets at the XDP layer" # This file serves both as a README on github, and as the source for the man # page; the latter through the org-mode man page export support. # . # To export the man page, simply use the org-mode exporter; (require 'ox-man) if # it's not available. # . # The org-mode export doesn't support extended title lines, so manually copy # over the first line of the resulting .man file before exporting and # committing. * xdpdump - a simple tcpdump like tool for capturing packets at the XDP layer =xdpdump= is a simple XDP packet capture tool that tries to behave similar to =tcpdump=, however, it has no packet filter or decode capabilities. This can be used for debugging XDP programs that are already loaded on an interface. Packets can be dumped/inspected before on *entry* to XDP program, or after at *exit* from an XDP program. Furthermore, at *exit* the XDP action is also captured. This means that even packets that are dropped at the XDP layer can be captured via this tool. =xdpdump= works by attaching a bpf trace program to the XDP entry and/or exit function which stores the raw packet in a perf trace buffer. If no XDP program is loaded this approach can not be used and the tool will use a libpcap live-capture to be backward compatible. ** Running xdpdump The syntax for running =xdpdump= is: #+begin_src Usage: xdpdump [options] XDPDump tool to dump network traffic Options: --rx-capture Capture point for the rx direction (valid values: entry,exit) -D, --list-interfaces Print the list of available interfaces -i, --interface Name of interface to capture on --perf-wakeup Wake up xdpdump every packets -p, --program-names Specific program to attach to -s, --snapshot-length Minimum bytes of packet to capture --use-pcap Use legacy pcap format for XDP traces -w, --write Write raw packets to pcap file -x, --hex Print the full packet in hex -v, --verbose Enable verbose logging (-vv: more verbose) --version Display version information -h, --help Show this help #+end_src * The options explained The =xdpdump= tool tries to mimic the basic =tcpdump= options, but just in case below each of the available options is explained: ** --rx-capture Specify where the ingress packet gets captured. Either at the entry of the XDP program and/or exit of the XDP program. Valid options are *entry*, *exit*, or both *entry,exit*. The packet at *exit* can be modified by the XDP program. If you are interested to see both the original and modified packet, use the *entry,exit* option. With this, each packet is captured twice. The default value for this is *entry*. ** -D, --list-interfaces Display a list of available interfaces and any XDP program loaded ** --load-xdp-mode Specifies which loader mode to use with the =--load-xdp-program= option. 
The valid values are ‘native’, which is the default in-driver XDP mode, ‘skb’, which causes the so-called skb mode (also known as generic XDP) to be used, ‘hw’ which causes the program to be offloaded to the hardware, or ‘unspecified’ which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using ‘unspecified’ can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is ‘native’. ** --load-xdp-program If no XDP program is loaded on the interface, by default, xdpdump will fall back to libpcap's live capture mode to capture the packets. Alternatively, with this option, you can ask xdpdump to load an XDP program to capture the packets directly. ** -i, --interface <ifname> Listen on interface =ifname=. Note that if no XDP program is loaded on the interface it will use libpcap's live capture mode to capture the packets. ** --perf-wakeup <packets> :feat_perfbuf: Let the kernel wake up =xdpdump= once for every =<packets>= being posted in the perf ring buffer. The higher the number the less the impact is on the actual XDP program. The default value is 0, which automatically calculates the value based on the available CPUs/buffers. Use -v to see the actual value used. ** -p, --program-names [<prog>|all] This option allows you to capture packets for a specific XDP program, a set of programs, or all XDP programs loaded on the interface. You can either specify the actual program names or program IDs separated by commas. In the case where multiple programs are attached with the same name, you should use the program ID. Use the -D option to see the loaded programs and their IDs. In addition, the Linux API does not provide the full name of the attached eBPF entry function if it's longer than 15 characters. xdpdump will try to guess the correct function name from the available BTF debug information. However, if multiple functions exist with the same leading name, it cannot pick the correct one. It will dump the available functions, and you can choose the correct one, and supply it with this option. If you have programs with duplicate long names, you also need to specify the program ID with the full name. This can be done by adding the id to the name with the =@= suffix. ** -P, --promiscuous-mode This option puts the interface into promiscuous mode. ** -s, --snapshot-length <snaplen> Capture *snaplen* bytes of a packet rather than the default 262144 bytes. ** --use-pcap Use legacy pcap format for XDP traces. By default, it will use the PcapNG format so that it can store various metadata. ** -w, --write <file> Write the raw packets to a pcap file rather than printing them out in hexadecimal. Standard output is used if *file* is =-=. ** -x, --hex When dumping packets on the console also print the full packet content in hex. ** -v, --verbose Enable debug logging. Specify twice for even more verbosity. ** --version Display =xdpdump= version information and exit.
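As a hypothetical illustration combining several of the options above (=eth0= is a placeholder interface name):

#+begin_src sh
# Capture at both entry and exit, snap to 128 bytes, write a PcapNG file
xdpdump -i eth0 --rx-capture=entry,exit -s 128 -w capture.pcapng
#+end_src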
** -h, --help Display a summary of the available options * Examples The below will load the =xdp-filter= program on eth0, but it does not do any actual filtering: #+begin_src # xdp-filter load --mode skb eth0 # # xdpdump -D Interface Prio Program name Mode ID Tag Chain actions -------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher skb 10651 d51e469e988d81da => 10 xdpfilt_alw_all 10669 0b394f43ab24501c XDP_PASS #+end_src Now we can try =xdpdump=: #+begin_src # xdpdump -i eth0 -x listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes 1584373839.460733895: xdp_dispatcher()@entry: packet size 102 bytes, captured 102 bytes on if_index 2, rx queue 0, id 1 0x0000: 52 54 00 db 44 b6 52 54 00 34 38 da 08 00 45 48 RT..D.RT.48...EH 0x0010: 00 58 d7 dd 40 00 40 06 ec c3 c0 a8 7a 01 c0 a8 .X..@.@.....z... 0x0020: 7a 64 9c de 00 16 0d d5 c6 bc 46 c9 bb 11 80 18 zd........F..... 0x0030: 01 f5 7b b4 00 00 01 01 08 0a 77 0a 8c b8 40 12 ..{.......w...@. 0x0040: cc a6 00 00 00 10 54 ce 6e 20 c3 e7 da 6c 08 42 ......T.n ...l.B 0x0050: d6 d9 ee 42 42 f0 82 c9 4f 12 ed 7b 19 ab 22 0d ...BB...O..{..". 0x0060: 09 29 a9 ee df 89 .).... 1584373839.462340808: xdp_dispatcher()@entry: packet size 66 bytes, captured 66 bytes on if_index 2, rx queue 0, id 2 0x0000: 52 54 00 db 44 b6 52 54 00 34 38 da 08 00 45 48 RT..D.RT.48...EH 0x0010: 00 34 d7 de 40 00 40 06 ec e6 c0 a8 7a 01 c0 a8 .4..@.@.....z... 0x0020: 7a 64 9c de 00 16 0d d5 c6 e0 46 c9 bc 85 80 10 zd........F..... 0x0030: 01 f5 74 0c 00 00 01 01 08 0a 77 0a 8c ba 40 12 ..t.......w...@. 0x0040: d2 34 .4 ^C 2 packets captured 0 packets dropped by perf ring #+end_src Below are two more examples redirecting the capture file to =tcpdump= or =tshark=: #+begin_src # xdpdump -i eth0 -w - | tcpdump -r - -n listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes reading from file -, link-type EN10MB (Ethernet) 15:55:09.075887 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [P.], seq 3857553815:3857553851, ack 3306438882, win 501, options [nop,nop,TS val 1997449167 ecr 1075234328], length 36 15:55:09.077756 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [.], ack 37, win 501, options [nop,nop,TS val 1997449169 ecr 1075244363], length 0 15:55:09.750230 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [P.], seq 36:72, ack 37, win 501, options [nop,nop,TS val 1997449842 ecr 1075244363], length 36 #+end_src #+begin_src # xdpdump -i eth0 -w - | tshark -r - -n listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes 1 0.000000 192.168.122.1 → 192.168.122.100 SSH 102 Client: Encrypted packet (len=36) 2 0.000646 192.168.122.1 → 192.168.122.100 TCP 66 40158 → 22 [ACK] Seq=37 Ack=37 Win=1467 Len=0 TSval=1997621571 TSecr=1075416765 3 12.218164 192.168.122.1 → 192.168.122.100 SSH 102 Client: Encrypted packet (len=36) #+end_src One final example capturing specific XDP programs loaded on the interface: #+begin_src # xdpdump -D Interface Prio Program name Mode ID Tag Chain actions -------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher skb 10558 d51e469e988d81da => 5 xdp_test_prog_w 10576 b5a46c6e9935298c XDP_PASS => 10 xdp_pass 10582 3b185187f1855c4c XDP_PASS => 10 xdp_pass 10587 3b185187f1855c4c XDP_PASS #+end_src We would like to see the packets on the =xdp_dispatcher()= and the 2nd 
=xdp_pass()= program: #+begin_src # xdpdump -i eth0 --rx-capture=entry,exit -p xdp_dispatcher,xdp_pass@10587 or # xdpdump -i eth0 --rx-capture=entry,exit -p 10558,10587 listening on eth0, ingress XDP program ID 10558 func xdp_dispatcher, ID 10587 func xdp_pass, capture mode entry/exit, capture size 262144 bytes 1607694215.501287259: xdp_dispatcher()@entry: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501371504: xdp_pass()@entry: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501383099: xdp_pass()@exit[PASS]: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501394709: xdp_dispatcher()@exit[PASS]: packet size 102 bytes on if_index 2, rx queue 0, id 1 ^C 4 packets captured 0 packets dropped by perf ring #+end_src * BUGS Please report any bugs on Github: https://github.com/xdp-project/xdp-tools/issues * AUTHOR =xdpdump= was written by Eelco Chaudron xdp-tools-1.5.4/xdp-dump/xdpdump_bpf.c0000644000175100001660000000737715003640462017245 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /***************************************************************************** * Include files *****************************************************************************/ #include #include #include #include #include "xdpdump.h" /***************************************************************************** * Macros *****************************************************************************/ #define min(x,y) ((x)<(y) ? x : y) /***************************************************************************** * (re)definition of kernel data structures for use with BTF *****************************************************************************/ struct net_device { /* Structure does not need to contain all entries, * as "preserve_access_index" will use BTF to fix this... */ int ifindex; } __attribute__((preserve_access_index)); struct xdp_rxq_info { /* Structure does not need to contain all entries, * as "preserve_access_index" will use BTF to fix this... 
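 * CO-RE note: with the "preserve_access_index" attribute, clang records a
 * relocation for every field access on this struct, and libbpf rewrites the
 * offsets at load time to match the running kernel's real struct layout.
 * That is why this local definition only needs the fields the trace
 * program actually touches.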
*/ struct net_device *dev; __u32 queue_index; } __attribute__((preserve_access_index)); struct xdp_buff { void *data; void *data_end; void *data_meta; void *data_hard_start; unsigned long handle; struct xdp_rxq_info *rxq; } __attribute__((preserve_access_index)); /***************************************************************************** * Local definitions and global variables *****************************************************************************/ struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __uint(max_entries, MAX_CPUS); __type(key, int); __type(value, __u32); } xdpdump_perf_map SEC(".maps"); /***************************************************************************** * .data section value storing the capture configuration *****************************************************************************/ struct trace_configuration trace_cfg SEC(".data"); /***************************************************************************** * trace_to_perf_buffer() *****************************************************************************/ static inline void trace_to_perf_buffer(struct xdp_buff *xdp, bool fexit, int action) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; struct pkt_trace_metadata metadata; if (data >= data_end || trace_cfg.capture_if_ifindex != xdp->rxq->dev->ifindex) return; metadata.prog_index = trace_cfg.capture_prog_index; metadata.ifindex = xdp->rxq->dev->ifindex; metadata.rx_queue = xdp->rxq->queue_index; metadata.pkt_len = (__u16)(data_end - data); metadata.cap_len = min(metadata.pkt_len, trace_cfg.capture_snaplen); metadata.action = action; metadata.flags = 0; if (fexit) metadata.flags |= MDF_DIRECTION_FEXIT; bpf_xdp_output(xdp, &xdpdump_perf_map, ((__u64) metadata.cap_len << 32) | BPF_F_CURRENT_CPU, &metadata, sizeof(metadata)); } /***************************************************************************** * trace_on_entry() *****************************************************************************/ SEC("fentry/func") int BPF_PROG(trace_on_entry, struct xdp_buff *xdp) { trace_to_perf_buffer(xdp, false, 0); return 0; } /***************************************************************************** * trace_on_exit() *****************************************************************************/ SEC("fexit/func") int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret) { trace_to_perf_buffer(xdp, true, ret); return 0; } /***************************************************************************** * License *****************************************************************************/ char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-dump/tests/0000755000175100001660000000000015003640462015715 5ustar runnerdockerxdp-tools-1.5.4/xdp-dump/tests/test-xdpdump.sh0000644000175100001660000006751115003640462020721 0ustar runnerdocker# # Test scrip to do basic xdpdump checks # # shellcheck disable=2039 # ALL_TESTS="test_help test_interfaces test_capt_pcap test_capt_pcapng test_capt_term test_exitentry test_snap test_multi_pkt test_perf_wakeup test_promiscuous_selfload test_promiscuous_preload test_none_xdp test_pname_parse test_multi_prog test_xdp_load" XDPDUMP=${XDPDUMP:-./xdpdump} XDP_LOADER=${XDP_LOADER:-../xdp-loader/xdp-loader} RESULT="" print_result() { if [ -n "$1" ]; then echo "ERROR: $1" echo "==== RESULT: ====" echo "$RESULT" echo "==== END ====" else echo "$RESULT" fi } test_help() { local XDPDUMP_HELP_TEXT XDPDUMP_HELP_TEXT=$(cat <<-END Usage: xdpdump [options] XDPDump tool to dump network traffic 
Options: --rx-capture Capture point for the rx direction (valid values: entry,exit) -D, --list-interfaces Print the list of available interfaces --load-xdp-mode Mode used for --load-xdp-mode, default native (valid values: native,skb,hw,unspecified) --load-xdp-program Load XDP trace program if no XDP program is loaded -i, --interface Name of interface to capture on --perf-wakeup Wake up xdpdump every packets -p, --program-names Specific program to attach to -P, --promiscuous-mode Open interface in promiscuous mode -s, --snapshot-length Minimum bytes of packet to capture --use-pcap Use legacy pcap format for XDP traces -w, --write Write raw packets to pcap file -x, --hex Print the full packet in hex -v, --verbose Enable verbose logging (-vv: more verbose) --version Display version information -h, --help Show this help END ) $XDPDUMP --help | grep -q "\-\-perf-wakeup" if [ $? -eq 1 ]; then XDPDUMP_HELP_TEXT=$(echo "$XDPDUMP_HELP_TEXT" | sed '/--perf-wakeup /d') fi RESULT=$($XDPDUMP --help) if [ "$RESULT" != "$XDPDUMP_HELP_TEXT" ]; then print_result "The --help output failed" return 1 fi RESULT=$($XDPDUMP -h) if [ "$RESULT" != "$XDPDUMP_HELP_TEXT" ]; then print_result "The -h output failed" return 1 fi } test_interfaces() { local NO_PROG_REGEX="($NS +)" if is_multiprog_supported; then local PROG_REGEX="($NS[[:space:]]+xdp_dispatcher.+xdp_drop)" else local PROG_REGEX="($NS[[:space:]]+xdp_drop)" fi RESULT=$($XDPDUMP -D) if ! [[ $RESULT =~ $NO_PROG_REGEX ]]; then print_result "Failed showing test interface with no XPD program loaded" return 1 fi RESULT=$($XDPDUMP --list-interfaces) if ! [[ $RESULT =~ $NO_PROG_REGEX ]]; then print_result "Failed showing test interface with no XPD program loaded" return 1 fi $XDP_LOADER load "$NS" "$TEST_PROG_DIR/xdp_drop.o" RESULT=$($XDPDUMP -D) if ! [[ $RESULT =~ $PROG_REGEX ]]; then print_result "Failed showing test interface with XPD program loaded" return 1 fi $XDP_LOADER unload "$NS" --all } test_capt_pcap() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PASS_PKT="IP6 $INSIDE_IP6 > $OUTSIDE_IP6: ICMP6, echo reply(, id [0-9]+)?, seq 1, length 64" $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --use-pcap -w - 2> /dev/null | tcpdump -r - -n") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 sleep 1 RESULT=$(stop_background "$PID") $XDP_LOADER unload "$NS" --all || return 1 if ! [[ $RESULT =~ $PASS_PKT ]]; then print_result "IPv6 packet not received" return 1 fi } version_greater_or_equal() { printf '%s\n%s\n' "$2" "$1" | sort -V -C } test_capt_pcapng() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PCAP_FILE="/tmp/${NS}_PID_$$_$RANDOM.pcap" local PASS_PKT="IP6 $INSIDE_IP6 > $OUTSIDE_IP6: ICMP6, echo reply(, id [0-9]+)?, seq 1, length 64" local HW=$(uname -m | sed -e 's/[]\/$*+.^|[]/\\&/g') local OS=$(uname -snrv | sed -e 's/[]\/$+*.^()|[]/\\&/g') local INFOS_REGEX="" local OLD_CAPINFOS=0 local TSHARK_VERSION=$(tshark --version 2> /dev/null | sed -ne 's/^TShark (Wireshark) \([0-9]\+\.[0-9]\+\.[0-9]\+\).*/\1/p') if [[ "$(capinfos --help)" == *"Capinfos (Wireshark) 2."* ]]; then OLD_CAPINFOS=1 fi INFOS_REGEX+="(File type: Wireshark\/\.\.\. 
- pcapng.*" INFOS_REGEX+="Capture hardware: $HW.*" INFOS_REGEX+="Capture oper-sys: $OS.*" INFOS_REGEX+="Capture application: xdpdump v[0-9]+\.[0-9]+\.[0-9]+.*" INFOS_REGEX+="Capture comment: Capture was taken on interface $NS, with the following XDP programs loaded: xdp_dispatcher\(\) xdp_test_prog_w.*" INFOS_REGEX+="Interface #0 info:.*" INFOS_REGEX+="Name = ${NS}:xdp_test_prog_with_a_long_name\(\)@fentry.*" if [ $OLD_CAPINFOS -eq 0 ]; then INFOS_REGEX+="Hardware = driver: \"veth\", version: \"1\.0\", fw-version: \"\", rom-version: \"\", bus-info: \"\".*" fi INFOS_REGEX+="Time precision = nanoseconds \(9\).*" INFOS_REGEX+="Interface #1 info:.*" INFOS_REGEX+="Name = ${NS}:xdp_test_prog_with_a_long_name\(\)@fexit.*" if [ $OLD_CAPINFOS -eq 0 ]; then INFOS_REGEX+="Hardware = driver: \"veth\", version: \"1\.0\", fw-version: \"\", rom-version: \"\", bus-info: \"\".*" fi INFOS_REGEX+="Time precision = nanoseconds \(9\))" $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -w - 2> /dev/null | tcpdump -r - -n") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_PKT ]]; then print_result "IPv6 packet not received" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -w $PCAP_FILE --rx-capture=entry,exit") $PING6 -W 2 -c 1 "$INSIDE_IP6" || (rm "$PCAP_FILE" >& /dev/null; return 1) RESULT=$(stop_background "$PID") || (print_result "xdpdump failed"; rm "$PCAP_FILE" >& /dev/null; return 1) RESULT=$(capinfos "$PCAP_FILE") || (print_result "capinfos failed"; rm "$PCAP_FILE" >& /dev/null; return 1) if ! [[ $RESULT =~ $INFOS_REGEX ]]; then echo "REGEX: $INFOS_REGEX" print_result "Failed capinfos content" rm "$PCAP_FILE" >& /dev/null return 1 fi if version_greater_or_equal "$TSHARK_VERSION" 3.6.7; then local ATTRIB_REGEX="^$NS:xdp_test_prog_with_a_long_name\(\)@fentry 0 1 $.*^$NS:xdp_test_prog_with_a_long_name\(\)@fexit 0 1 2$.*" RESULT=$(tshark -r "$PCAP_FILE" -T fields \ -e frame.interface_name \ -e frame.interface_queue \ -e frame.packet_id \ -e frame.verdict.ebpf_xdp) if ! [[ $RESULT =~ $ATTRIB_REGEX ]]; then print_result "Failed attributes content with Tshark $TSHARK_VERSION" rm "$PCAP_FILE" >& /dev/null return 1 fi fi rm "$PCAP_FILE" >& /dev/null $XDP_LOADER unload "$NS" --all || return 1 } test_capt_term() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_X_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes, captured 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_X_OPT="0x0020: 00 00 00 00 00 02 fc 42 de ad ca fe" $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet not received" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -x") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! 
[[ $RESULT =~ $PASS_X_REGEX ]]; then print_result "IPv6 packet not received[2]" return 1 fi # If the IP6 addresses remain the same this simple string compare can be # used to verify the -x output is present. if [[ "$RESULT" != *"$PASS_X_OPT"* ]]; then print_result "IPv6 HEX packet not received" return 1 fi $XDP_LOADER unload "$NS" --all || return 1 } test_exitentry() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PASS_ENTRY_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_EXIT_REGEX="(xdp_test_prog_with_a_long_name\(\)@exit\[PASS\]: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_ENTRY_D_REGEX="(xdp_drop\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_EXIT_D_REGEX="(xdp_drop\(\)@exit\[DROP\]: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local ID_ENTRY_REGEX="xdp_drop\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id ([0-9]+)" local ID_EXIT_REGEX="xdp_drop\(\)@exit\[DROP\]: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id ([0-9]+)" $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --rx-capture=entry") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_ENTRY_REGEX ]]; then print_result "IPv6 entry packet not received" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --rx-capture=exit") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_EXIT_REGEX ]]; then print_result "IPv6 exit packet not received" return 1 fi $XDP_LOADER unload "$NS" --all || return 1 $XDP_LOADER load "$NS" "$TEST_PROG_DIR/xdp_drop.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS --rx-capture=exit") $PING6 -W 1 -c 1 "$INSIDE_IP6" # Note that this ping will fail!! RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_EXIT_D_REGEX ]]; then print_result "IPv6 drop exit packet not received" return 1 fi PID=$(start_background "$XDPDUMP -i $NS --rx-capture=exit,entry") $PING6 -W 1 -c 1 "$INSIDE_IP6" # Note that this ping will fail!! RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_EXIT_D_REGEX && $RESULT =~ $PASS_ENTRY_D_REGEX ]]; then print_result "IPv6 drop entry/exit packet not received" return 1 fi [[ $RESULT =~ $ID_ENTRY_REGEX ]] ENTRY_ID=${BASH_REMATCH[1]} [[ $RESULT =~ $ID_EXIT_REGEX ]] EXIT_ID=${BASH_REMATCH[1]} if [[ "$EXIT_ID" != "$ENTRY_ID" ]]; then print_result "Failed matching IDs" return 1 fi $XDP_LOADER unload "$NS" --all || return 1 } test_snap() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes, captured 16 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PASS_II_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes, captured 21 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -x --snapshot-length=16") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! 
[[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet fragment not received" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -x -s 21") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_II_REGEX ]]; then print_result "IPv6 packet fragment not received[2]" return 1 fi $XDP_LOADER unload "$NS" --all || return 1 } test_multi_pkt() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach local PASS_ENTRY_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size [0-9]+ bytes on if_index [0-9]+, rx queue [0-9]+, id 20000)" local PASS_EXIT_REGEX="(xdp_test_prog_with_a_long_name\(\)@exit\[PASS\]: packet size [0-9]+ bytes on if_index [0-9]+, rx queue [0-9]+, id 20000)" local PKT_SIZES=(56 512 1500) $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 for PKT_SIZE in "${PKT_SIZES[@]}" ; do PID=$(start_background_no_stderr "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --rx-capture=entry,exit") timeout 40 $PING6 -q -W 2 -s "$PKT_SIZE" -c 20000 -f "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_ENTRY_REGEX ]]; then print_result "IPv6 entry packet not received, $PKT_SIZE" return 1 fi if ! [[ $RESULT =~ $PASS_EXIT_REGEX ]]; then print_result "IPv6 exit packet not received, $PKT_SIZE" return 1 fi done $XDP_LOADER unload "$NS" --all || return 1 } test_perf_wakeup() { skip_if_missing_kernel_symbol bpf_xdp_output_proto skip_if_missing_trace_attach $XDPDUMP --help | grep -q "\-\-perf-wakeup" if [ $? -eq 1 ]; then # No support for perf_wakeup, so return SKIP return "$SKIPPED_TEST" fi local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+)" local PASS_10K_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id 10000)" local WAKEUPS=(0 1 32 128) $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 for WAKEUP in "${WAKEUPS[@]}" ; do # We send a single packet to make sure flushing of the buffer works! PID=$(start_background_no_stderr "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --perf-wakeup=$WAKEUP") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet not received for wakeup $WAKEUP" return 1 fi # We sent 10k packets and see if the all arrive PID=$(start_background_no_stderr "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name --perf-wakeup=$WAKEUP") timeout 20 "$PING6" -q -W 2 -c 10000 -f "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! 
[[ $RESULT =~ $PASS_10K_REGEX ]]; then print_result "IPv6 10k packet not received for wakeup $WAKEUP" return 1 fi done $XDP_LOADER unload "$NS" --all || return 1 } test_none_xdp() { local PASS_PKT="packet size 118 bytes on if_name \"$NS\"" local WARN_MSG="WARNING: Specified interface does not have an XDP program loaded," $XDP_LOADER unload "$NS" --all PID=$(start_background "$XDPDUMP -i $NS") $PING6 -W 2 -c 4 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if [[ "$RESULT" != *"$PASS_PKT"* ]]; then print_result "IPv6 packet not received" return 1 fi if [[ "$RESULT" != *"$WARN_MSG"* ]]; then print_result "Missing warning message" return 1 fi } test_promiscuous_selfload() { local PASS_PKT="packet size 118 bytes on if_name \"$NS\"" local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes, captured 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" $XDP_LOADER unload "$NS" --all dmesg -C PID=$(start_background "$XDPDUMP -i $NS -P") $PING6 -W 2 -c 4 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if [[ "$RESULT" != *"$PASS_PKT"* ]]; then print_result "IPv6 packet not received [legacy mode]" return 1 fi RESULT=$(dmesg) if [[ "$RESULT" != *"device $NS entered promiscuous mode"* ]] && [[ "$RESULT" != *"$NS: entered promiscuous mode"* ]]; then print_result "Failed enabling promiscuous mode on legacy interface" return 1 fi if [[ "$RESULT" != *"device $NS left promiscuous mode"* ]] && [[ "$RESULT" != *"$NS: left promiscuous mode"* ]]; then print_result "Failed disabling promiscuous mode on legacy interface" return 1 fi } test_promiscuous_preload() { skip_if_missing_kernel_symbol bpf_xdp_output skip_if_missing_trace_attach local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes, captured 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" || return 1 dmesg -C PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name -x --promiscuous-mode") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet not received" return 1 fi RESULT=$(dmesg) if [[ "$RESULT" != *"device $NS entered promiscuous mode"* ]] && [[ "$RESULT" != *"$NS: entered promiscuous mode"* ]]; then print_result "Failed enabling promiscuous mode on interface" return 1 fi if [[ "$RESULT" != *"device $NS left promiscuous mode"* ]] && [[ "$RESULT" != *"$NS: left promiscuous mode"* ]]; then print_result "Failed disabling promiscuous mode on interface" return 1 fi } test_pname_parse() { skip_if_legacy_fallback local PASS_REGEX="(xdp_test_prog_with_a_long_name\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PROG_ID_1=0 local PROG_ID_2=0 local PROG_ID_3=0 local PROG_ID_4=0 $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 # Here we load the programs without the xdp-tools loader to make sure # they are not loaded as a multi-program. $TEST_PROG_DIR/test-tool load -m skb "$NS" "$TEST_PROG_DIR/test_long_func_name.o" # We need to specify the function name or else it should fail PID=$(start_background "$XDPDUMP -i $NS") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't identify the full XDP main function!"* ]]; then print_result "xdpdump should fail with duplicate function!" 
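        # test_long_func_name.o ships two functions whose names share the
        # same 15-character prefix (xdp_test_prog_with_a_long_name and
        # xdp_test_prog_with_a_long_name_too); the kernel truncates both to
        # the same name, so without -p xdpdump cannot tell which BTF
        # function is the program's entry point.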
return 1 fi # Here we specify the correct function name so we should get the packet PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet not received" return 1 fi # Here we specify the wrong correct function name so we should not get the packet PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name_too") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't load eBPF object:"* ]]; then print_result "xdpdump should fail being unable to attach!" return 1 fi # Here we specify an non-existing function PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_non_existing_name") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't find function 'xdp_test_prog_with_a_long_non_existing_name' on interface!"* ]]; then print_result "xdpdump should fail with unknown function!" return 1 fi # Verify invalid program indexes PID=$(start_background "$XDPDUMP -i $NS -p hallo@3e") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't extract valid program id from \"hallo@3e\"!"* ]]; then print_result "xdpdump should fail with id value error!" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p hallo@128") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Invalid program id supplied, \"hallo@128\"!"* ]]; then print_result "xdpdump should fail with invalid id!" return 1 fi # Remove loaded program ip link set dev "$NS" xdpgeneric off # Now test actual multi-program parsing (negative test cases) $XDP_LOADER unload "$NS" --all $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" "$TEST_PROG_DIR/xdp_pass.o" "$TEST_PROG_DIR/xdp_drop.o" PID=$(start_background "$XDPDUMP -D") RESULT=$(stop_background "$PID") PROG_ID_1=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 1p | tr -d ' ') PROG_ID_2=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 2p | tr -d ' ') PROG_ID_3=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 3p | tr -d ' ') PROG_ID_4=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 4p | tr -d ' ') PID=$(start_background "$XDPDUMP -i $NS -p all") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't identify the full XDP 'xdp_test_prog_w' function in program $PROG_ID_2!"* || $RESULT != *"xdp_test_prog_with_a_long_name@$PROG_ID_2"* || $RESULT != *"xdp_test_prog_with_a_long_name_too@$PROG_ID_2"* || $RESULT != *"Command line to replace 'all':"* || $RESULT != *"xdp_dispatcher@$PROG_ID_1,@$PROG_ID_2,xdp_pass@$PROG_ID_3,xdp_drop@$PROG_ID_4"* ]]; then print_result "xdpdump should fail with all list!" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p hallo@$PROG_ID_1") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't find function 'hallo' in interface program $PROG_ID_1!"* ]]; then print_result "xdpdump should fail with hallo not found on program $PROG_ID_1!" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p hallo") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't find function 'hallo' on interface"* ]]; then print_result "xdpdump should fail hallo not found!" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_w") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't identify the full XDP 'xdp_test_prog_w' function!"* || $RESULT != *"xdp_test_prog_with_a_long_name_too"* ]]; then print_result "xdpdump should fail can't id xdp_test_prog_w!" 
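        # "xdp_test_prog_w" is exactly the truncated kernel name and is a
        # prefix of more than one BTF function, so xdpdump is expected to
        # list the candidates rather than guess.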
return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_w@$PROG_ID_2") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: Can't identify the full XDP 'xdp_test_prog_w' function in program $PROG_ID_2!"* || $RESULT != *"xdp_test_prog_with_a_long_name_too@$PROG_ID_2"* ]]; then print_result "xdpdump should fail can't id xdp_test_prog_w@$PROG_ID_2!" return 1 fi # Now load XDP programs with duplicate functions $XDP_LOADER unload "$NS" --all $XDP_LOADER load "$NS" "$TEST_PROG_DIR/test_long_func_name.o" "$TEST_PROG_DIR/test_long_func_name.o" "$TEST_PROG_DIR/xdp_pass.o" "$TEST_PROG_DIR/xdp_drop.o" PID=$(start_background "$XDPDUMP -D") RESULT=$(stop_background "$PID") PROG_ID_1=$(echo "$RESULT" | grep "$NS" -A2 | cut -c51-55 | sed -n 1p | tr -d ' ') PROG_ID_2=$(echo "$RESULT" | grep "$NS" -A2 | cut -c51-55 | sed -n 2p | tr -d ' ') PROG_ID_3=$(echo "$RESULT" | grep "$NS" -A2 | cut -c51-55 | sed -n 2p | tr -d ' ') PID=$(start_background "$XDPDUMP -i $NS -p xdp_test_prog_with_a_long_name") RESULT=$(stop_background "$PID") if [[ $RESULT != *"ERROR: The function 'xdp_test_prog_with_a_long_name' exists in multiple programs!"* || $RESULT != *"xdp_test_prog_with_a_long_name@$PROG_ID_2"* || $RESULT != *"xdp_test_prog_with_a_long_name@$PROG_ID_3"* ]]; then print_result "xdpdump should fail with duplicate function!" return 1 fi $XDP_LOADER unload "$NS" --all return 0 } test_multi_prog() { skip_if_legacy_fallback skip_if_missing_trace_attach local ENTRY_REGEX="(xdp_dispatcher\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+).*(xdp_pass\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local EXIT_REGEX="(xdp_pass\(\)@exit\[PASS\]: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+).*(xdp_dispatcher\(\)@exit\[PASS\]: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local PROG_ID_1=0 local PROG_ID_4=0 $XDP_LOADER load "$NS" "$TEST_PROG_DIR/xdp_pass.o" "$TEST_PROG_DIR/test_long_func_name.o" "$TEST_PROG_DIR/xdp_pass.o" PID=$(start_background "$XDPDUMP -D") RESULT=$(stop_background "$PID") PROG_ID_1=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 1p | tr -d ' ') PROG_ID_4=$(echo "$RESULT" | grep "$NS" -A4 | cut -c51-55 | sed -n 4p | tr -d ' ') PID=$(start_background "$XDPDUMP -i $NS -p xdp_dispatcher,xdp_pass@$PROG_ID_4 -vv") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if [[ $RESULT == *"Unrecognized arg#0 type PTR"* ]]; then $XDP_LOADER unload "$NS" --all return $SKIPPED_TEST fi if ! [[ $RESULT =~ $ENTRY_REGEX ]]; then print_result "Not received all fentry packets" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_dispatcher,xdp_pass@$PROG_ID_4 --rx-capture=exit") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $EXIT_REGEX ]]; then print_result "Not received all fexit packets" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_dispatcher,xdp_pass@$PROG_ID_4 --rx-capture=exit,entry") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $ENTRY_REGEX ]]; then print_result "Not received all fentry packets on entry/exit test" return 1 fi if ! [[ $RESULT =~ $EXIT_REGEX ]]; then print_result "Not received all fexit packets on entry/exit test" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p $PROG_ID_1,$PROG_ID_4 --rx-capture=exit,entry") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! 
[[ $RESULT =~ $ENTRY_REGEX ]]; then print_result "[IDs]Not received all fentry packets on entry/exit test" return 1 fi if ! [[ $RESULT =~ $EXIT_REGEX ]]; then print_result "[IDs]Not received all fexit packets on entry/exit test" return 1 fi PID=$(start_background "$XDPDUMP -i $NS -p xdp_dispatcher,$PROG_ID_4 --rx-capture=exit,entry") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if ! [[ $RESULT =~ $ENTRY_REGEX ]]; then print_result "[Mix]Not received all fentry packets on entry/exit test" return 1 fi if ! [[ $RESULT =~ $EXIT_REGEX ]]; then print_result "[Mix]Not received all fexit packets on entry/exit test" return 1 fi $XDP_LOADER unload "$NS" --all return 0 } test_xdp_load() { local PASS_REGEX="(xdpdump\(\)@entry: packet size 118 bytes on if_index [0-9]+, rx queue [0-9]+, id [0-9]+)" local WARN_MSG="Will load a capture only XDP program!" PID=$(start_background "$XDPDUMP -i $NS --load-xdp-program") $PING6 -W 2 -c 1 "$INSIDE_IP6" || return 1 RESULT=$(stop_background "$PID") if [[ "$RESULT" != *"$WARN_MSG"* ]]; then print_result "Missing warning message" return 1 fi if ! [[ $RESULT =~ $PASS_REGEX ]]; then print_result "IPv6 packet not received" return 1 fi } cleanup_tests() { $XDP_LOADER unload "$NS" --all >/dev/null 2>&1 } xdp-tools-1.5.4/xdp-dump/xdpdump.c0000644000175100001660000015577015003640462016417 0ustar runnerdocker/* SPDX-License-Identifier: GPL-2.0 */ /***************************************************************************** * Include files *****************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PCAP_DONT_INCLUDE_PCAP_BPF_H #include #include #include #include #include #include #include #include "logging.h" #include "params.h" #include "util.h" #include "xdpdump.h" #include "xpcapng.h" #include "compat.h" /***************************************************************************** * Local definitions and global variables *****************************************************************************/ #define PROG_NAME "xdpdump" #define DEFAULT_SNAP_LEN 262144 #ifndef ENOTSUPP #define ENOTSUPP 524 /* Operation is not supported */ #endif #define RX_FLAG_FENTRY (1<<0) #define RX_FLAG_FEXIT (1<<1) struct flag_val rx_capture_flags[] = { {"entry", RX_FLAG_FENTRY}, {"exit", RX_FLAG_FEXIT}, {} }; struct enum_val xdp_modes[] = { {"native", XDP_MODE_NATIVE}, {"skb", XDP_MODE_SKB}, {"hw", XDP_MODE_HW}, {"unspecified", XDP_MODE_UNSPEC}, {NULL, 0} }; static const struct dumpopt { bool hex_dump; bool list_interfaces; bool load_xdp; bool promiscuous; bool use_pcap; struct iface iface; uint32_t perf_wakeup; uint32_t snaplen; char *pcap_file; char *program_names; unsigned int load_xdp_mode; unsigned int rx_capture; } defaults_dumpopt = { .hex_dump = false, .list_interfaces = false, .load_xdp = false, .promiscuous = false, .use_pcap = false, .snaplen = DEFAULT_SNAP_LEN, .load_xdp_mode = XDP_MODE_NATIVE, .rx_capture = RX_FLAG_FENTRY, }; struct dumpopt cfg_dumpopt; static struct prog_option xdpdump_options[] = { DEFINE_OPTION("rx-capture", OPT_FLAGS, struct dumpopt, rx_capture, .metavar = "", .typearg = rx_capture_flags, .help = "Capture point for the rx direction"), DEFINE_OPTION("list-interfaces", OPT_BOOL, struct dumpopt, list_interfaces, .short_opt = 'D', .help = "Print the list of available interfaces"), DEFINE_OPTION("load-xdp-mode", OPT_ENUM, struct dumpopt, 
load_xdp_mode, .typearg = xdp_modes, .metavar = "", .help = "Mode used for --load-xdp-mode, default native"), DEFINE_OPTION("load-xdp-program", OPT_BOOL, struct dumpopt, load_xdp, .help = "Load XDP trace program if no XDP program is loaded"), DEFINE_OPTION("interface", OPT_IFNAME, struct dumpopt, iface, .short_opt = 'i', .metavar = "", .help = "Name of interface to capture on"), #ifdef HAVE_LIBBPF_PERF_BUFFER__CONSUME DEFINE_OPTION("perf-wakeup", OPT_U32, struct dumpopt, perf_wakeup, .metavar = "", .help = "Wake up xdpdump every packets"), #endif DEFINE_OPTION("program-names", OPT_STRING, struct dumpopt, program_names, .short_opt = 'p', .metavar = "", .help = "Specific program to attach to"), DEFINE_OPTION("promiscuous-mode", OPT_BOOL, struct dumpopt, promiscuous, .short_opt = 'P', .help = "Open interface in promiscuous mode"), DEFINE_OPTION("snapshot-length", OPT_U32, struct dumpopt, snaplen, .short_opt = 's', .metavar = "", .help = "Minimum bytes of packet to capture"), DEFINE_OPTION("use-pcap", OPT_BOOL, struct dumpopt, use_pcap, .help = "Use legacy pcap format for XDP traces"), DEFINE_OPTION("write", OPT_STRING, struct dumpopt, pcap_file, .short_opt = 'w', .metavar = "", .help = "Write raw packets to pcap file"), DEFINE_OPTION("hex", OPT_BOOL, struct dumpopt, hex_dump, .short_opt = 'x', .help = "Print the full packet in hex"), END_OPTIONS }; #define MAX_LOADED_XDP_PROGRAMS (MAX_DISPATCHER_ACTIONS + 1) struct capture_programs { /* Contains a list of programs to capture on, with the respective * program names. The order MUST be the same as the loaded order! */ unsigned int nr_of_progs; struct prog_info { struct xdp_program *prog; const char *func; unsigned int rx_capture; /* Fields used by the actual loader. */ bool attached; int perf_map_fd; struct bpf_object *prog_obj; struct bpf_link *fentry_link; struct bpf_link *fexit_link; } progs[MAX_LOADED_XDP_PROGRAMS]; }; struct perf_handler_ctx { uint64_t missed_events; uint64_t last_missed_events; uint64_t captured_packets; uint64_t epoch_delta; uint64_t packet_id; uint64_t cpu_packet_id[MAX_CPUS]; struct dumpopt *cfg; struct capture_programs *xdp_progs; pcap_t *pcap; pcap_dumper_t *pcap_dumper; struct xpcapng_dumper *pcapng_dumper; }; bool exit_xdpdump; pcap_t *exit_pcap; /***************************************************************************** * get_if_speed() *****************************************************************************/ static uint64_t get_if_speed(struct iface *iface) { #define MAX_MODE_MASKS 10 int fd; struct ifreq ifr; struct { struct ethtool_link_settings req; uint32_t modes[3 * MAX_MODE_MASKS]; } ereq; if (iface == NULL) return 0; /* Open socket, and initialize structures. */ fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return 0; memset(&ereq, 0, sizeof(ereq)); ereq.req.cmd = ETHTOOL_GLINKSETTINGS; memset(&ifr, 0, sizeof(ifr)); strncpy(ifr.ifr_name, iface->ifname, sizeof(ifr.ifr_name) - 1); ifr.ifr_data = (void *)&ereq; /* First query the kernel to see how many masks we need to ask for. */ if (ioctl(fd, SIOCETHTOOL, &ifr) != 0) goto error_exit; if (ereq.req.link_mode_masks_nwords >= 0 || ereq.req.link_mode_masks_nwords < -MAX_MODE_MASKS || ereq.req.cmd != ETHTOOL_GLINKSETTINGS) goto error_exit; /* Now ask for the data set, and extract the speed in bps. */ ereq.req.link_mode_masks_nwords = -ereq.req.link_mode_masks_nwords; if (ioctl(fd, SIOCETHTOOL, &ifr) != 0) goto error_exit; /* If speed is unknown return 0. 
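 * (Ethtool reports an unknown speed as (__u32)-1, hence the -1U check
 * below; returning 0 simply means "speed not available" to the caller.)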
*/ if (ereq.req.speed == -1U) ereq.req.speed = 0; close(fd); return ereq.req.speed * 1000000ULL; error_exit: close(fd); return 0; } /***************************************************************************** * get_if_drv_info() *****************************************************************************/ static char *get_if_drv_info(struct iface *iface, char *buffer, size_t len) { int fd; char *r_buffer = NULL; struct ifreq ifr; struct ethtool_drvinfo info; if (iface == NULL || buffer == NULL || len == 0) return NULL; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return NULL; memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GDRVINFO; memset(&ifr, 0, sizeof(ifr)); strncpy(ifr.ifr_name, iface->ifname, sizeof(ifr.ifr_name) - 1); ifr.ifr_data = (void *)&info; if (ioctl(fd, SIOCETHTOOL, &ifr) != 0) goto exit; if (try_snprintf(buffer, len, "driver: \"%s\", version: \"%s\", " "fw-version: \"%s\", rom-version: \"%s\", " "bus-info: \"%s\"", info.driver, info.version, info.fw_version, info.erom_version, info.bus_info)) goto exit; r_buffer = buffer; exit: close(fd); return r_buffer; } /***************************************************************************** * set_if_promiscuous_mode() *****************************************************************************/ static int set_if_promiscuous_mode(struct iface *iface, bool enable, bool *did_enable) { int fd; int rc = 0; struct ifreq ifr; if (iface == NULL) return -EINVAL; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return -errno; memset(&ifr, 0, sizeof(ifr)); strncpy(ifr.ifr_name, iface->ifname, sizeof(ifr.ifr_name) - 1); if (ioctl(fd, SIOCGIFFLAGS, &ifr) != 0) { pr_debug("DBG: Failed getting promiscuous mode: %s\n", strerror(errno)); rc = -errno; goto exit; } if (((ifr.ifr_flags & IFF_PROMISC) && enable) || (!(ifr.ifr_flags & IFF_PROMISC) && !enable)) { pr_debug("DBG: Promiscuous mode already %s!\n", enable ? "on" : "off"); goto exit; } if (enable) ifr.ifr_flags |= IFF_PROMISC; else ifr.ifr_flags &= ~IFF_PROMISC; if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0) { pr_debug("DBG: Failed setting promiscuous mode %s: %s\n", enable ? 
"on" : "off", strerror(errno)); rc = -errno; goto exit; } if (did_enable) { if (enable) *did_enable = true; else *did_enable = false; } exit: close(fd); return rc; } /***************************************************************************** * get_xdp_return_string() *****************************************************************************/ static const char *get_xdp_action_string(enum xdp_action act) { switch (act) { case XDP_ABORTED: return "[ABORTED]"; case XDP_DROP: return "[DROP]"; case XDP_PASS: return "[PASS]"; case XDP_TX: return "[TX]"; case XDP_REDIRECT: return "[REDIRECT]"; } return "[*unknown*]"; } /***************************************************************************** * get_capture_mode_string() *****************************************************************************/ static const char *get_capture_mode_string(unsigned int mode) { switch(mode) { case RX_FLAG_FENTRY: return "entry"; case RX_FLAG_FEXIT: return "exit"; case RX_FLAG_FENTRY | RX_FLAG_FEXIT: return "entry/exit"; } return "unknown"; } /***************************************************************************** * snprinth() *****************************************************************************/ #define SNPRINTH_MIN_BUFFER_SIZE sizeof("0xffff: 00 11 22 33 44 55 66 77 88" \ " 99 aa bb cc dd ee ff " \ "................0") static int snprinth(char *str, size_t size, const uint8_t *buffer, size_t buffer_size, size_t offset) { int i; int pre_skip; int post_skip; size_t zero_offset; if (str == NULL || size < SNPRINTH_MIN_BUFFER_SIZE || buffer == NULL || offset >= buffer_size || buffer_size > 0xffff) return -EINVAL; zero_offset = offset & ~0xf; pre_skip = offset & 0xf; post_skip = (zero_offset + 0xf) < buffer_size ? \ 0 : 16 - (buffer_size - zero_offset); /* Print offset */ snprintf(str, size, "0x%04zx: ", offset & 0xfff0); str += 9; /* Print hex values */ if (pre_skip) { memset(str, ' ', pre_skip * 3); str[pre_skip * 3] = 0; } for (i = pre_skip; i < 16 - post_skip; i++) { snprintf(str + (i * 3), 5, "%02x ", buffer[zero_offset + i]); } if (post_skip) { memset(str + (i * 3), ' ', post_skip * 3); str[(i * 3) + (post_skip * 3)] = 0; } /* Print printable chars */ str += 16 * 3; *str++ = ' '; if (pre_skip) { memset(str, ' ', pre_skip); str[pre_skip] = 0; } for (i = pre_skip; i < 16 - post_skip; i++) str[i] = isprint(buffer[zero_offset + i]) ? \ buffer[zero_offset + i]: '.'; str[i] = 0; return 0; } /***************************************************************************** * handle_perf_event() *****************************************************************************/ static enum bpf_perf_event_ret handle_perf_event(void *private_data, int cpu, struct perf_event_header *event) { uint64_t ts; bool fexit; unsigned int if_idx, prog_idx; const char *xdp_func; struct perf_handler_ctx *ctx = private_data; struct perf_sample_event *e = container_of(event, struct perf_sample_event, header); struct perf_lost_event *lost = container_of(event, struct perf_lost_event, header); switch(e->header.type) { case PERF_RECORD_SAMPLE: if (cpu >= MAX_CPUS || e->header.size < sizeof(struct perf_sample_event) || e->size < (sizeof(struct pkt_trace_metadata) + e->metadata.cap_len) || e->metadata.prog_index >= ctx->xdp_progs->nr_of_progs) return LIBBPF_PERF_EVENT_CONT; fexit = e->metadata.flags & MDF_DIRECTION_FEXIT; prog_idx = e->metadata.prog_index; if_idx = prog_idx * 2 + (fexit ? 
1 : 0); xdp_func = ctx->xdp_progs->progs[prog_idx].func; if (prog_idx == 0 && (!fexit || ctx->xdp_progs->progs[prog_idx].rx_capture == RX_FLAG_FEXIT)) ctx->cpu_packet_id[cpu] = ++ctx->packet_id; ts = e->time + ctx->epoch_delta; if (ctx->pcapng_dumper) { struct xpcapng_epb_options_s options = {}; int64_t action = e->metadata.action; uint32_t queue = e->metadata.rx_queue; options.flags = PCAPNG_EPB_FLAG_INBOUND; options.dropcount = ctx->last_missed_events; options.packetid = &ctx->cpu_packet_id[cpu]; options.queue = &queue; options.xdp_verdict = fexit ? &action : NULL; xpcapng_dump_enhanced_pkt(ctx->pcapng_dumper, if_idx, e->packet, e->metadata.pkt_len, min(e->metadata.cap_len, ctx->cfg->snaplen), ts, &options); ctx->last_missed_events = 0; if (ctx->cfg->pcap_file[0] == '-' && ctx->cfg->pcap_file[1] == 0) xpcapng_dump_flush(ctx->pcapng_dumper); } else if (ctx->pcap_dumper) { struct pcap_pkthdr h; h.ts.tv_sec = ts / 1000000000ULL; h.ts.tv_usec = ts % 1000000000ULL / 1000; h.caplen = min(e->metadata.cap_len, ctx->cfg->snaplen); h.len = e->metadata.pkt_len; pcap_dump((u_char *) ctx->pcap_dumper, &h, e->packet); if (ctx->cfg->pcap_file[0] == '-' && ctx->cfg->pcap_file[1] == 0) pcap_dump_flush(ctx->pcap_dumper); } else { int i; char hline[SNPRINTH_MIN_BUFFER_SIZE]; if (ctx->cfg->hex_dump) { printf("%llu.%09lld: %s()@%s%s: packet size %u " "bytes, captured %u bytes on if_index " "%u, rx queue %u, id %"PRIu64"\n", ts / 1000000000ULL, ts % 1000000000ULL, xdp_func, fexit ? "exit" : "entry", fexit ? get_xdp_action_string( e->metadata.action) : "", e->metadata.pkt_len, e->metadata.cap_len, e->metadata.ifindex, e->metadata.rx_queue, ctx->cpu_packet_id[cpu]); for (i = 0; i < e->metadata.cap_len; i += 16) { snprinth(hline, sizeof(hline), e->packet, e->metadata.cap_len, i); printf(" %s\n", hline); } } else { printf("%llu.%09lld: %s()@%s%s: packet size %u " "bytes on if_index %u, rx queue %u, " "id %"PRIu64"\n", ts / 1000000000ULL, ts % 1000000000ULL, xdp_func, fexit ? "exit" : "entry", fexit ? get_xdp_action_string( e->metadata.action) : "", e->metadata.pkt_len,e->metadata.ifindex, e->metadata.rx_queue, ctx->cpu_packet_id[cpu]); } } ctx->captured_packets++; break; case PERF_RECORD_LOST: ctx->missed_events += lost->lost; ctx->last_missed_events += lost->lost; break; } return LIBBPF_PERF_EVENT_CONT; } /***************************************************************************** * get_epoch_to_uptime_delta() *****************************************************************************/ static int get_epoch_to_uptime_delta(uint64_t *delta) { /* This function will calculate the rough delta between uptime * seconds and the epoch time. This is not a precise delta as there is * a delay between calling the two functions below (and time() being in * seconds), but it's good enough to get a general offset. The delta * between packets is still based on the timestamps from the trace * infrastructure. 
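 * As a rough worked example: if time() returns 1700000000 seconds and
 * CLOCK_MONOTONIC reads 3600 seconds of uptime, the delta becomes
 * (1700000000 - 3600) * 1000000000 ns; handle_perf_event() later adds
 * this delta to the monotonic trace timestamp of each packet to get an
 * approximate wall-clock capture time.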
*/ struct timespec ts; uint64_t uptime; uint64_t epoch = time(NULL) * 1000000000ULL; if (clock_gettime(CLOCK_MONOTONIC, &ts)) { pr_warn("ERROR: Failed to get CLOCK_MONOTONIC time: %s(%d)", strerror(errno), errno); return -errno; } uptime = ts.tv_sec * 1000000000ULL + ts.tv_nsec; *delta = epoch - uptime; return 0; } /***************************************************************************** * capture_on_legacy_interface() *****************************************************************************/ static bool capture_on_legacy_interface(struct dumpopt *cfg) { bool rc = false; char errbuf[PCAP_ERRBUF_SIZE]; uint64_t captured_packets = 0; pcap_t *pcap = NULL; pcap_dumper_t *pcap_dumper = NULL; struct pcap_stat ps; /* Open pcap handle for live capture. */ if (cfg->rx_capture != RX_FLAG_FENTRY) { pr_warn("ERROR: For legacy capture only \"--rx-capture entry\"" " is supported!\n"); goto error_exit; } pcap = pcap_open_live(cfg->iface.ifname, cfg->snaplen, cfg->promiscuous, 1000, errbuf); if (pcap == NULL) { pr_warn("ERROR: Can't open pcap live interface: %s\n", errbuf); goto error_exit; } /* Open the pcap handle for pcap file. */ if (cfg->pcap_file) { pcap_dumper = pcap_dump_open(pcap, cfg->pcap_file); if (!pcap_dumper) { pr_warn("ERROR: Can't open pcap file for writing!\n"); goto error_exit; } } /* No more error conditions, display some capture information */ fprintf(stderr, "listening on %s, link-type %s (%s), " "capture size %d bytes\n", cfg->iface.ifname, pcap_datalink_val_to_name(pcap_datalink(pcap)), pcap_datalink_val_to_description(pcap_datalink(pcap)), cfg->snaplen); /* Loop for receive packets on live interface. */ exit_pcap = pcap; while (!exit_xdpdump) { const uint8_t *packet; struct pcap_pkthdr h; packet = pcap_next(pcap, &h); if (!packet) continue; if (pcap_dumper) { pcap_dump((u_char *) pcap_dumper, &h, packet); if (cfg->pcap_file[0] == '-' && cfg->pcap_file[1] == 0) pcap_dump_flush(pcap_dumper); } else { size_t i; char hline[SNPRINTH_MIN_BUFFER_SIZE]; if (cfg->hex_dump) { printf("%ld.%06ld: packet size %u bytes, " "captured %u bytes on if_name \"%s\"\n", (long) h.ts.tv_sec, (long) h.ts.tv_usec, h.len, h.caplen, cfg->iface.ifname); for (i = 0; i < h.caplen; i += 16) { snprinth(hline, sizeof(hline), packet, h.caplen, i); printf(" %s\n", hline); } } else { printf("%ld.%06ld: packet size %u bytes on " "if_name \"%s\"\n", (long) h.ts.tv_sec, (long) h.ts.tv_usec, h.len, cfg->iface.ifname); } } captured_packets++; } exit_pcap = NULL; rc = true; fprintf(stderr, "\n%"PRIu64" packets captured\n", captured_packets); if (pcap_stats(pcap, &ps) == 0) { fprintf(stderr, "%u packets dropped by kernel\n", ps.ps_drop); if (ps.ps_ifdrop != 0) fprintf(stderr, "%u packets dropped by interface\n", ps.ps_ifdrop); } error_exit: if (pcap_dumper) pcap_dump_close(pcap_dumper); if (pcap) pcap_close(pcap); return rc; } /***************************************************************************** * append_snprintf() *****************************************************************************/ int append_snprintf(char **buf, size_t *buf_len, size_t *offset, const char *format, ...) 
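/*
 * Appends printf-style text at *offset, growing the buffer by doubling as
 * needed and giving up with -ENOMEM once the buffer has reached 2048
 * bytes. Returns 0 on success. A minimal usage sketch (the values below
 * are illustrative only, not taken from the tool itself):
 *
 *   size_t buf_len = 128, offset = 0;
 *   char *buf = malloc(buf_len);
 *
 *   if (buf && append_snprintf(&buf, &buf_len, &offset,
 *                              "%s@%d", "xdp_pass", 10587) == 0)
 *           printf("%s\n", buf);   // prints "xdp_pass@10587"
 *   free(buf);
 */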
{ int len; va_list args; if (buf == NULL || *buf == NULL || buf_len == NULL || *buf_len <= 0 || offset == NULL || *buf_len - *offset <= 0) return -EINVAL; while (true) { char *new_buf; size_t new_buf_len; va_start(args, format); len = vsnprintf(*buf + *offset, *buf_len - *offset, format, args); va_end(args); if ((size_t)len < (*buf_len - *offset)) { *offset += len; len = 0; break; } if (*buf_len >= 2048) return -ENOMEM; new_buf_len = *buf_len * 2; new_buf = realloc(*buf, new_buf_len); if (!new_buf) return -ENOMEM; *buf = new_buf; *buf_len = new_buf_len; } return len; } /***************************************************************************** * get_program_names_all() *****************************************************************************/ static char *get_program_names_all(struct capture_programs *progs, int skip_index) { char *program_names; size_t size = 128; size_t offset = 0; program_names = malloc(size); if (!program_names) return NULL; for (unsigned int i = 0; i < progs->nr_of_progs; i++) { const char *kname = xdp_program__name(progs->progs[i].prog); const char *fname = progs->progs[i].func; uint32_t id = xdp_program__id(progs->progs[i].prog); if (skip_index != (int)i) { if (append_snprintf(&program_names, &size, &offset, "%s%s@%d", i == 0 ? "" : ",", fname ? fname : kname, id) < 0) { free(program_names); return NULL; } } else { if (append_snprintf(&program_names, &size, &offset, "%s%s@%d", i == 0 ? "" : ",", "", id) < 0) { free(program_names); return NULL; } } } return program_names; } /***************************************************************************** * find_func_matches() *****************************************************************************/ static size_t find_func_matches(const struct btf *btf, const char *func_name, const char **found_name, bool print, int print_id, bool exact) { const struct btf_type *t, *match; size_t len, matches = 0; const char *name; int nr_types, i; if (!btf) { pr_debug("No BTF found for program\n"); return 0; } len = strlen(func_name); nr_types = btf__type_cnt(btf); for (i = 1; i < nr_types; i++) { t = btf__type_by_id(btf, i); if (!btf_is_func(t)) continue; name = btf__name_by_offset(btf, t->name_off); if (!strncmp(name, func_name, len)) { pr_debug("Found func %s matching %s\n", name, func_name); if (print) { if (print_id < 0) pr_warn(" %s\n", name); else pr_warn(" %s@%d\n", name, print_id); } /* Do an exact match if the user specified a function * name, or if there is no possibility of truncation * because the length is different from the truncated * length. 
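 * For example, BPF_OBJ_NAME_LEN - 1 is 15, and the kernel reports
 * "xdp_test_prog_with_a_long_name" as the 15-character prefix
 * "xdp_test_prog_w"; only a needle of exactly that length can be a
 * truncation, any other length must already be the full name.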
*/ if (strlen(name) == len && (exact || len != BPF_OBJ_NAME_LEN - 1)) { *found_name = name; return 1; /* exact match */ } /* prefix, may not be unique */ matches++; match = t; } } if (exact) return 0; if (matches == 1) *found_name = btf__name_by_offset(btf, match->name_off); return matches; } /***************************************************************************** * match_target_function() *****************************************************************************/ static int match_target_function(struct dumpopt *cfg, struct capture_programs *all_progs, char *prog_name, int prog_id) { int i; unsigned int matches = 0; for (i = 0; i < (int)all_progs->nr_of_progs; i++) { const char *kname = xdp_program__name(all_progs->progs[i].prog); if (prog_id != -1 && xdp_program__id(all_progs->progs[i].prog) != (uint32_t) prog_id) continue; if (!strncmp(kname, prog_name, strlen(kname))) { if (all_progs->progs[i].func == NULL) { if (find_func_matches(xdp_program__btf(all_progs->progs[i].prog), prog_name, &all_progs->progs[i].func, false, -1, true) == 1) { all_progs->progs[i].rx_capture = cfg->rx_capture; matches++; } else if (strlen(prog_name) <= BPF_OBJ_NAME_LEN - 1) { /* If the user cut and paste the * truncated function name, make sure * we tell him all the possible options! */ matches = UINT_MAX; break; } } else if (!strcmp(all_progs->progs[i].func, prog_name)) { all_progs->progs[i].rx_capture = cfg->rx_capture; matches++; } } if (prog_id != -1) break; } if (!matches) { if (prog_id == -1) pr_warn("ERROR: Can't find function '%s' on interface!\n", prog_name); else pr_warn("ERROR: Can't find function '%s' in interface program %d!\n", prog_name, prog_id); return -ENOENT; } else if (matches == 1) { return 0; } if (matches != UINT_MAX) { pr_warn("ERROR: The function '%s' exists in multiple programs!\n", prog_name); } else { if (prog_id == -1) pr_warn("ERROR: Can't identify the full XDP '%s' function!\n", prog_name); else pr_warn("ERROR: Can't identify the full XDP '%s' function in program %d!\n", prog_name, prog_id); } pr_warn("The following is a list of candidates:\n"); for (i = 0; i < (int)all_progs->nr_of_progs; i++) { uint32_t cur_prog_id = xdp_program__id(all_progs->progs[i].prog); const char *func_dummy; if (prog_id != -1 && cur_prog_id != (uint32_t) prog_id) continue; find_func_matches(xdp_program__btf(all_progs->progs[i].prog), xdp_program__name(all_progs->progs[i].prog), &func_dummy, true, (prog_id == -1 && matches == UINT_MAX) ? -1 : (int) cur_prog_id, false); if (prog_id != -1) break; } pr_warn("Please use the -p option to pick the correct one.\n"); if (!strcmp("all", cfg->program_names)) { char *program_names = get_program_names_all(all_progs, i); if (program_names) { pr_warn("Command line to replace 'all':\n %s\n", program_names); free(program_names); } } return -EAGAIN; } /***************************************************************************** * check_btf() *****************************************************************************/ static bool check_btf(struct xdp_program *prog) { if (xdp_program__btf(prog)) return true; pr_warn("ERROR: xdpdump requires BTF information, but that is missing " "from the loaded XDP program!\n"); return false; } /***************************************************************************** * find_target() * * What is this function trying to do? It will return a list of programs to * capture on, based on the configured program-names. If this parameter is * not given, it will attach to the first (main) program. 
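 * For example (from the man page), both of the following select the
 * dispatcher plus one specific xdp_pass instance:
 *
 *   xdpdump -i eth0 --rx-capture=entry,exit -p xdp_dispatcher,xdp_pass@10587
 *   xdpdump -i eth0 --rx-capture=entry,exit -p 10558,10587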
* * Note that the kernel API will truncate function names at BPF_OBJ_NAME_LEN * so we need to guess the correct function if it is not explicitly given with * the program-names option. * *****************************************************************************/ static int find_target(struct dumpopt *cfg, struct xdp_multiprog *mp, struct capture_programs *tgt_progs) { const char *func; struct xdp_program *prog, *p; struct capture_programs progs; size_t matches; char *prog_name; char *prog_safe_ptr; char *program_names = cfg->program_names; prog = xdp_multiprog__main_prog(mp); if (!check_btf(prog)) return -EINVAL; /* First take care of the default case, i.e. no function supplied */ if (!program_names) { /* libxdp has an optimization where it skips the dispatcher * if only one program is loaded. If this is the case, we need * to attach to the actual first program, not the dispatcher. */ if (xdp_multiprog__program_count(mp) == 1) { prog = xdp_multiprog__next_prog(NULL, mp); if (!check_btf(prog)) return -EINVAL; } matches = find_func_matches(xdp_program__btf(prog), xdp_program__name(prog), &func, false, -1, false); if (!matches) { pr_warn("ERROR: Can't find function '%s' on interface!\n", xdp_program__name(prog)); return -ENOENT; } else if (matches == 1) { tgt_progs->nr_of_progs = 1; tgt_progs->progs[0].prog = prog; tgt_progs->progs[0].func = func; tgt_progs->progs[0].rx_capture = cfg->rx_capture; return 0; } pr_warn("ERROR: Can't identify the full XDP main function!\n" "The following is a list of candidates:\n"); find_func_matches(xdp_program__btf(prog), xdp_program__name(prog), &func, true, -1, false); pr_warn("Please use the -p option to pick the correct one.\n"); return -EAGAIN; } /* We end up here if we have one or more configured functions, which can be * any function in one of the programs attached. In the case of * multiple programs we can even have duplicate functions amongst * programs and we need a way to differentiate. We do this by * supplying an @<id> suffix after the function name. See the -D output * for the program IDs. * We also have the "all" keyword, which will specify that all * functions need to be traced.
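* For example, "all" is first expanded by get_program_names_all() into an
* explicit "func@id" list such as "xdp_dispatcher@10558,xdp_pass@10587"
* (illustrative IDs), which the loop below then parses exactly like a
* user-supplied --program-names value.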
*/ /* Fill in the all_prog data structure to make matching easier */ memset(&progs, 0, sizeof(progs)); progs.progs[progs.nr_of_progs].prog = prog; matches = find_func_matches(xdp_program__btf(prog), xdp_program__name(prog), &progs.progs[progs.nr_of_progs].func, false, -1, false); if (matches != 1) progs.progs[progs.nr_of_progs].func = NULL; progs.nr_of_progs++; for (p = xdp_multiprog__next_prog(NULL, mp); p; p = xdp_multiprog__next_prog(p, mp)) { progs.progs[progs.nr_of_progs].prog = p; matches = find_func_matches(xdp_program__btf(p), xdp_program__name(p), &progs.progs[progs.nr_of_progs].func, false, -1, false); if (matches != 1) progs.progs[progs.nr_of_progs].func = NULL; progs.nr_of_progs++; if (progs.nr_of_progs >= MAX_LOADED_XDP_PROGRAMS) break; } /* If the "all" option is specified, create temporary program names */ if (!strcmp("all", program_names)) { program_names = get_program_names_all(&progs, -1); if (!program_names) { pr_warn("ERROR: Out of memory for 'all' programs!\n"); return -ENOMEM; } } /* Split up the --program-names and walk over it */ for (prog_name = strtok_r(program_names, ",", &prog_safe_ptr); prog_name != NULL; prog_name = strtok_r(NULL, ",", &prog_safe_ptr)) { int rc; unsigned long id = -1; char *id_str = strchr(prog_name, '@'); char *alloc_name = NULL; if (id_str) { unsigned int i; char *endptr; errno = 0; id_str++; id = strtoul(id_str, &endptr, 10); if ((errno == ERANGE && id == ULONG_MAX) || (errno != 0 && id == 0) || *endptr != '\0' || endptr == id_str) { pr_warn("ERROR: Can't extract valid program id from \"%s\"!\n", prog_name); if (cfg->program_names != program_names) free(program_names); return -EINVAL; } for (i = 0; i < progs.nr_of_progs; i++) { if (id == xdp_program__id(progs.progs[i].prog)) break; } if (i >= progs.nr_of_progs) { pr_warn("ERROR: Invalid program id supplied, \"%s\"!\n", prog_name); if (cfg->program_names != program_names) free(program_names); return -EINVAL; } alloc_name = strndup(prog_name, id_str - prog_name - 1); if (!alloc_name) { pr_warn("ERROR: Out of memory while processing program-name argument!\n"); if (cfg->program_names != program_names) free(program_names); return -ENOMEM; } prog_name = alloc_name; } else { /* If no @id was specified, check whether the program name * is actually a program id. If so, locate the name and * use it in the lookup below. */ char *endptr; unsigned long prog_id; errno = 0; /* reset before strtoul() so a stale value can't fail the check */ prog_id = strtoul(prog_name, &endptr, 10); if (!((errno == ERANGE && prog_id == ULONG_MAX) || (errno != 0 && prog_id == 0) || *endptr != '\0' || endptr == prog_name)) { for (unsigned int i = 0; i < progs.nr_of_progs; i++) { if (prog_id == xdp_program__id(progs.progs[i].prog)) { alloc_name = strdup(progs.progs[i].func); if (alloc_name) { id = prog_id; prog_name = alloc_name; } break; } } } } rc = match_target_function(cfg, &progs, prog_name, id); free(alloc_name); if (rc < 0) { if (cfg->program_names != program_names) free(program_names); return rc; } } #if 0 /* Removed this optimization for now as it will save one packet when * three programs are loaded, two for four, etc. In addition, it will * make the packet flow look a bit weird; without it, it's clearer * which programs the dispatcher has executed. */ if (cfg->rx_capture == (RX_FLAG_FENTRY | RX_FLAG_FEXIT)) { /* If we do entry and exit captures we can remove fentry from * back to back programs to skip storing an identical packet. * We keep fexit due to the reported return code. * * The first program is the dispatcher (which should not modify * the packet, but we can't be sure).
So we skip this and the * first sub-programs fexit). */ for (int i = 2; i < progs.nr_of_progs; i++) if (progs.progs[i-1].rx_capture & RX_FLAG_FENTRY) progs.progs[i].rx_capture &= ~RX_FLAG_FENTRY; } #endif if (cfg->program_names != program_names) free(program_names); /* Copy all the programs that need capture actions */ memset(tgt_progs, 0, sizeof(*tgt_progs)); for (unsigned int i = 0; i < progs.nr_of_progs; i++) { if (!progs.progs[i].rx_capture) continue; tgt_progs->progs[tgt_progs->nr_of_progs].prog = progs.progs[i].prog; tgt_progs->progs[tgt_progs->nr_of_progs].func = progs.progs[i].func; tgt_progs->progs[tgt_progs->nr_of_progs].rx_capture = progs.progs[i].rx_capture; tgt_progs->nr_of_progs++; } return 0; } /***************************************************************************** * get_loaded_program_info() *****************************************************************************/ static char *get_loaded_program_info(struct dumpopt *cfg) { char *info; size_t info_size = 128; size_t info_offset = 0; struct xdp_multiprog *mp = NULL; info = malloc(info_size); if (!info) return NULL; if (append_snprintf(&info, &info_size, &info_offset, "Capture was taken on interface %s, with the " "following XDP programs loaded:\n", cfg->iface.ifname) < 0) goto error_out; mp = xdp_multiprog__get_from_ifindex(cfg->iface.ifindex); if (IS_ERR_OR_NULL(mp)) { if (append_snprintf(&info, &info_size, &info_offset, " %s()\n", "")) goto error_out; } else { struct xdp_program *prog = NULL; if (append_snprintf(&info, &info_size, &info_offset, " %s()\n", xdp_program__name( xdp_multiprog__main_prog(mp))) < 0) goto error_out; while ((prog = xdp_multiprog__next_prog(prog, mp))) { if (append_snprintf(&info, &info_size, &info_offset, " %s()\n", xdp_program__name(prog)) < 0) goto error_out; } xdp_multiprog__close(mp); } return info; error_out: xdp_multiprog__close(mp); free(info); return NULL; } /***************************************************************************** * add_interfaces_to_pcapng() *****************************************************************************/ static bool add_interfaces_to_pcapng(struct dumpopt *cfg, struct xpcapng_dumper *pcapng_dumper, struct capture_programs *progs) { uint64_t if_speed; char if_drv[260]; if_speed = get_if_speed(&cfg->iface); if_drv[0] = 0; get_if_drv_info(&cfg->iface, if_drv, sizeof(if_drv)); for (unsigned int i = 0; i < progs->nr_of_progs; i++) { char if_name[128]; if (try_snprintf(if_name, sizeof(if_name), "%s:%s()@fentry", cfg->iface.ifname, progs->progs[i].func)) { pr_warn("ERROR: Could not format interface name, %s:%s()@fentry!\n", cfg->iface.ifname, progs->progs[i].func); return false; } if (xpcapng_dump_add_interface(pcapng_dumper, cfg->snaplen, if_name, NULL, NULL, if_speed, 9 /* nsec resolution */, if_drv) < 0) { pr_warn("ERROR: Can't add %s interface to PcapNG file!\n", if_name); return false; } if (try_snprintf(if_name, sizeof(if_name), "%s:%s()@fexit", cfg->iface.ifname, progs->progs[i].func)) { pr_warn("ERROR: Could not format interface name, %s:%s()@fexit!\n", cfg->iface.ifname, progs->progs[i].func); return false; } if (xpcapng_dump_add_interface(pcapng_dumper, cfg->snaplen, if_name, NULL, NULL, if_speed, 9 /* nsec resolution */, if_drv) < 0) { pr_warn("ERROR: Can't add %s interface to PcapNG file!\n", if_name); return false; } } return true; } static void print_compat_error(const char *what) { #if defined(__x86_64__) || defined(__i686__) pr_warn("ERROR: The kernel does not support " "fentry %s because it is too old!", what); #else pr_warn("ERROR: 
The kernel does not support " "fentry %s on the current CPU architecture!", what); #endif } /***************************************************************************** * load_and_attach_trace() *****************************************************************************/ static bool load_and_attach_trace(struct dumpopt *cfg, struct capture_programs *progs, unsigned int idx) { int err; struct bpf_object *trace_obj = NULL; struct bpf_program *trace_prog_fentry; struct bpf_program *trace_prog_fexit; struct bpf_link *trace_link_fentry = NULL; struct bpf_link *trace_link_fexit = NULL; struct bpf_map *perf_map; struct bpf_map *data_map; struct trace_configuration trace_cfg; if (idx >= progs->nr_of_progs || progs->nr_of_progs == 0) { pr_warn("ERROR: Attach program ID invalid!\n"); return false; } progs->progs[idx].attached = false; if (progs->progs[idx].rx_capture == 0) { pr_warn("ERROR: No RX capture mode to attach to!\n"); return false; } silence_libbpf_logging(); rlimit_loop: /* Load the trace program object */ trace_obj = open_bpf_file("xdpdump_bpf.o", NULL); err = libbpf_get_error(trace_obj); if (err) { pr_warn("ERROR: Can't open XDP trace program: %s(%d)\n", strerror(-err), err); trace_obj = NULL; goto error_exit; } /* Set the ifIndex in the DATA map */ data_map = bpf_object__find_map_by_name(trace_obj, "xdpdump_.data"); if (!data_map) { pr_warn("ERROR: Can't find the .data MAP in the trace " "program!\n"); goto error_exit; } if (bpf_map__value_size(data_map) != sizeof(trace_cfg)) { pr_warn("ERROR: Can't find the correct sized .data MAP in the " "trace program!\n"); goto error_exit; } trace_cfg.capture_if_ifindex = cfg->iface.ifindex; trace_cfg.capture_snaplen = cfg->snaplen; trace_cfg.capture_prog_index = idx; if (bpf_map__set_initial_value(data_map, &trace_cfg, sizeof(trace_cfg))) { pr_warn("ERROR: Can't set initial .data MAP in the trace " "program!\n"); goto error_exit; } /* Locate the fentry and fexit functions */ trace_prog_fentry = bpf_object__find_program_by_name(trace_obj, "trace_on_entry"); if (!trace_prog_fentry) { pr_warn("ERROR: Can't find XDP trace fentry function!\n"); goto error_exit; } trace_prog_fexit = bpf_object__find_program_by_name(trace_obj, "trace_on_exit"); if (!trace_prog_fexit) { pr_warn("ERROR: Can't find XDP trace fexit function!\n"); goto error_exit; } /* Before we can load the object in memory we need to set the attach * point to our function. 
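* (bpf_program__set_expected_attach_type() marks the trace programs as
* BPF_TRACE_FENTRY/BPF_TRACE_FEXIT, and bpf_program__set_attach_target()
* points each one at the target XDP program's fd plus its BTF function
* name; both must be set before bpf_object__load() is called below.)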
*/ bpf_program__set_expected_attach_type(trace_prog_fentry, BPF_TRACE_FENTRY); bpf_program__set_expected_attach_type(trace_prog_fexit, BPF_TRACE_FEXIT); bpf_program__set_attach_target(trace_prog_fentry, xdp_program__fd(progs->progs[idx].prog), progs->progs[idx].func); bpf_program__set_attach_target(trace_prog_fexit, xdp_program__fd(progs->progs[idx].prog), progs->progs[idx].func); /* Reuse the xdpdump_perf_map for all programs */ perf_map = bpf_object__find_map_by_name(trace_obj, "xdpdump_perf_map"); if (!perf_map) { pr_warn("ERROR: Can't find xdpdump_perf_map in trace program!\n"); goto error_exit; } if (idx != 0) { err = bpf_map__reuse_fd(perf_map, progs->progs[0].perf_map_fd); if (err) { pr_warn("ERROR: Can't reuse xdpdump_perf_map: %s\n", strerror(-err)); goto error_exit; } } /* Load the bpf object into memory */ err = bpf_object__load(trace_obj); if (err) { if (err == -EPERM && !double_rlimit()) { bpf_object__close(trace_obj); goto rlimit_loop; } else if (err == -E2BIG) { print_compat_error("function load"); } else { char err_msg[STRERR_BUFSIZE]; libbpf_strerror(err, err_msg, sizeof(err_msg)); pr_warn("ERROR: Can't load eBPF object: %s(%d)\n", err_msg, err); } goto error_exit; } /* Attach trace programs only in the direction(s) needed */ if (progs->progs[idx].rx_capture & RX_FLAG_FENTRY) { trace_link_fentry = bpf_program__attach_trace(trace_prog_fentry); err = libbpf_get_error(trace_link_fentry); if (err) { if (err == -ENOTSUPP) print_compat_error("function attach"); else pr_warn("ERROR: Can't attach XDP trace fentry " "function: %s\n", strerror(-err)); goto error_exit; } } if (progs->progs[idx].rx_capture & RX_FLAG_FEXIT) { trace_link_fexit = bpf_program__attach_trace(trace_prog_fexit); err = libbpf_get_error(trace_link_fexit); if (err) { pr_warn("ERROR: Can't attach XDP trace fexit function: %s\n", strerror(-err)); goto error_exit; } } /* Figure out the fd for the BPF_MAP_TYPE_PERF_EVENT_ARRAY trace map. 
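* Only the first trace program owns the map; later instances reuse its
* fd via the bpf_map__reuse_fd() call above, so every fentry/fexit hook
* feeds the same perf ring buffer.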
*/ if (idx == 0) { progs->progs[idx].perf_map_fd = bpf_map__fd(perf_map); if (progs->progs[idx].perf_map_fd < 0) { pr_warn("ERROR: Can't get xdpdump_perf_map file descriptor: %s\n", strerror(errno)); return false; } } else { progs->progs[idx].perf_map_fd = progs->progs[0].perf_map_fd; } progs->progs[idx].attached = true; progs->progs[idx].fentry_link = trace_link_fentry; progs->progs[idx].fexit_link = trace_link_fexit; progs->progs[idx].prog_obj = trace_obj; return true; error_exit: bpf_link__destroy(trace_link_fentry); bpf_link__destroy(trace_link_fexit); bpf_object__close(trace_obj); return false; } /***************************************************************************** * load_and_attach_traces() *****************************************************************************/ static bool load_and_attach_traces(struct dumpopt *cfg, struct capture_programs *progs) { for (unsigned int i = 0; i < progs->nr_of_progs; i++) if (!load_and_attach_trace(cfg, progs, i)) return false; return true; } /***************************************************************************** * detach_trace() *****************************************************************************/ static void detach_trace(struct capture_programs *progs, unsigned int idx) { if (idx >= progs->nr_of_progs || progs->nr_of_progs == 0 || !progs->progs[idx].attached) return; bpf_link__destroy(progs->progs[idx].fentry_link); bpf_link__destroy(progs->progs[idx].fexit_link); bpf_object__close(progs->progs[idx].prog_obj); progs->progs[idx].attached = false; } /***************************************************************************** * detach_traces() *****************************************************************************/ static void detach_traces(struct capture_programs *progs) { for (unsigned int i = 0; i < progs->nr_of_progs; i++) detach_trace(progs, i); } /***************************************************************************** * load_xdp_trace_program() *****************************************************************************/ static bool load_xdp_trace_program(struct dumpopt *cfg, struct capture_programs *progs) { DECLARE_LIBXDP_OPTS(xdp_program_opts, opts, 0); int fd, rc; char errmsg[STRERR_BUFSIZE]; struct xdp_program *prog; struct bpf_map *perf_map; struct bpf_map *data_map; struct trace_configuration trace_cfg; if (!cfg || !progs) return false; silence_libbpf_logging(); silence_libxdp_logging(); opts.find_filename = "xdpdump_xdp.o"; opts.prog_name = "xdpdump"; prog = xdp_program__create(&opts); if (libxdp_get_error(prog)) { int err = libxdp_get_error(prog); libxdp_strerror(err, errmsg, sizeof(errmsg)); pr_warn("ERROR: Can't open XDP trace program: %s(%d)\n", errmsg, err); return false; } perf_map = bpf_object__find_map_by_name(xdp_program__bpf_obj(prog), "xdpdump_perf_map"); if (!perf_map) { pr_warn("ERROR: Can't find xdpdump_perf_map in the xdp program!\n"); goto error_exit; } /* Set the trace configuration in the DATA map */ data_map = bpf_object__find_map_by_name(xdp_program__bpf_obj(prog), "xdpdump_.data"); if (!data_map) { pr_warn("ERROR: Can't find the .data MAP in the xdp program!\n"); goto error_exit; } if (bpf_map__value_size(data_map) != sizeof(trace_cfg)) { pr_warn("ERROR: Can't find the correct sized .data MAP in the xdp program!\n"); goto error_exit; } trace_cfg.capture_if_ifindex = cfg->iface.ifindex; trace_cfg.capture_snaplen = cfg->snaplen; trace_cfg.capture_prog_index = 0; if (bpf_map__set_initial_value(data_map, &trace_cfg, sizeof(trace_cfg))) { pr_warn("ERROR: Can't set initial .data 
MAP in the xdp program!\n"); goto error_exit; } do { rc = xdp_program__attach(prog, cfg->iface.ifindex, cfg->load_xdp_mode, 0); } while (rc == -EPERM && !double_rlimit()); if (rc) { libxdp_strerror(rc, errmsg, sizeof(errmsg)); pr_warn("ERROR: Can't attach XDP trace program: %s(%d)\n", errmsg, rc); goto error_exit; } fd = bpf_map__fd(perf_map); if (fd < 0) { pr_warn("ERROR: Can't get xdpdump_perf_map file descriptor: %s\n", strerror(errno)); xdp_program__detach(prog, cfg->iface.ifindex, cfg->load_xdp_mode, 0); goto error_exit; } progs->progs[0].prog = prog; progs->progs[0].func = xdp_program__name(prog); progs->progs[0].rx_capture = RX_FLAG_FENTRY; progs->progs[0].perf_map_fd = fd; progs->nr_of_progs = 1; return true; error_exit: xdp_program__close(prog); return false; } /***************************************************************************** * unload_xdp_trace_program() *****************************************************************************/ static void unload_xdp_trace_program(struct dumpopt *cfg, struct capture_programs *progs) { if (!progs || progs->nr_of_progs != 1) return; xdp_program__detach(progs->progs[0].prog, cfg->iface.ifindex, cfg->load_xdp_mode, 0); xdp_program__close(progs->progs[0].prog); progs->progs[0].prog = NULL; progs->nr_of_progs = 0; } /***************************************************************************** * capture_on_interface() *****************************************************************************/ static bool capture_on_interface(struct dumpopt *cfg) { int err, cnt; bool rc = false; bool load_xdp = false; bool promiscuous = false; pcap_t *pcap = NULL; pcap_dumper_t *pcap_dumper = NULL; struct xpcapng_dumper *pcapng_dumper = NULL; struct perf_buffer *perf_buf = NULL; struct perf_event_attr perf_attr = { .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME, .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_BPF_OUTPUT, .sample_period = 1, .wakeup_events = 1, }; struct perf_handler_ctx perf_ctx; struct xdp_multiprog *mp; struct capture_programs tgt_progs = {}; mp = xdp_multiprog__get_from_ifindex(cfg->iface.ifindex); if (IS_ERR_OR_NULL(mp) || xdp_multiprog__main_prog(mp) == NULL) { if (!cfg->load_xdp) { pr_warn("WARNING: Specified interface does not have an XDP program loaded%s," "\n capturing in legacy mode!\n", IS_ERR_OR_NULL(mp) ? "" : " in software"); xdp_multiprog__close(mp); return capture_on_legacy_interface(cfg); } pr_warn("WARNING: Specified interface does not have an XDP program loaded%s!\n" " Will load a capture only XDP program!\n", IS_ERR_OR_NULL(mp) ? "" : " in software"); load_xdp = true; } if (!load_xdp) { if (find_target(cfg, mp, &tgt_progs)) goto error_exit; if (tgt_progs.nr_of_progs == 0) { pr_warn("ERROR: Failed finding any attached XDP program!\n"); goto error_exit; } } /* Enable promiscuous mode if requested. */ if (cfg->promiscuous) { err = set_if_promiscuous_mode(&cfg->iface, true, &cfg->promiscuous); if (err) { pr_warn("ERROR: Failed setting promiscuous mode: %s(%d)\n", strerror(-err), -err); goto error_exit; } promiscuous = true; } /* Load and attach programs */ if (!load_xdp) { if (!load_and_attach_traces(cfg, &tgt_progs)) { /* Actual errors are reported in the above function. */ goto error_exit; } } else { if (!load_xdp_trace_program(cfg, &tgt_progs)) { /* Actual errors are reported in the above function. 
*/ goto error_exit; } } /* Open the pcap handle */ if (cfg->pcap_file) { if (cfg->use_pcap) { pcap = pcap_open_dead(DLT_EN10MB, cfg->snaplen); if (!pcap) { pr_warn("ERROR: Can't open pcap dead handler!\n"); goto error_exit; } pcap_dumper = pcap_dump_open(pcap, cfg->pcap_file); if (!pcap_dumper) { pr_warn("ERROR: Can't open pcap file for writing!\n"); goto error_exit; } } else { char *program_info; struct utsname utinfo; char os_info[260]; memset(&utinfo, 0, sizeof(utinfo)); uname(&utinfo); os_info[0] = 0; if (try_snprintf(os_info, sizeof(os_info), "%s %s %s %s", utinfo.sysname, utinfo.nodename, utinfo.release, utinfo.version)) { pr_warn("ERROR: Could not format OS information!\n"); goto error_exit; } program_info = get_loaded_program_info(cfg); if (!program_info) { pr_warn("ERROR: Could not format program information!\n"); goto error_exit; } pcapng_dumper = xpcapng_dump_open(cfg->pcap_file, program_info, utinfo.machine, os_info, "xdpdump v" TOOLS_VERSION); free(program_info); if (!pcapng_dumper) { pr_warn("ERROR: Can't open PcapNG file for writing!\n"); goto error_exit; } if (!add_interfaces_to_pcapng(cfg, pcapng_dumper, &tgt_progs)) { /* Error output is handled in * add_interfaces_to_pcapng() */ goto error_exit; } } } /* No more error conditions, display some capture information */ fprintf(stderr, "listening on %s, ingress XDP program ", cfg->iface.ifname); for (unsigned int i = 0; i < tgt_progs.nr_of_progs; i++) fprintf(stderr, "ID %u func %s, ", xdp_program__id(tgt_progs.progs[i].prog), tgt_progs.progs[i].func); fprintf(stderr, "capture mode %s, capture size %d bytes\n", get_capture_mode_string(tgt_progs.progs[0].rx_capture), cfg->snaplen); /* Setup perf context */ memset(&perf_ctx, 0, sizeof(perf_ctx)); perf_ctx.cfg = cfg; perf_ctx.xdp_progs = &tgt_progs; perf_ctx.pcap = pcap; perf_ctx.pcap_dumper = pcap_dumper; perf_ctx.pcapng_dumper = pcapng_dumper; if (get_epoch_to_uptime_delta(&perf_ctx.epoch_delta)) goto error_exit; /* Determine the perf wakeup_events value to use */ #ifdef HAVE_LIBBPF_PERF_BUFFER__CONSUME if (cfg->pcap_file) { if (cfg->pcap_file[0] == '-' && cfg->pcap_file[1] == 0) { /* If we pipe through stdio we do not want to buffer * any packets in the perf ring. */ perf_attr.wakeup_events = 1; } else { /* * If no specific wakeup value is specified, assume an * average packet size of 2K and pick a value that fills * the buffer without losing any packets.
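* For example (illustrative numbers): with PERF_MMAP_PAGE_COUNT == 256,
* a 4 KiB page size and 8 possible CPUs this yields
* 256 * 4096 / 8 / 2048 = 64 wakeup events, which is then capped at
* PERF_MAX_WAKEUP_EVENTS (64) by the min() below.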
*/ uint32_t events = PERF_MMAP_PAGE_COUNT * getpagesize() / (libbpf_num_possible_cpus() ?: 1) / 2048; if (events > 0) perf_attr.wakeup_events = min(PERF_MAX_WAKEUP_EVENTS, events); } } else { /* Only buffer in perf ring when using pcap_file */ perf_attr.wakeup_events = 1; } /* Cmdline option --perf-wakeup can override buffering levels */ if (cfg->perf_wakeup) perf_attr.wakeup_events = cfg->perf_wakeup; #endif pr_debug("perf-wakeup value used is %u\n", perf_attr.wakeup_events); #ifdef HAVE_LIBBPF_PERF_BUFFER__NEW_RAW /* the configure check looks for the 6-argument variant of the function */ perf_buf = perf_buffer__new_raw(tgt_progs.progs[0].perf_map_fd, PERF_MMAP_PAGE_COUNT, &perf_attr, handle_perf_event, &perf_ctx, NULL); #else struct perf_buffer_raw_opts perf_opts = {}; /* Setup perf ring buffers */ perf_opts.attr = &perf_attr; perf_opts.event_cb = handle_perf_event; perf_opts.ctx = &perf_ctx; perf_buf = perf_buffer__new_raw(tgt_progs.progs[0].perf_map_fd, PERF_MMAP_PAGE_COUNT, &perf_opts); #endif if (perf_buf == NULL) { pr_warn("ERROR: Failed to allocate raw perf buffer: %s(%d)", strerror(errno), errno); goto error_exit; } /* Loop through the dumper */ while (!exit_xdpdump) { cnt = perf_buffer__poll(perf_buf, 1000); if (cnt < 0 && errno != EINTR) { pr_warn("ERROR: Perf buffer polling failed: %s(%d)", strerror(errno), errno); goto error_exit; } } #ifdef HAVE_LIBBPF_PERF_BUFFER__CONSUME perf_buffer__consume(perf_buf); #endif fprintf(stderr, "\n%"PRIu64" packets captured\n", perf_ctx.captured_packets); fprintf(stderr, "%"PRIu64" packets dropped by perf ring\n", perf_ctx.missed_events); rc = true; error_exit: /* Cleanup all our resources */ if (promiscuous && cfg->promiscuous) { err = set_if_promiscuous_mode(&cfg->iface, false, NULL); if (err) pr_warn("ERROR: Failed disabling promiscuous mode: " "%s(%d)\n", strerror(-err), -err); } perf_buffer__free(perf_buf); xpcapng_dump_close(pcapng_dumper); if (pcap_dumper) pcap_dump_close(pcap_dumper); if (pcap) pcap_close(pcap); if (load_xdp) unload_xdp_trace_program(cfg, &tgt_progs); else detach_traces(&tgt_progs); xdp_multiprog__close(mp); return rc; } /***************************************************************************** * signal_handler() *****************************************************************************/ static void signal_handler(__unused int signo) { exit_xdpdump = true; if (exit_pcap) pcap_breakloop(exit_pcap); } /***************************************************************************** * main() *****************************************************************************/ int main(int argc, char **argv) { if (parse_cmdline_args(argc, argv, xdpdump_options, &cfg_dumpopt, sizeof(cfg_dumpopt), PROG_NAME, PROG_NAME, "XDPDump tool to dump network traffic", &defaults_dumpopt) != 0) return EXIT_FAILURE; /* If all the options are parsed ok, make sure we are root! */ if (check_bpf_environ()) return EXIT_FAILURE; if (cfg_dumpopt.snaplen == 0) cfg_dumpopt.snaplen = DEFAULT_SNAP_LEN; if (cfg_dumpopt.rx_capture == 0) cfg_dumpopt.rx_capture = RX_FLAG_FENTRY; /* See if we need to dump interfaces and exit */ if (cfg_dumpopt.list_interfaces) { if (iface_print_status(NULL)) return EXIT_SUCCESS; return EXIT_FAILURE; } /* Check that the system does not have more cores than we assume.
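* MAX_CPUS (512, see xdpdump.h) sizes the BPF_MAP_TYPE_PERF_EVENT_ARRAY
* used for the capture; with more configured CPUs than that, the per-CPU
* map could not cover every CPU, so bail out early instead.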
*/ if (sysconf(_SC_NPROCESSORS_CONF) > MAX_CPUS) { pr_warn("ERROR: System has more cores (%ld) than maximum " "supported (%d)!\n", sysconf(_SC_NPROCESSORS_CONF), MAX_CPUS); return EXIT_FAILURE; } /* From here on we assume we need to capture data on an interface */ if (signal(SIGINT, signal_handler) == SIG_ERR || signal(SIGHUP, signal_handler) == SIG_ERR || signal(SIGTERM, signal_handler) == SIG_ERR) { pr_warn("ERROR: Failed assigning signal handler: %s\n", strerror(errno)); return EXIT_FAILURE; } if (cfg_dumpopt.iface.ifname == NULL) { pr_warn("ERROR: You must specify an interface to capture on!\n"); return EXIT_FAILURE; } if (!capture_on_interface(&cfg_dumpopt)) return EXIT_FAILURE; return EXIT_SUCCESS; } xdp-tools-1.5.4/xdp-dump/xdpdump.h0000644000175100001660000000362415003640462016412 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /****************************************************************************** * Multiple include protection ******************************************************************************/ #ifndef _XDPDUMP_H_ #define _XDPDUMP_H_ /****************************************************************************** * General definitions ******************************************************************************/ #define PERF_MAX_WAKEUP_EVENTS 64 #define PERF_MMAP_PAGE_COUNT 256 #define MAX_CPUS 512 /****************************************************************************** * Commonly used macros ******************************************************************************/ #ifndef __packed #define __packed __attribute__((packed)) #endif /***************************************************************************** * trace configuration structure *****************************************************************************/ struct trace_configuration { __u32 capture_if_ifindex; __u32 capture_snaplen; __u32 capture_prog_index; }; /***************************************************************************** * perf data structures *****************************************************************************/ #define MDF_DIRECTION_FEXIT 1 struct pkt_trace_metadata { __u32 ifindex; __u32 rx_queue; __u16 pkt_len; __u16 cap_len; __u16 flags; __u16 prog_index; int action; } __packed; #ifndef __bpf__ struct perf_sample_event { struct perf_event_header header; __u64 time; __u32 size; struct pkt_trace_metadata metadata; unsigned char packet[]; }; struct perf_lost_event { struct perf_event_header header; __u64 id; __u64 lost; }; #endif /****************************************************************************** * End of include file ******************************************************************************/ #endif /* _XDPDUMP_H_ */ xdp-tools-1.5.4/xdp-dump/xdpdump.80000644000175100001660000002533115003640462016331 0ustar runnerdocker.TH "xdpdump" "8" "JANUARY 13, 2021" "V1.5.4" "a simple tcpdump-like tool for capturing packets at the XDP layer" .SH "NAME" xdpdump \- a simple tcpdump-like tool for capturing packets at the XDP layer .SH "SYNOPSIS" .PP \fIxdpdump\fP is a simple XDP packet capture tool that tries to behave similarly to \fItcpdump\fP; however, it has no packet filter or decode capabilities. .PP This can be used for debugging XDP programs that are already loaded on an interface. Packets can be dumped/inspected before, on \fBentry\fP to the XDP program, or after, at \fBexit\fP from the XDP program. Furthermore, at \fBexit\fP the XDP action is also captured. This means that even packets that are dropped at the XDP layer can be captured via this tool.
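.PP For example, the following minimal invocation captures both the packet as it enters the XDP program and the (possibly modified) packet as it exits, assuming a program is already loaded on \fIeth0\fP (the options are described below): .RS .nf \fC# xdpdump -i eth0 --rx-capture=entry,exit \fP .fi .RE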
.PP \fIxdpdump\fP works by attaching a bpf trace program to the XDP entry and/or exit function which stores the raw packet in a perf trace buffer. If no XDP program is loaded this approach cannot be used, and the tool will use a libpcap live-capture to be backward compatible. .SS "Running xdpdump" .PP The syntax for running \fIxdpdump\fP is: .RS .nf \fCUsage: xdpdump [options] XDPDump tool to dump network traffic Options: --rx-capture <mode> Capture point for the rx direction (valid values: entry,exit) -D, --list-interfaces Print the list of available interfaces -i, --interface <ifname> Name of interface to capture on --perf-wakeup <events> Wake up xdpdump every <events> packets -p, --program-names <prog> Specific program to attach to -s, --snapshot-length <snaplen> Minimum bytes of packet to capture --use-pcap Use legacy pcap format for XDP traces -w, --write <file> Write raw packets to pcap file -x, --hex Print the full packet in hex -v, --verbose Enable verbose logging (-vv: more verbose) --version Display version information -h, --help Show this help \fP .fi .RE .SH "The options explained" .PP The \fIxdpdump\fP tool tries to mimic the basic \fItcpdump\fP options, but just in case, each of the available options is explained below: .SS "--rx-capture <mode>" .PP Specify where the ingress packet gets captured: at the entry of the XDP program, at its exit, or both. Valid options are \fBentry\fP, \fBexit\fP, or both \fBentry,exit\fP. The packet at \fBexit\fP can be modified by the XDP program. If you are interested in seeing both the original and modified packet, use the \fBentry,exit\fP option. With this, each packet is captured twice. The default value for this is \fBentry\fP. .SS "-D, --list-interfaces" .PP Display a list of available interfaces and any XDP programs loaded. .SS "--load-xdp-mode" .PP Specifies which loader mode to use with the \fI\-\-load\-xdp\-program\fP option. The valid values are ‘native’, which is the default in-driver XDP mode, ‘skb’, which causes the so-called skb mode (also known as generic XDP) to be used, ‘hw’ which causes the program to be offloaded to the hardware, or ‘unspecified’ which leaves it up to the kernel to pick a mode (which it will do by picking native mode if the driver supports it, or generic mode otherwise). Note that using ‘unspecified’ can make it difficult to predict what mode a program will end up being loaded in. For this reason, the default is ‘native’. .SS "--load-xdp-program" .PP If no XDP program is loaded on the interface, by default, xdpdump will fall back to libpcap's live capture mode to capture the packets. Alternatively, with this option, you can ask xdpdump to load an XDP program to capture the packets directly. .SS "-i, --interface <ifname>" .PP Listen on interface \fIifname\fP. Note that if no XDP program is loaded on the interface it will use libpcap's live capture mode to capture the packets. .SS "--perf-wakeup <events>" .PP Let the Kernel wake up \fIxdpdump\fP once for every \fI<events>\fP packets posted in the perf ring buffer. The higher the number, the lower the impact on the actual XDP program. The default value is 0, which automatically calculates the value based on the available CPUs/buffers. Use -v to see the actual used value. .SS "-p, --program-names [<prog>|all]" .PP This option allows you to capture packets for a specific program, a set of programs, or all XDP programs loaded on the interface. You can either specify the actual program names or program IDs separated by commas. In the case where multiple programs are attached with the same name, you should use the program ID.
Use the -D option to see the loaded programs and their IDs. .PP In addition, the Linux API does not provide the full name of the attached eBPF entry function if it's longer than 15 characters. xdpdump will try to guess the correct function name from the available BTF debug information. However, if multiple functions exist with the same leading name, it cannot pick the correct one. It will dump the available functions, and you can choose the correct one, and supply it with this option. If you have programs with duplicate long names, you also need to specify the program ID with the full name. This can be done by adding the id to the name with the \fI@<id>\fP suffix. .SS "-P, --promiscuous-mode" .PP This option puts the interface into promiscuous mode. .SS "-s, --snapshot-length <snaplen>" .PP Capture \fBsnaplen\fP bytes of a packet rather than the default 262144 bytes. .SS "--use-pcap" .PP Use legacy pcap format for XDP traces. By default, it will use the PcapNG format so that it can store various metadata. .SS "-w, --write <file>" .PP Write the raw packets to a pcap file rather than printing them out in hexadecimal. Standard output is used if \fBfile\fP is \fI\-\fP. .SS "-x, --hex" .PP When dumping packets on the console, also print the full packet content in hex. .SS "-v, --verbose" .PP Enable debug logging. Specify twice for even more verbosity. .SS "--version" .PP Display \fIxdpdump\fP version information and exit. .SS "-h, --help" .PP Display a summary of the available options. .SH "Examples" .PP The example below loads the \fIxdp\-filter\fP program on eth0, but it does not do any actual filtering: .RS .nf \fC# xdp-filter load --mode skb eth0 # # xdpdump -D Interface Prio Program name Mode ID Tag Chain actions -------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher skb 10651 d51e469e988d81da => 10 xdpfilt_alw_all 10669 0b394f43ab24501c XDP_PASS \fP .fi .RE .PP Now we can try \fIxdpdump\fP: .RS .nf \fC# xdpdump -i eth0 -x listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes 1584373839.460733895: xdp_dispatcher()@entry: packet size 102 bytes, captured 102 bytes on if_index 2, rx queue 0, id 1 0x0000: 52 54 00 db 44 b6 52 54 00 34 38 da 08 00 45 48 RT..D.RT.48...EH 0x0010: 00 58 d7 dd 40 00 40 06 ec c3 c0 a8 7a 01 c0 a8 .X..@.@.....z... 0x0020: 7a 64 9c de 00 16 0d d5 c6 bc 46 c9 bb 11 80 18 zd........F..... 0x0030: 01 f5 7b b4 00 00 01 01 08 0a 77 0a 8c b8 40 12 ..{.......w...@. 0x0040: cc a6 00 00 00 10 54 ce 6e 20 c3 e7 da 6c 08 42 ......T.n ...l.B 0x0050: d6 d9 ee 42 42 f0 82 c9 4f 12 ed 7b 19 ab 22 0d ...BB...O..{..". 0x0060: 09 29 a9 ee df 89 .).... 1584373839.462340808: xdp_dispatcher()@entry: packet size 66 bytes, captured 66 bytes on if_index 2, rx queue 0, id 2 0x0000: 52 54 00 db 44 b6 52 54 00 34 38 da 08 00 45 48 RT..D.RT.48...EH 0x0010: 00 34 d7 de 40 00 40 06 ec e6 c0 a8 7a 01 c0 a8 .4..@.@.....z... 0x0020: 7a 64 9c de 00 16 0d d5 c6 e0 46 c9 bc 85 80 10 zd........F..... 0x0030: 01 f5 74 0c 00 00 01 01 08 0a 77 0a 8c ba 40 12 ..t.......w...@.
0x0040: d2 34 .4 ^C 2 packets captured 0 packets dropped by perf ring \fP .fi .RE .PP Below are two more examples redirecting the capture file to \fItcpdump\fP or \fItshark\fP: .RS .nf \fC# xdpdump -i eth0 -w - | tcpdump -r - -n listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes reading from file -, link-type EN10MB (Ethernet) 15:55:09.075887 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [P.], seq 3857553815:3857553851, ack 3306438882, win 501, options [nop,nop,TS val 1997449167 ecr 1075234328], length 36 15:55:09.077756 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [.], ack 37, win 501, options [nop,nop,TS val 1997449169 ecr 1075244363], length 0 15:55:09.750230 IP 192.168.122.1.40928 > 192.168.122.100.ssh: Flags [P.], seq 36:72, ack 37, win 501, options [nop,nop,TS val 1997449842 ecr 1075244363], length 36 \fP .fi .RE .RS .nf \fC# xdpdump -i eth0 -w - | tshark -r - -n listening on eth0, ingress XDP program ID 10651 func xdp_dispatcher, capture mode entry, capture size 262144 bytes 1 0.000000 192.168.122.1 → 192.168.122.100 SSH 102 Client: Encrypted packet (len=36) 2 0.000646 192.168.122.1 → 192.168.122.100 TCP 66 40158 → 22 [ACK] Seq=37 Ack=37 Win=1467 Len=0 TSval=1997621571 TSecr=1075416765 3 12.218164 192.168.122.1 → 192.168.122.100 SSH 102 Client: Encrypted packet (len=36) \fP .fi .RE .PP One final example capturing specific XDP programs loaded on the interface: .RS .nf \fC# xdpdump -D Interface Prio Program name Mode ID Tag Chain actions -------------------------------------------------------------------------------------- lo eth0 xdp_dispatcher skb 10558 d51e469e988d81da => 5 xdp_test_prog_w 10576 b5a46c6e9935298c XDP_PASS => 10 xdp_pass 10582 3b185187f1855c4c XDP_PASS => 10 xdp_pass 10587 3b185187f1855c4c XDP_PASS \fP .fi .RE .PP We would like to see the packets on the \fIxdp_dispatcher()\fP and the 2nd \fIxdp_pass()\fP program: .RS .nf \fC# xdpdump -i eth0 --rx-capture=entry,exit -p xdp_dispatcher,xdp_pass@10587 or # xdpdump -i eth0 --rx-capture=entry,exit -p 10558,10587 listening on eth0, ingress XDP program ID 10558 func xdp_dispatcher, ID 10587 func xdp_pass, capture mode entry/exit, capture size 262144 bytes 1607694215.501287259: xdp_dispatcher()@entry: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501371504: xdp_pass()@entry: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501383099: xdp_pass()@exit[PASS]: packet size 102 bytes on if_index 2, rx queue 0, id 1 1607694215.501394709: xdp_dispatcher()@exit[PASS]: packet size 102 bytes on if_index 2, rx queue 0, id 1 ^C 4 packets captured 0 packets dropped by perf ring \fP .fi .RE .SH "BUGS" .PP Please report any bugs on Github: \fIhttps://github.com/xdp-project/xdp-tools/issues\fP .SH "AUTHOR" .PP \fIxdpdump\fP was written by Eelco Chaudron xdp-tools-1.5.4/xdp-dump/.gitignore0000644000175100001660000000001315003640462016535 0ustar runnerdocker*~ xdpdump xdp-tools-1.5.4/xdp-dump/xdpdump_xdp.c0000644000175100001660000000442715003640462017262 0ustar runnerdocker// SPDX-License-Identifier: GPL-2.0 /***************************************************************************** * Include files *****************************************************************************/ #include #include #include #include #include "xdpdump.h" /***************************************************************************** * Macros *****************************************************************************/ #define min(x, y) ((x) < (y) ? 
x : y) /***************************************************************************** * Local definitions and global variables *****************************************************************************/ struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); __uint(max_entries, MAX_CPUS); __type(key, int); __type(value, __u32); } xdpdump_perf_map SEC(".maps"); /***************************************************************************** * .data section value storing the capture configuration *****************************************************************************/ struct trace_configuration trace_cfg SEC(".data"); /***************************************************************************** * XDP trace program *****************************************************************************/ SEC("xdp") int xdpdump(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; struct pkt_trace_metadata metadata; if (data >= data_end || trace_cfg.capture_if_ifindex != xdp->ingress_ifindex) return XDP_PASS; metadata.prog_index = trace_cfg.capture_prog_index; metadata.ifindex = xdp->ingress_ifindex; metadata.rx_queue = xdp->rx_queue_index; metadata.pkt_len = (__u16)(data_end - data); metadata.cap_len = min(metadata.pkt_len, trace_cfg.capture_snaplen); metadata.action = 0; metadata.flags = 0; bpf_perf_event_output(xdp, &xdpdump_perf_map, ((__u64) metadata.cap_len << 32) | BPF_F_CURRENT_CPU, &metadata, sizeof(metadata)); return XDP_PASS; } /***************************************************************************** * License *****************************************************************************/ char _license[] SEC("license") = "GPL"; xdp-tools-1.5.4/xdp-dump/Makefile0000644000175100001660000000110015003640462016203 0ustar runnerdocker# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) TOOL_NAME := xdpdump XDP_TARGETS := xdpdump_bpf xdpdump_xdp USER_TARGETS := xdpdump TEST_FILE := tests/test-xdpdump.sh # Disable warnings about VLAs not being at the end of a structure when building # with clang. The code is fine, but clang's complaint coupled with -Werror would # break the build. 
See https://github.com/xdp-project/xdp-tools/issues/304 CFLAGS += "-Wno-gnu-variable-sized-type-not-at-end" LIB_DIR = ../lib USER_LIBS = -lpcap MAN_PAGE := xdpdump.8 include $(LIB_DIR)/common.mk xdp-tools-1.5.4/version.mk0000644000175100001660000000034515003640462015035 0ustar runnerdockerTOOLS_VERSION := "1.5.4" # Conditionally defined make target makes it possible to print the version # defined above by running 'make -f version.mk' ifeq ($(MAKEFILE_LIST),version.mk) print_version: @echo $(TOOLS_VERSION) endif xdp-tools-1.5.4/Makefile0000644000175100001660000000412515003640462014457 0ustar runnerdocker # SPDX-License-Identifier: GPL-2.0 # Top level Makefile for xdp-tools ifeq ("$(origin V)", "command line") VERBOSE = $(V) endif ifndef VERBOSE VERBOSE = 0 endif ifeq ($(VERBOSE),0) MAKEFLAGS += --no-print-directory endif include version.mk include config.mk UTILS := xdp-filter xdp-loader xdp-dump ifneq ($(BPFTOOL),) UTILS += xdp-bench xdp-forward xdp-monitor xdp-trafficgen endif SUBDIRS := lib $(UTILS) .PHONY: check_submodule help clobber distclean clean install test libxdp $(SUBDIRS) all: $(SUBDIRS) lib: config.mk check_submodule @echo; echo $@; $(MAKE) -C $@ libxdp: config.mk check_submodule @echo; echo lib; $(MAKE) -C lib $@ libxdp_install: libxdp @$(MAKE) -C lib $@ $(UTILS): lib @echo; echo $@; $(MAKE) -C $@ help: @echo "Make Targets:" @echo " all - build binaries" @echo " clean - remove products of build" @echo " distclean - remove configuration and build" @echo " install - install binaries on local machine" @echo " test - run test suite" @echo " archive - create tarball of all sources" @echo "" @echo "Make Arguments:" @echo " V=[0|1] - set build verbosity level" config.mk: configure sh configure check_submodule: @if [ -d .git ] && `git submodule status lib/libbpf | grep -q '^+'`; then \ echo "" ;\ echo "** WARNING **: git submodule SHA-1 out-of-sync" ;\ echo " consider running: git submodule update" ;\ echo "" ;\ fi\ clobber: touch config.mk $(MAKE) clean rm -f config.mk cscope.* compile_commands.json distclean: clobber clean: check_submodule @for i in $(SUBDIRS); \ do $(MAKE) -C $$i clean; done install: all @for i in $(SUBDIRS); \ do $(MAKE) -C $$i install; done test: all @for i in lib/libxdp $(UTILS); do \ echo; echo test $$i; $(MAKE) -C $$i test; \ if [ $$? -ne 0 ]; then failed="y"; fi; \ done; \ if [ ! -z $$failed ]; then exit 1; fi archive: xdp-tools-$(TOOLS_VERSION).tar.gz .PHONY: xdp-tools-$(TOOLS_VERSION).tar.gz xdp-tools-$(TOOLS_VERSION).tar.gz: @./mkarchive.sh "$(TOOLS_VERSION)" compile_commands.json: clean compiledb make V=1